diff --git a/spaces/101-5/gpt4free/LEGAL_NOTICE.md b/spaces/101-5/gpt4free/LEGAL_NOTICE.md
deleted file mode 100644
index 7b9c4a348668119dc22af7512dccc4af8d431919..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/LEGAL_NOTICE.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Legal Notice
-
-This repository is _not_ associated with or endorsed by the providers of the APIs contained in this GitHub repository. This project is intended **for educational purposes only**; it is just a small personal project. Site owners may contact me to improve their security or to request the removal of their site from this repository.
-
-Please note the following:
-
-1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is _not_ claiming any right over them nor is it affiliated with or endorsed by any of the providers mentioned.
-
-2. **Responsibility**: The author of this repository is _not_ responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely responsible for their actions and any repercussions that may follow. We strongly recommend that users follow the Terms of Service (TOS) of each website.
-
-3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations.
-
-4. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.
-
-5. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.
\ No newline at end of file
diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/models/realesrnet_model.py b/spaces/17TheWord/RealESRGAN/realesrgan/models/realesrnet_model.py
deleted file mode 100644
index d11668f3712bffcd062c57db14d22ca3a0e1e59d..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/realesrgan/models/realesrnet_model.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import numpy as np
-import random
-import torch
-from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
-from basicsr.data.transforms import paired_random_crop
-from basicsr.models.sr_model import SRModel
-from basicsr.utils import DiffJPEG, USMSharp
-from basicsr.utils.img_process_util import filter2D
-from basicsr.utils.registry import MODEL_REGISTRY
-from torch.nn import functional as F
-
-
-@MODEL_REGISTRY.register()
-class RealESRNetModel(SRModel):
- """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
-
- It is trained without GAN losses.
- It mainly performs:
- 1. randomly synthesize LQ images in GPU tensors
- 2. optimize the networks with pixel-wise losses only (no GAN training).
- """
-
- def __init__(self, opt):
- super(RealESRNetModel, self).__init__(opt)
- self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts
- self.usm_sharpener = USMSharp().cuda() # do usm sharpening
- self.queue_size = opt.get('queue_size', 180)
-
- @torch.no_grad()
- def _dequeue_and_enqueue(self):
- """It is the training pair pool for increasing the diversity in a batch.
-
- Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
- batch could not have different resize scaling factors. Therefore, we employ this training pair pool
- to increase the degradation diversity in a batch.
- """
- # initialize
- b, c, h, w = self.lq.size()
- if not hasattr(self, 'queue_lr'):
- assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
- self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
- _, c, h, w = self.gt.size()
- self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
- self.queue_ptr = 0
- if self.queue_ptr == self.queue_size: # the pool is full
- # do dequeue and enqueue
- # shuffle
- idx = torch.randperm(self.queue_size)
- self.queue_lr = self.queue_lr[idx]
- self.queue_gt = self.queue_gt[idx]
- # get first b samples
- lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
- gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
- # update the queue
- self.queue_lr[0:b, :, :, :] = self.lq.clone()
- self.queue_gt[0:b, :, :, :] = self.gt.clone()
-
- self.lq = lq_dequeue
- self.gt = gt_dequeue
- else:
- # only do enqueue
- self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
- self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
- self.queue_ptr = self.queue_ptr + b
-
- @torch.no_grad()
- def feed_data(self, data):
- """Accept data from dataloader, and then add two-order degradations to obtain LQ images.
- """
- if self.is_train and self.opt.get('high_order_degradation', True):
- # training data synthesis
- self.gt = data['gt'].to(self.device)
- # USM sharpen the GT images
- if self.opt['gt_usm'] is True:
- self.gt = self.usm_sharpener(self.gt)
-
- self.kernel1 = data['kernel1'].to(self.device)
- self.kernel2 = data['kernel2'].to(self.device)
- self.sinc_kernel = data['sinc_kernel'].to(self.device)
-
- ori_h, ori_w = self.gt.size()[2:4]
-
- # ----------------------- The first degradation process ----------------------- #
- # blur
- out = filter2D(self.gt, self.kernel1)
- # random resize
- updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
- if updown_type == 'up':
- scale = np.random.uniform(1, self.opt['resize_range'][1])
- elif updown_type == 'down':
- scale = np.random.uniform(self.opt['resize_range'][0], 1)
- else:
- scale = 1
- mode = random.choice(['area', 'bilinear', 'bicubic'])
- out = F.interpolate(out, scale_factor=scale, mode=mode)
- # add noise
- gray_noise_prob = self.opt['gray_noise_prob']
- if np.random.uniform() < self.opt['gaussian_noise_prob']:
- out = random_add_gaussian_noise_pt(
- out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
- else:
- out = random_add_poisson_noise_pt(
- out,
- scale_range=self.opt['poisson_scale_range'],
- gray_prob=gray_noise_prob,
- clip=True,
- rounds=False)
- # JPEG compression
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
- out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
- out = self.jpeger(out, quality=jpeg_p)
-
- # ----------------------- The second degradation process ----------------------- #
- # blur
- if np.random.uniform() < self.opt['second_blur_prob']:
- out = filter2D(out, self.kernel2)
- # random resize
- updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
- if updown_type == 'up':
- scale = np.random.uniform(1, self.opt['resize_range2'][1])
- elif updown_type == 'down':
- scale = np.random.uniform(self.opt['resize_range2'][0], 1)
- else:
- scale = 1
- mode = random.choice(['area', 'bilinear', 'bicubic'])
- out = F.interpolate(
- out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
- # add noise
- gray_noise_prob = self.opt['gray_noise_prob2']
- if np.random.uniform() < self.opt['gaussian_noise_prob2']:
- out = random_add_gaussian_noise_pt(
- out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
- else:
- out = random_add_poisson_noise_pt(
- out,
- scale_range=self.opt['poisson_scale_range2'],
- gray_prob=gray_noise_prob,
- clip=True,
- rounds=False)
-
- # JPEG compression + the final sinc filter
- # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
- # as one operation.
- # We consider two orders:
- # 1. [resize back + sinc filter] + JPEG compression
- # 2. JPEG compression + [resize back + sinc filter]
- # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
- if np.random.uniform() < 0.5:
- # resize back + the final sinc filter
- mode = random.choice(['area', 'bilinear', 'bicubic'])
- out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
- out = filter2D(out, self.sinc_kernel)
- # JPEG compression
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
- out = torch.clamp(out, 0, 1)
- out = self.jpeger(out, quality=jpeg_p)
- else:
- # JPEG compression
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
- out = torch.clamp(out, 0, 1)
- out = self.jpeger(out, quality=jpeg_p)
- # resize back + the final sinc filter
- mode = random.choice(['area', 'bilinear', 'bicubic'])
- out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
- out = filter2D(out, self.sinc_kernel)
-
- # clamp and round
- self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.
-
- # random crop
- gt_size = self.opt['gt_size']
- self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale'])
-
- # training pair pool
- self._dequeue_and_enqueue()
- self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract
- else:
- # for paired training or validation
- self.lq = data['lq'].to(self.device)
- if 'gt' in data:
- self.gt = data['gt'].to(self.device)
- self.gt_usm = self.usm_sharpener(self.gt)
-
- def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
- # do not use the synthetic process during validation
- self.is_train = False
- super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
- self.is_train = True
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aerofly Rc 7 Cracked Pepper - The Ultimate Flight Simulator Experience.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aerofly Rc 7 Cracked Pepper - The Ultimate Flight Simulator Experience.md
deleted file mode 100644
index d11a0354d841834d8585020712f91416252f1c6d..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aerofly Rc 7 Cracked Pepper - The Ultimate Flight Simulator Experience.md
+++ /dev/null
@@ -1,73 +0,0 @@
-
-
Aerofly RC 7: A Realistic and Fun Simulator for RC Enthusiasts
-
If you love flying radio controlled (RC) models, you know how important it is to practice and improve your skills. But sometimes, the weather, the location, or the budget can limit your flying opportunities. That's why you need a good simulator that can give you a realistic and fun experience of flying RC models anytime, anywhere. And that's exactly what Aerofly RC 7 can offer you.
Aerofly RC 7 is a simulator for Windows and Mac operating systems that lets you learn and improve your RC flying skills with over 200 models and 50 sceneries to choose from. Whether you prefer airplanes, helicopters, jets, gliders, or quadcopters, you will find something that suits your taste and skill level. You can also fly with friends online, compete in game-like challenges, or create your own content with the DLC and user-created content available.
-
In this article, we will show you the features, benefits, and tips of using Aerofly RC 7 as your RC simulator. By the end of this article, you will be ready to take off and enjoy the thrill of flying RC models with Aerofly RC 7.
-
Features of Aerofly RC 7
-
Aerofly RC 7 is not just a game, it's a realistic simulation that mimics the physics, dynamics, and graphics of real RC models. Here are some of the features that make Aerofly RC 7 stand out from other simulators:
-
-
Over 200 models and 50 sceneries to choose from: You can fly a wide variety of models, from aerobatic airplanes to scale models, from helicopters to jets, from gliders to quadcopters. You can also choose from different sceneries, such as fields, mountains, lakes, cities, or even aircraft carriers. You can customize your models with different colors, decals, or accessories. You can also scale your models up or down to fit your preference.
-
State-of-the-art physics simulation and stunning graphics: Aerofly RC 7 uses a sophisticated physics engine that gives you a realistic feeling of flying. You can adjust the wind speed and direction, the time of day, or the weather conditions to challenge yourself. You can also enjoy the high-quality graphics that show every detail of your model and the scenery. You can zoom in or out, change the camera angle, or use different views to get the best perspective.
-
Different model types and flying modes: Aerofly RC 7 supports different types of models, such as electric or gas powered airplanes, helicopters with collective pitch or fixed pitch, jets with thrust vectoring or without, gliders with flaps or without, or quadcopters with different flight modes. You can also choose from different flying modes, such as beginner mode that limits the bank angle and altitude, normal mode that gives you full control but prevents stalling or crashing, expert mode that simulates real-world physics without any assistance.
-
Multiplayer mode and game-like competitions: Aerofly RC 7 allows you to fly with friends or other pilots online in multiplayer mode. You can chat with them, share tips, or challenge them to races or aerobatic contests. You can also compete in game-like competitions that test your skills in different tasks, such as landing on a moving target, flying through hoops, or performing stunts.
-
DLC and user-created content available: Aerofly RC 7 offers DLC (downloadable content) that adds more models and sceneries to your simulator. You can also download user-created content from the official website or the Steam community that adds more variety and creativity to your simulator.
-
-
How to Get Started with Aerofly RC 7
-
If you are new to Aerofly RC 7 or simulators in general, don't worry. Getting started with Aerofly RC 7 is easy and fun. Here are some steps to help you get going:
-
Aerofly Rc 7 Cracked Pepper - download full version
-Aerofly Rc 7 Cracked Pepper - best flight simulator game
-Aerofly Rc 7 Cracked Pepper - how to install and play
-Aerofly Rc 7 Cracked Pepper - review and rating
-Aerofly Rc 7 Cracked Pepper - free trial and activation code
-Aerofly Rc 7 Cracked Pepper - system requirements and compatibility
-Aerofly Rc 7 Cracked Pepper - tips and tricks for beginners
-Aerofly Rc 7 Cracked Pepper - realistic physics and graphics
-Aerofly Rc 7 Cracked Pepper - online multiplayer mode and features
-Aerofly Rc 7 Cracked Pepper - custom planes and scenarios
-Aerofly Rc 7 Cracked Pepper - comparison with other rc simulators
-Aerofly Rc 7 Cracked Pepper - troubleshooting and support
-Aerofly Rc 7 Cracked Pepper - latest updates and patches
-Aerofly Rc 7 Cracked Pepper - controller options and settings
-Aerofly Rc 7 Cracked Pepper - tutorial and training mode
-Aerofly Rc 7 Cracked Pepper - modding and community
-Aerofly Rc 7 Cracked Pepper - screenshots and videos
-Aerofly Rc 7 Cracked Pepper - cheats and hacks
-Aerofly Rc 7 Cracked Pepper - steam version and discounts
-Aerofly Rc 7 Cracked Pepper - vr compatibility and experience
-Aerofly Rc 7 Cracked Pepper - mac version and availability
-Aerofly Rc 7 Cracked Pepper - helicopter mode and controls
-Aerofly Rc 7 Cracked Pepper - glider mode and challenges
-Aerofly Rc 7 Cracked Pepper - jet mode and speed
-Aerofly Rc 7 Cracked Pepper - aerobatic mode and stunts
-Aerofly Rc 7 Cracked Pepper - scale mode and realism
-Aerofly Rc 7 Cracked Pepper - quadcopter mode and fun
-Aerofly Rc 7 Cracked Pepper - night mode and lighting effects
-Aerofly Rc 7 Cracked Pepper - water mode and landing skills
-Aerofly Rc 7 Cracked Pepper - wind mode and turbulence effects
-Aerofly Rc 7 Cracked Pepper - thermal mode and soaring skills
-Aerofly Rc 7 Cracked Pepper - slope mode and flying techniques
-Aerofly Rc 7 Cracked Pepper - dynamic mode and aerodynamics
-Aerofly Rc 7 Cracked Pepper - crash mode and damage effects
-Aerofly Rc 7 Cracked Pepper - cockpit mode and instruments
-Aerofly Rc 7 Cracked Pepper - chase mode and camera angles
-Aerofly Rc 7 Cracked Pepper - follow mode and formation flying
-Aerofly Rc 7 Cracked Pepper - race mode and competition rules
-Aerofly Rc 7 Cracked Pepper - combat mode and weapons systems
-Aerofly Rc 7 Cracked Pepper - rescue mode and missions objectives
-Aerofly Rc 7 Cracked Pepper - exploration mode and hidden secrets
-Aerofly Rc 7 Cracked Pepper - fun mode and easter eggs
-Aerofly Rc 7 Cracked Pepper - expert mode and difficulty levels
-Aerofly Rc 7 Cracked Pepper - challenge mode and achievements
-Aerofly Rc 7 Cracked Pepper - editor mode and creation tools
-Aerofly Rc 7 Cracked Pepper - sound mode and audio quality
-Aerofly Rc 7 Cracked Pepper - weather mode and climate effects
-Aerofly Rc 7 Cracked Pepper - scenery mode and landscape details
-Aerofly Rc 7 Cracked Pepper - location mode and geographic accuracy
-Aerofly Rc 7 Cracked Pepper - history mode and historical planes
-
-
Check the system requirements and install the simulator: Before you buy Aerofly RC 7, make sure your computer meets the minimum system requirements for Windows or Mac operating systems. You can find them on the official website or on Steam. Once you buy Aerofly RC 7 from Steam or from a retailer, follow the instructions to install it on your computer.
-
Choose a model and a scenery: After launching Aerofly RC 7, you will see the main menu where you can choose a model and a scenery. You can browse through different categories of models and sceneries by clicking on the arrows on the left and right sides of the screen. You can also use the filters on the top right corner of the screen to narrow down your choices by model type, difficulty level, size, etc. Once you find a model and a scenery that you like, click on them to select them.
-
Basic controls and settings: After selecting a model and a scenery, you will see a screen where you can adjust some basic controls and settings before flying. You can use your mouse, keyboard, joystick, gamepad, or an actual RC transmitter to control your model. You can also change some settings such as sound volume, graphics quality, and more.
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Can I Download Photoshop For Free.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Can I Download Photoshop For Free.md
deleted file mode 100644
index abeb5ec5b274408543ec7368f13230035a755ee2..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Can I Download Photoshop For Free.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
Can I Download Photoshop for Free?
-
Photoshop is one of the most popular and powerful photo editing programs in the world. It has a lot of features and tools to help you create stunning images and graphics. However, Photoshop is not free and requires a subscription to access all the functions and content.
If you want to use Photoshop without paying anything, you might be tempted to look for a free download. However, this is not a good idea for several reasons.
-
-
First of all, downloading Photoshop for free is illegal and unethical. You are violating the terms of service and the intellectual property rights of the developers. You are also depriving them of their rightful income and support.
-
Secondly, downloading Photoshop for free is risky and dangerous. You never know what kind of malware or viruses might be hidden in the file. You could expose your device and your personal data to hackers and cybercriminals. You could also damage your device or lose your files.
-
Thirdly, downloading Photoshop for free is unreliable and unsatisfying. You might not get the latest version or the full functionality of the software. You might encounter bugs, errors, crashes, or compatibility issues. You might also miss out on updates, new features, and content.
-
-
Therefore, the best way to enjoy Photoshop is to download it from the official source and pay for the subscription. This way, you can support the developers, protect your device and data, and have the best user experience possible.
-
If you still want to try Photoshop for free, you can take advantage of the free trial period that Adobe offers. You can also look for discounts, coupons, or promotions that Adobe might run from time to time. Alternatively, you can look for other free or cheaper photo editing software that suits your needs and preferences.
-
-
In conclusion, a free Photoshop download is not worth it. It is illegal, unethical, risky, dangerous, unreliable, and unsatisfying. The best way to use Photoshop is to download it from the official source and pay for the subscription. This way, you can enjoy all the benefits and features of this amazing photo editing software.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/City Car Driving Free Download V2.2.7 Crack.md b/spaces/1gistliPinn/ChatGPT4/Examples/City Car Driving Free Download V2.2.7 Crack.md
deleted file mode 100644
index 14e8867374762239c500abe4c9de84ede8ee8097..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/City Car Driving Free Download V2.2.7 Crack.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-September 15, 2020 (2563 B.E.) - Minimum OS: Windows 7 SP1/8/8.1/10 (64-bit); Processor: Intel Pentium Dual Core 3.2 GHz / AMD Athlon II X4 3.1 GHz; Memory: 4 GB RAM; Graphics: NVIDIA GeForce GTS 450 / AMD Radeon HD 5670; DirectX: version 9.0c; Video memory: 1 GB or more; Additional software: DirectX 9.0c or higher; HDD space: 5 GB
-Start the game, register.
-Go to the folder C:\\GAMES\\The Dark Crystal Chronicles - Age of Ages\\AppData\\LocalLow\\Sid Meier's Civilization V (in the AppData folder you can see the path to the Civilization V folder; for Windows 7 the path will look like this: C:\\Users\\username\\AppData\\LocalLow\\Sid Meier's Civilization V).
-
-
-
diff --git a/spaces/1line/AutoGPT/autogpt/json_utils/__init__.py b/spaces/1line/AutoGPT/autogpt/json_utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/100 In 1 Offline collection APK - Free Download for Android Devices.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/100 In 1 Offline collection APK - Free Download for Android Devices.md
deleted file mode 100644
index 9bbcfb03bf856437f640c2688bf730bf63ccf13b..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/100 In 1 Offline collection APK - Free Download for Android Devices.md
+++ /dev/null
@@ -1,137 +0,0 @@
-
-
100+ in 1 Offline Collection APK: A Review
-
If you are looking for a way to enjoy a variety of games on your Android device without having to worry about internet connection, ads, or in-app purchases, then you might want to check out the 100+ in 1 Offline Collection APK. This is an app that contains over 100 games that you can play offline and for free. Sounds too good to be true, right? Well, in this article, we will review this app and see what it has to offer, how to download and install it, and what are its pros and cons. Let's get started!
100+ in 1 Offline Collection APK is an app developed by Superxyz Lab, a game studio that specializes in creating offline games for Android devices. The app is also known as Gamebanjo Deluxe, and it features the most popular independent games from various genres and categories. You can find games like arcade, puzzle, racing, shooting, sports, strategy, adventure, and more. The app is designed to provide you with endless entertainment and fun without requiring any internet connection or spending any money.
-
Features of 100+ in 1 Offline Collection APK
-
100+ games in one app
-
The main feature of this app is that it contains over 100 games that you can play anytime and anywhere. You don't need to download or install each game separately, as they are all included in the app. You can easily switch between games by using the menu or the swipe gesture. You can also bookmark your favorite games for quick access. Some of the games that you can find in this app are:
-
-
Angry Birds: The classic game where you have to launch birds at pigs using a slingshot.
-
Cut the Rope: A physics-based puzzle game where you have to cut ropes to feed candy to a cute monster.
-
Fruit Ninja: A game where you have to slice fruits with your finger as they fly across the screen.
-
Temple Run: A game where you have to run away from monkeys while avoiding obstacles and collecting coins.
-
Subway Surfers: A game where you have to run on subway tracks while dodging trains and other hazards.
-
Plants vs Zombies: A game where you have to plant flowers and vegetables to defend your house from zombies.
-
Candy Crush Saga: A game where you have to match candies of the same color to clear them from the board.
-
Asphalt 8: A game where you have to race cars on various tracks and perform stunts.
-
Clash of Clans: A game where you have to build your own village and fight against other players.
-
Minecraft: A game where you can create your own world using blocks and explore other players' worlds.
-
And many more!
-
-
Offline and free to play
-
Another feature of this app is that it does not require any internet connection to play the games. You can enjoy them offline without worrying about data usage, Wi-Fi availability, or network issues. You can also play them for free without having to pay for any subscription, membership, or premium features. The app does not contain any ads or in-app purchases that could interrupt your gaming experience or tempt you to spend money.
-
100+ in 1 offline collection apk download
-100+ in 1 offline collection game for android
-100+ in 1 offline collection apk latest version
-100+ in 1 offline collection app free download
-100+ in 1 offline collection gamebanjo deluxe
-How to install 100+ in 1 offline collection apk
-100+ in 1 offline collection apk for pc windows
-100+ in 1 offline collection apk mod
-100+ in 1 offline collection apk old version
-100+ in 1 offline collection apk no ads
-Best offline games collection apk
-Offline games collection apk download
-Offline games collection app for android
-Offline games collection apk latest version
-Offline games collection app free download
-How to play offline games collection apk
-Offline games collection apk for pc windows
-Offline games collection apk mod
-Offline games collection apk old version
-Offline games collection apk no ads
-Gamebanjo deluxe apk download
-Gamebanjo deluxe game for android
-Gamebanjo deluxe apk latest version
-Gamebanjo deluxe app free download
-Gamebanjo deluxe 100 most popular games
-How to play gamebanjo deluxe apk
-Gamebanjo deluxe apk for pc windows
-Gamebanjo deluxe apk mod
-Gamebanjo deluxe apk old version
-Gamebanjo deluxe apk no ads
-Superxyz lab apk download
-Superxyz lab game for android
-Superxyz lab apk latest version
-Superxyz lab app free download
-Superxyz lab 100 in 1 game features
-How to play superxyz lab apk
-Superxyz lab apk for pc windows
-Superxyz lab apk mod
-Superxyz lab apk old version
-Superxyz lab apk no ads
-
High quality and full size games
-
The app also boasts of having high quality and full size games that are not compromised or reduced in any way. The games have the same graphics, sound, and gameplay as the original versions. You can play them in full screen mode and adjust the settings according to your preference. The app also supports various screen resolutions and orientations, so you can play the games on any device.
-
Various genres and categories
-
The app also offers a wide range of genres and categories to suit your mood and taste. You can find games that are fun, challenging, relaxing, educational, or addictive. You can also find games that are suitable for different age groups and preferences. Whether you like action, adventure, puzzle, strategy, racing, shooting, sports, or anything else, you can find it in this app.
-
How to download and install 100+ in 1 Offline Collection APK?
-
Download the APK file from a trusted source
-
To download and install this app, you need to get the APK file from a trusted source. An APK file is an Android application package that contains all the files and data needed to run an app on your device. You can find the APK file for this app on various websites that offer free and safe downloads of Android apps. Some of the websites that you can use are:
-
-
[APKPure]: A website that provides pure APK files for Android apps and games.
-
[APKMirror]: A website that hosts a large collection of APK files for Android apps and games.
-
[Uptodown]: A website that offers downloads of Android apps and games in various languages.
-
-
Once you find the APK file for this app on one of these websites, you can download it by clicking on the download button or link. The file size is about 300 MB, so make sure you have enough space on your device and a stable internet connection.
-
Enable unknown sources on your device
-
After downloading the APK file, you need to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the Google Play Store. To enable unknown sources, follow these steps:
-
-
Go to your device's settings and look for security or privacy options.
-
Find the option that says unknown sources or install unknown apps and toggle it on.
-
You may see a warning message that says installing apps from unknown sources could harm your device. Tap on OK or Allow to proceed.
-
-
Install the APK file and launch the app
-
Once you have enabled unknown sources, you can install the APK file by following these steps:
-
-
Locate the APK file on your device's file manager or downloads folder.
-
Tap on the APK file and follow the instructions on the screen to install it.
-
You may see a message that says this app is not compatible with your device or requires additional permissions. Tap on Install Anyway or Accept to continue.
-
Wait for the installation process to finish and then tap on Open or Done to launch the app.
-
-
Congratulations! You have successfully downloaded and installed 100+ in 1 Offline Collection APK on your device. You can now enjoy playing over 100 games offline and for free!
Pros and cons of 100+ in 1 Offline Collection APK
-
Like any other app, 100+ in 1 Offline Collection APK has its advantages and disadvantages. Here are some of the pros and cons that you should consider before downloading and installing this app:
-
Pros:
-
-
No internet connection required: You can play the games offline without depending on Wi-Fi or mobile data. This is great for saving data, battery, and money. It is also convenient for traveling, commuting, or staying in places with poor or no internet connection.
-
No ads or in-app purchases: You can play the games for free without being interrupted by annoying ads or pop-ups. You also don't have to worry about spending money on extra features, coins, gems, or lives. You can enjoy the games without any limitations or distractions.
-
Easy to switch between games: You can access all the games from one app and switch between them easily. You don't have to exit or close one game to play another. You can also bookmark your favorite games for faster access. You can play as many games as you want without cluttering your device's memory or storage.
-
Suitable for all ages and preferences: You can find games that are fun and appropriate for everyone. Whether you are a kid, a teenager, an adult, or a senior, you can find games that match your interests and skills. You can also play with your friends, family, or alone.
-
-
Cons:
-
-
Large file size (over 300 MB): The app takes up a lot of space on your device's storage. You may need to delete some files or apps to make room for it. You may also experience some lag or slow performance if your device has low specifications or memory.
-
Some games may not work on some devices: The app may not be compatible with all devices or operating systems. Some games may not run properly or crash on some devices. You may need to update your device's software or hardware to play some games.
-
Some games may have bugs or glitches: The app may contain some errors or defects that affect the quality or functionality of some games. Some games may freeze, crash, or display incorrect graphics or sounds. You may need to report these issues to the developer or wait for updates or fixes.
-
-
Conclusion
-
100+ in 1 Offline Collection APK is an app that lets you play over 100 games offline and for free on your Android device. It has many features, such as high quality and full size games, various genres and categories, no ads or in-app purchases, and easy to switch between games. It also has some drawbacks, such as large file size, compatibility issues, and bugs or glitches. If you are looking for a way to have fun and entertainment without internet connection or spending money, then you might want to give this app a try. However, you should also be aware of the potential risks and problems that it may cause to your device or gaming experience.
-
FAQs
-
Here are some of the frequently asked questions about 100+ in 1 Offline Collection APK:
-
-
Is 100+ in 1 Offline Collection APK safe to download and install?
-
Yes, 100+ in 1 Offline Collection APK is safe to download and install if you get it from a trusted source. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain viruses, malware, spyware, or other harmful elements. You should also scan the APK file with an antivirus program before installing it.
-
Is 100+ in 1 Offline Collection APK legal to use?
-
Yes, 100+ in 1 Offline Collection APK is legal to use as long as you don't violate any laws or regulations in your country or region. The app does not contain any pirated or copyrighted content, as it only features independent games that are free to play. However, you should always respect the rights and privacy of the developers and publishers of the games.
-
How do I update 100+ in 1 Offline Collection APK?
-
To update 100+ in 1 Offline Collection APK, you need to download and install the latest version of the APK file from a trusted source. You can check for updates by visiting the website where you downloaded the app or by following the developer's social media accounts. You can also enable automatic updates on your device's settings if available.
-
How do I uninstall 100+ in 1 Offline Collection APK?
-
To uninstall 100+ in 1 Offline Collection APK, you need to follow these steps:
-
-
Go to your device's settings and look for apps or applications options.
-
Find and tap on 100+ in 1 Offline Collection APK or Gamebanjo Deluxe.
-
Tap on Uninstall and confirm your action.
-
Wait for the uninstallation process to finish and then tap on OK or Done.
-
-
Note that uninstalling the app will delete all the games and data that are stored in it. You may need to back up your progress or achievements before uninstalling the app.
-
How do I contact the developer of 100+ in 1 Offline Collection APK?
-
If you have any questions, feedback, suggestions, or complaints about 100+ in 1 Offline Collection APK, you can contact the developer by using one of these methods:
-
-
Email: You can send an email to superxyzlab@gmail.com and expect a reply within 24 hours.
Twitter: You can follow and tweet them at @superxyzlab and get updates and news about their games and apps.
-
-
The developer is very responsive and friendly, so don't hesitate to reach out to them if you need any help or support.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 APK A Fun and Wacky Racing Adventure.md b/spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 APK A Fun and Wacky Racing Adventure.md
deleted file mode 100644
index e95dd5929b90466b740832f4f7027f088abcfae2..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 APK A Fun and Wacky Racing Adventure.md
+++ /dev/null
@@ -1,175 +0,0 @@
-
-
Beach Buggy Racing 2: A Fun and Wacky Kart Racer for Android
-
Introduction
-
If you are looking for a fun and wacky kart racing game for your Android device, you might want to check out Beach Buggy Racing 2. This is a sequel to the popular Beach Buggy Racing game that introduced over 100 million international mobile players to console-style kart-racing with a playful offroad twist. With Beach Buggy Racing 2, you can join the Beach Buggy Racing League and compete against drivers and cars from around the world. Race through Egyptian pyramids, dragon-infested castles, pirate ship wrecks, and experimental alien bio-labs. Collect and upgrade an arsenal of fun and wacky powerups. Recruit new drivers, assemble a garage full of cars and race your way to the top of the League.
-
In this article, we will tell you what Beach Buggy Racing 2 is, how to download and install it on your Android device, what features it offers, some tips and tricks to help you win, and our review of the game.
Beach Buggy Racing 2 is a fully 3D off-road kart racing game with amazing physics, detailed cars and characters, and spectacular weapons, powered by Vector Engine and NVIDIA's PhysX. It's like a console game in the palm of your hand! You can play any part of the game solo or with friends in split screen—from the story-driven Adventure mode to multi-event Championships, adrenaline-pumping Races, skill-mastering Drift Attacks and more. You can also customize your own game modes with different powerups, race rules, lap counts and more.
-
How to download and install Beach Buggy Racing 2 on Android?
-
Beach Buggy Racing 2 is free to play, but it contains items that can be purchased for real money. You can download it from the Google Play Store by following these steps:
-
-
Open the Google Play Store app on your Android device.
-
Search for "Beach Buggy Racing 2" or use this link.
-
Tap on the "Install" button and wait for the download to finish.
-
Tap on the "Open" button to launch the game.
-
-
You can also download the APK file from other sources, but make sure they are safe and trustworthy. To install an APK file, you need to enable "Unknown sources" in your device settings. Then, you can open the APK file and follow the instructions to install it.
-
Features of Beach Buggy Racing 2
-
Spectacular kart racing action
-
Beach Buggy Racing 2 offers a variety of tracks and environments to race on, each with their own challenges and surprises. You can race through Egyptian pyramids, dragon-infested castles, pirate ship wrecks, and experimental alien bio-labs. You can also encounter obstacles like tumbleweeds, birds, fireballs, giant crabs, lava flows, sandstorms, and more. You can use different types of vehicles like beach buggies, monster trucks, muscle cars, classic pickups and formula supercars.
Upgrade your powerups
-
One of the coolest features of Beach Buggy Racing 2 is that you can upgrade your powerups to make them more powerful and effective. You can do this by collecting coins and gems during the races, or by completing quests and achievements. You can also buy coins and gems with real money if you want to speed up the process. Upgrading your powerups will give you an edge over your rivals, as you can unleash more damage, more speed, or more protection. You can upgrade each powerup up to five times, and each upgrade will cost more coins and gems than the previous one. You can also unlock new powerups by playing the Adventure mode or by opening chests.
-
Build your team
-
Another cool feature of Beach Buggy Racing 2 is that you can recruit new drivers to join your team. Each driver has a unique ability that can help you in different situations. For example, Rez has the ability to fire a laser beam that zaps anyone in front of him, while McSkelly has the ability to summon a swarm of bats that blind the other racers. You can unlock new drivers by playing the Adventure mode or by opening chests. You can also upgrade your drivers to make their abilities more powerful and effective. You can do this by collecting driver cards during the races, or by buying them with coins and gems. Upgrading your drivers will also increase their stats, such as speed, acceleration, handling, and armor.
-
Collect over 55 cars
-
Beach Buggy Racing 2 has a huge collection of cars that you can unlock and use in the races. There are over 55 cars in total, each with their own style and performance. You can find beach buggies, monster trucks, muscle cars, classic pickups, formula supercars, and even some weird and wacky vehicles like a lunar rover, a shark car, a unicorn car, and a dragon car. You can unlock new cars by playing the Adventure mode or by opening chests. You can also upgrade your cars to make them faster and stronger. You can do this by collecting car parts during the races, or by buying them with coins and gems. Upgrading your cars will also change their appearance, making them look cooler and more customized.
-
beach buggy racing 2 mod apk unlimited money
-beach buggy racing 2 hack apk download
-beach buggy racing 2 apk + obb
-beach buggy racing 2 apk pure
-beach buggy racing 2 online multiplayer apk
-beach buggy racing 2 latest version apk
-beach buggy racing 2 free download apk
-beach buggy racing 2 android game apk
-beach buggy racing 2 full unlocked apk
-beach buggy racing 2 cheats apk
-beach buggy racing 2 premium apk
-beach buggy racing 2 offline mode apk
-beach buggy racing 2 apk for pc
-beach buggy racing 2 apk no ads
-beach buggy racing 2 apk revdl
-beach buggy racing 2 mod menu apk
-beach buggy racing 2 all cars unlocked apk
-beach buggy racing 2 unlimited gems apk
-beach buggy racing 2 mod apk rexdl
-beach buggy racing 2 vip pass apk
-beach buggy racing 2 mod apk happymod
-beach buggy racing 2 old version apk
-beach buggy racing 2 modded apk android 1
-beach buggy racing 2 cracked apk
-beach buggy racing 2 mod apk android republic
-beach buggy racing 2 update apk
-beach buggy racing 2 beta apk
-beach buggy racing 2 mod apk an1
-beach buggy racing 2 pro apk
-beach buggy racing 2 modded apk download
-beach buggy racing 2 original apk
-beach buggy racing 2 modded apk free shopping
-beach buggy racing 2 modded apk unlimited everything
-beach buggy racing 2 hacked version apk
-beach buggy racing 2 modded apk all levels unlocked
-beach buggy racing 2 modded apk no root
-beach buggy racing 2 modded apk anti ban
-beach buggy racing 2 modded apk unlimited tickets
-beach buggy racing 2 modded apk god mode
-beach buggy racing 2 modded apk high damage
-
Play against the world
-
Beach Buggy Racing 2 is not only a solo game, but also a multiplayer game. You can play against other players from around the world in online races, tournaments, and leagues. You can also challenge your friends to a race in split-screen mode on the same device, or connect with other devices via WiFi or Bluetooth. Playing against other players will test your skills and strategies, as well as earn you rewards and trophies. You can also chat with other players in the game lobby, or join a club to team up with other racers and share tips and tricks.
-
Customize your ride
-
Beach Buggy Racing 2 lets you customize your ride to suit your style and personality. You can change the color of your car, add stickers and decals, change the wheels and tires, add spoilers and exhausts, and more. You can also customize your driver's appearance, such as their outfit, hairstyle, sunglasses, hat, helmet, mask, etc. You can unlock new customization options by playing the Adventure mode or by opening chests. You can also buy them with coins and gems if you want to get them faster.
-
Awesome new game modes
-
Beach Buggy Racing 2 offers a variety of game modes to keep you entertained and challenged. Besides the Adventure mode, which is the main story mode where you race through different worlds and events, you can also play other modes such as:
-
-
Race: This is the classic mode where you race against seven other racers on any track you choose.
-
Championship: This is a series of races where you compete for points and trophies.
-
Daily Challenge: This is a special race that changes every day and has different rules and rewards.
-
Drift Attack: This is a mode where you have to drift as much as possible on a track to earn points.
-
Firework Fury: This is a mode where you have to collect rockets on a track and fire them at targets.
-
Boss Battle: This is a mode where you have to race against a boss character who has special abilities.
-
Custom Race: This is a mode where you can create your own race with different settings such as powerups, laps, opponents, etc.
-
-
Tips and tricks for Beach Buggy Racing 2
-
Master the drift
-
Drifting is an essential skill in Beach Buggy Racing 2, as it allows you to take sharp turns without losing speed. To drift, you need to tap and hold the brake button while steering. You will see a yellow trail behind your car, indicating that you are drifting. The longer you drift, the more boost you will accumulate. You can use the boost by releasing the brake button and tapping the gas button. Boosting will give you a burst of speed that can help you overtake your opponents or avoid obstacles. You can also use the boost to perform a powerslide, which is a drift that goes in the opposite direction of the turn. This can help you change lanes quickly or dodge incoming attacks.
-
Use the driver's ability at the right time
-
As mentioned before, each driver in Beach Buggy Racing 2 has a unique ability that can give you an advantage in the race. However, you need to use it wisely and at the right time. Each ability has a cooldown time, which means that you can't use it again until it recharges. You can see the cooldown timer on the bottom left corner of the screen, next to your driver's portrait. You can also see a blue bar above your car, which indicates how much charge you have for your ability. You can charge your ability by collecting blue orbs on the track, or by hitting other racers with powerups. To use your ability, you need to tap on your driver's portrait when it is fully charged. Some abilities are offensive, such as Rez's laser beam or McSkelly's bat swarm, while some are defensive, such as Roxie's shield or Tiki's teleport. You need to use them according to the situation and your strategy.
-
Don't fall into the trap
-
Beach Buggy Racing 2 is full of traps and hazards that can slow you down or damage your car. You need to be careful and avoid them as much as possible. Some of the traps and hazards include:
-
-
Mines: These are small explosives that are placed on the track by other racers or by the environment. They will explode when you touch them, causing you to spin out and lose speed.
-
Oil slicks: These are slippery patches of oil that are spilled on the track by other racers or by the environment. They will make you lose control and skid off course.
-
Fireballs: These are balls of fire that are shot from cannons or volcanoes on some tracks. They will hit you and set you on fire, causing you to lose health and speed.
-
Lava flows: These are streams of lava that flow across some tracks. They will burn you and damage your car if you touch them.
-
Sandstorms: These are storms of sand that blow across some tracks. They will reduce your visibility and make it harder to see where you are going.
-
Tumbleweeds: These are balls of dried plants that roll across some tracks. They will bounce off your car and slow you down if you hit them.
-
-
You can avoid these traps and hazards by steering away from them, using your boost to get past them, or using your powerups to destroy them or protect yourself from them.
-
Build the best deck of crazy powerups
-
Beach Buggy Racing 2 has a lot of crazy powerups that you can use to spice up the race and sabotage your opponents. You can collect powerups by driving through red bubbles on the track, or by opening chests. You can also upgrade your powerups to make them more powerful and effective, as explained before. However, you can only equip four powerups at a time, so you need to choose wisely which ones to use. You can create different decks of powerups for different situations and strategies. For example, you can create a deck of offensive powerups, such as rockets, fireballs, lightning bolts, etc., to attack your opponents and slow them down. Or, you can create a deck of defensive powerups, such as shields, magnets, oil slicks, etc., to protect yourself from attacks and traps. Or, you can create a deck of utility powerups, such as boosts, teleports, springs, etc., to enhance your speed and maneuverability.
-
Grab those fast bubbles
-
Besides red bubbles that contain powerups, there are also green bubbles that contain coins and gems, blue bubbles that contain driver cards and car parts, and yellow bubbles that contain fast bubbles. Fast bubbles are special items that give you an instant boost of speed when you collect them. They are very useful for overtaking your opponents or escaping from danger. However, they are also very rare and hard to find. You need to keep an eye out for them and grab them whenever
you see them. They are usually hidden in secret places or behind obstacles, so you need to explore the tracks and find the best routes to get them. You can also use your powerups or your driver's ability to help you reach them. For example, you can use a spring to jump over a wall, or a teleport to skip a section of the track.
-
Choose the best controls
-
Beach Buggy Racing 2 offers different options for controlling your car. You can choose between tilt, touch, or gamepad controls. You can also adjust the sensitivity and the layout of the buttons. You need to find the best controls that suit your preference and style. You can experiment with different settings and see which one works best for you. You can also switch between different controls during the game by pausing and going to the settings menu. Here are some pros and cons of each control option:
-
-
-
Control option
-
Pros
-
Cons
-
-
-
Tilt
-
More realistic and immersive, easy to drift and powerslide, no need to touch the screen.
-
Less precise and responsive, may cause motion sickness, may not work well on some devices.
-
-
-
Touch
-
More precise and responsive, easy to steer and brake, works well on any device.
-
Less realistic and immersive, may block the view of the screen, may cause finger fatigue.
-
-
-
Gamepad
-
Most realistic and immersive, most precise and responsive, most comfortable and ergonomic.
-
Requires an external device, may not be compatible with some games or devices, may be expensive or hard to find.
-
-
-
Review of Beach Buggy Racing 2
-
Pros and cons
-
Beach Buggy Racing 2 is a fun and wacky kart racing game that offers a lot of features and content for Android users. However, it also has some drawbacks that may affect your enjoyment of the game. Here are some pros and cons of Beach Buggy Racing 2:
-
-
-
Pros
-
Cons
-
-
-
Stunning graphics and sound effects.
-
Frequent ads and pop-ups.
-
-
-
Varied tracks and environments.
-
Sometimes laggy or buggy.
-
-
-
Huge collection of cars and drivers.
-
Somewhat pay-to-win.
-
-
-
Crazy powerups and abilities.
-
Sometimes unfair or frustrating.
-
-
-
Multifaceted game modes.
-
Sometimes repetitive or boring.
-
-
-
-
Rating and verdict
-
We give Beach Buggy Racing 2 a rating of 4 out of 5 stars. It is a fun and wacky kart racing game that will keep you entertained and challenged for hours. It has stunning graphics, varied tracks, huge collection of cars and drivers, crazy powerups and abilities, multifaceted game modes, and multiplayer options. However, it also has frequent ads, laggy performance, pay-to-win elements, unfair difficulty, and repetitive gameplay. If you are looking for a kart racing game for your Android device, you might want to give Beach Buggy Racing 2 a try. It is free to download and play, but it contains in-app purchases that can enhance your experience. You can also check out other similar games such as Mario Kart Tour, Crash Bandicoot: On the Run!, Sonic Racing Transformed, etc.
-
Frequently Asked Questions (FAQs)
-
Here are some frequently asked questions (FAQs) about Beach Buggy Racing 2:
-
-
How do I unlock new cars and drivers?
-
You can unlock new cars and drivers by playing the Adventure mode or by opening chests. You can also buy them with coins and gems if you want to get them faster.
-
How do I upgrade my cars, drivers, and powerups?
-
You can upgrade your cars, drivers, and powerups by collecting coins, gems, car parts, driver cards, and powerup cards during the races, or by buying them with real money. You can also upgrade them by completing quests and achievements. Upgrading your cars, drivers, and powerups will make them more powerful and effective, as well as change their appearance.
-
How do I play with my friends?
-
You can play with your friends in split-screen mode on the same device, or connect with other devices via WiFi or Bluetooth. You can also play online with your friends or other players from around the world in races, tournaments, and leagues. You can also chat with your friends in the game lobby, or join a club to team up with other racers and share tips and tricks.
-
How do I get more coins and gems?
-
You can get more coins and gems by playing the game and collecting them during the races, or by opening chests. You can also get more coins and gems by watching ads, completing offers, or buying them with real money. Coins and gems are used to unlock and upgrade cars, drivers, powerups, and customization options.
-
How do I get rid of ads?
-
You can get rid of ads by buying any amount of coins or gems with real money. This will remove all ads from the game permanently. You can also turn off your internet connection to avoid ads, but this will also disable some features of the game such as online multiplayer, daily challenge, etc.
-
How do I contact the developers?
-
You can contact the developers of Beach Buggy Racing 2 by visiting their website at www.vectorunit.com, or by sending them an email at support@vectorunit.com. You can also follow them on social media platforms such as Facebook, Twitter, Instagram, YouTube, etc. You can also leave a review or a comment on the Google Play Store to share your feedback and suggestions.
-
-
\ No newline at end of file
diff --git a/spaces/7eu7d7/anime-ai-detect-fucker/app.py b/spaces/7eu7d7/anime-ai-detect-fucker/app.py
deleted file mode 100644
index 3faa82da6a30634ed0e6b834d63743ed3e17eea4..0000000000000000000000000000000000000000
--- a/spaces/7eu7d7/anime-ai-detect-fucker/app.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import gradio as gr
-from attack import Attacker
-import argparse
-
-def do_attack(img, eps, step_size, steps, target, progress=gr.Progress()):
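- # Assemble the attack configuration: eps bounds the adversarial noise, while
- # step_size and steps control the iterative (PGD-style) optimisation.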
- args=argparse.Namespace()
- args.out_dir='./'
- args.target=target  # use the attack target selected in the dropdown
- args.eps=eps
- args.step_size=step_size
- args.steps=steps
- args.test_atk=False
-
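- # Drive the Gradio progress bar; the PGD callback below advances it once per attack step.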
- step = progress.tqdm(range(steps))
-
- def pgd_prog(ori_images, images, labels):
- step.update(1)
-
- attacker = Attacker(args, pgd_callback=pgd_prog)
- atk_img, noise = attacker.attack_(img)
- attacker.save_image(img, noise, 'out.png')
- return 'out_atk.png'
-
-with gr.Blocks(title="Anime AI Detect Fucker Demo", theme="dark") as demo:
- gr.HTML('github repo')
-
- with gr.Row():
- with gr.Column():
- with gr.Row():
- eps = gr.Slider(label="eps (Noise intensity)", minimum=1, maximum=16, step=1, value=1)
- step_size = gr.Slider(label="Noise step size", minimum=0.001, maximum=16, step=0.001, value=0.136)
- with gr.Row():
- steps = gr.Slider(label="step count", minimum=1, maximum=100, step=1, value=20)
- model_name = gr.Dropdown(label="attack target",
- choices=["auto", "human", "ai"],
- interactive=True,
- value="auto", show_label=True)
-
- input_image = gr.Image(label="Clean Image", type="pil")
-
- atk_btn = gr.Button("Attack")
-
- with gr.Column():
- output_image = gr.Image(label="Attacked Image")
-
- atk_btn.click(fn=do_attack,
- inputs=[input_image, eps, step_size, steps],
- outputs=output_image)
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/801artistry/RVC801/MDXNet.py b/spaces/801artistry/RVC801/MDXNet.py
deleted file mode 100644
index 9b7eb43844ad0d4f9ce61287ccf9a8a4206d3853..0000000000000000000000000000000000000000
--- a/spaces/801artistry/RVC801/MDXNet.py
+++ /dev/null
@@ -1,272 +0,0 @@
-import soundfile as sf
-import torch, pdb, os, warnings, librosa
-import numpy as np
-import onnxruntime as ort
-from tqdm import tqdm
-
-dim_c = 4
-
-
-class Conv_TDF_net_trim:
- def __init__(
- self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024
- ):
- super(Conv_TDF_net_trim, self).__init__()
-
- self.dim_f = dim_f
- self.dim_t = 2**dim_t
- self.n_fft = n_fft
- self.hop = hop
- self.n_bins = self.n_fft // 2 + 1
- self.chunk_size = hop * (self.dim_t - 1)
- self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(
- device
- )
- self.target_name = target_name
- self.blender = "blender" in model_name
-
- out_c = dim_c * 4 if target_name == "*" else dim_c
- self.freq_pad = torch.zeros(
- [1, out_c, self.n_bins - self.dim_f, self.dim_t]
- ).to(device)
-
- self.n = L // 2
-
- def stft(self, x):
- x = x.reshape([-1, self.chunk_size])
- x = torch.stft(
- x,
- n_fft=self.n_fft,
- hop_length=self.hop,
- window=self.window,
- center=True,
- return_complex=True,
- )
- x = torch.view_as_real(x)
- x = x.permute([0, 3, 1, 2])
- x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape(
- [-1, dim_c, self.n_bins, self.dim_t]
- )
- return x[:, :, : self.dim_f]
-
- def istft(self, x, freq_pad=None):
- freq_pad = (
- self.freq_pad.repeat([x.shape[0], 1, 1, 1])
- if freq_pad is None
- else freq_pad
- )
- x = torch.cat([x, freq_pad], -2)
- c = 4 * 2 if self.target_name == "*" else 2
- x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape(
- [-1, 2, self.n_bins, self.dim_t]
- )
- x = x.permute([0, 2, 3, 1])
- x = x.contiguous()
- x = torch.view_as_complex(x)
- x = torch.istft(
- x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True
- )
- return x.reshape([-1, c, self.chunk_size])
-
-
-def get_models(device, dim_f, dim_t, n_fft):
- return Conv_TDF_net_trim(
- device=device,
- model_name="Conv-TDF",
- target_name="vocals",
- L=11,
- dim_f=dim_f,
- dim_t=dim_t,
- n_fft=n_fft,
- )
-
-
-warnings.filterwarnings("ignore")
-cpu = torch.device("cpu")
-if torch.cuda.is_available():
- device = torch.device("cuda:0")
-elif torch.backends.mps.is_available():
- device = torch.device("mps")
-else:
- device = torch.device("cpu")
-
-
-class Predictor:
- def __init__(self, args):
- self.args = args
- self.model_ = get_models(
- device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft
- )
- self.model = ort.InferenceSession(
- os.path.join(args.onnx, self.model_.target_name + ".onnx"),
- providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
- )
- print("onnx load done")
-
- def demix(self, mix):
- samples = mix.shape[-1]
- margin = self.args.margin
- chunk_size = self.args.chunks * 44100
-        assert margin != 0, "margin cannot be zero!"
- if margin > chunk_size:
- margin = chunk_size
-
- segmented_mix = {}
-
- if self.args.chunks == 0 or samples < chunk_size:
- chunk_size = samples
-
- counter = -1
- for skip in range(0, samples, chunk_size):
- counter += 1
-
- s_margin = 0 if counter == 0 else margin
- end = min(skip + chunk_size + margin, samples)
-
- start = skip - s_margin
-
- segmented_mix[skip] = mix[:, start:end].copy()
- if end == samples:
- break
-
- sources = self.demix_base(segmented_mix, margin_size=margin)
- """
- mix:(2,big_sample)
- segmented_mix:offset->(2,small_sample)
- sources:(1,2,big_sample)
- """
- return sources
-
- def demix_base(self, mixes, margin_size):
- chunked_sources = []
- progress_bar = tqdm(total=len(mixes))
- progress_bar.set_description("Processing")
- for mix in mixes:
- cmix = mixes[mix]
- sources = []
- n_sample = cmix.shape[1]
- model = self.model_
- trim = model.n_fft // 2
- gen_size = model.chunk_size - 2 * trim
- pad = gen_size - n_sample % gen_size
- mix_p = np.concatenate(
- (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1
- )
- mix_waves = []
- i = 0
- while i < n_sample + pad:
- waves = np.array(mix_p[:, i : i + model.chunk_size])
- mix_waves.append(waves)
- i += gen_size
- mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu)
- with torch.no_grad():
- _ort = self.model
- spek = model.stft(mix_waves)
- if self.args.denoise:
- spec_pred = (
- -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5
- + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5
- )
- tar_waves = model.istft(torch.tensor(spec_pred))
- else:
- tar_waves = model.istft(
- torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])
- )
- tar_signal = (
- tar_waves[:, :, trim:-trim]
- .transpose(0, 1)
- .reshape(2, -1)
- .numpy()[:, :-pad]
- )
-
- start = 0 if mix == 0 else margin_size
- end = None if mix == list(mixes.keys())[::-1][0] else -margin_size
- if margin_size == 0:
- end = None
- sources.append(tar_signal[:, start:end])
-
- progress_bar.update(1)
-
- chunked_sources.append(sources)
- _sources = np.concatenate(chunked_sources, axis=-1)
- # del self.model
- progress_bar.close()
- return _sources
-
- def prediction(self, m, vocal_root, others_root, format):
- os.makedirs(vocal_root, exist_ok=True)
- os.makedirs(others_root, exist_ok=True)
- basename = os.path.basename(m)
- mix, rate = librosa.load(m, mono=False, sr=44100)
- if mix.ndim == 1:
- mix = np.asfortranarray([mix, mix])
- mix = mix.T
- sources = self.demix(mix.T)
- opt = sources[0].T
- if format in ["wav", "flac"]:
- sf.write(
- "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate
- )
- sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate)
- else:
- path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename)
- path_other = "%s/%s_others.wav" % (others_root, basename)
- sf.write(path_vocal, mix - opt, rate)
- sf.write(path_other, opt, rate)
- if os.path.exists(path_vocal):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path_vocal, path_vocal[:-4] + ".%s" % format)
- )
- if os.path.exists(path_other):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path_other, path_other[:-4] + ".%s" % format)
- )
-
-
-class MDXNetDereverb:
- def __init__(self, chunks):
- self.onnx = "uvr5_weights/onnx_dereverb_By_FoxJoy"
- self.shifts = 10 #'Predict with randomised equivariant stabilisation'
- self.mixing = "min_mag" # ['default','min_mag','max_mag']
- self.chunks = chunks
- self.margin = 44100
- self.dim_t = 9
- self.dim_f = 3072
- self.n_fft = 6144
- self.denoise = True
- self.pred = Predictor(self)
-
- def _path_audio_(self, input, vocal_root, others_root, format):
- self.pred.prediction(input, vocal_root, others_root, format)
-
-
-if __name__ == "__main__":
- dereverb = MDXNetDereverb(15)
- from time import time as ttime
-
- t0 = ttime()
- dereverb._path_audio_(
- "雪雪伴奏对消HP5.wav",
- "vocal",
- "others",
- )
- t1 = ttime()
- print(t1 - t0)
-
-
-"""
-
-runtime\python.exe MDXNet.py
-
-6G:
-15/9:0.8G->6.8G
-14:0.8G->6.5G
-25: out of memory (OOM)
-
-half15:0.7G->6.6G,22.69s
-fp32-15:0.7G->6.6G,20.85s
-
-"""
diff --git a/spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Redis 9e063b60eca24a1783c225cfdc21dd8c.md b/spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Redis 9e063b60eca24a1783c225cfdc21dd8c.md
deleted file mode 100644
index 7ab5357f2f29fb6e29ceace21a68b2bd75ca57ce..0000000000000000000000000000000000000000
--- a/spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Redis 9e063b60eca24a1783c225cfdc21dd8c.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Redis
-
-Last edited time: March 31, 2023 1:49 PM
-Owner: Anonymous
-Tags: Infrastructure
\ No newline at end of file
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/models/loaders.py b/spaces/AIConsultant/MusicGen/audiocraft/models/loaders.py
deleted file mode 100644
index 9c7808a0588bd1a8084157b072bae42aa7efaf84..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/models/loaders.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Utility functions to load from the checkpoints.
-Each checkpoint is a torch.saved dict with the following keys:
-- 'xp.cfg': the hydra config as dumped during training. This should be used
- to rebuild the object using the audiocraft.models.builders functions,
-- 'model_best_state': a readily loadable best state for the model, including
- the conditioner. The model obtained from `xp.cfg` should be compatible
- with this state dict. In the case of a LM, the encodec model would not be
- bundled along but instead provided separately.
-
-Those functions also support loading from a remote location with the Torch Hub API.
-They also support overriding some parameters, in particular the device and dtype
-of the returned model.
-"""
-
-from pathlib import Path
-from huggingface_hub import hf_hub_download
-import typing as tp
-import os
-
-from omegaconf import OmegaConf, DictConfig
-import torch
-
-from . import builders
-from .encodec import CompressionModel
-
-
-def get_audiocraft_cache_dir() -> tp.Optional[str]:
- return os.environ.get('AUDIOCRAFT_CACHE_DIR', None)
-
-
-def _get_state_dict(
- file_or_url_or_id: tp.Union[Path, str],
- filename: tp.Optional[str] = None,
- device='cpu',
- cache_dir: tp.Optional[str] = None,
-):
- if cache_dir is None:
- cache_dir = get_audiocraft_cache_dir()
- # Return the state dict either from a file or url
- file_or_url_or_id = str(file_or_url_or_id)
- assert isinstance(file_or_url_or_id, str)
-
- if os.path.isfile(file_or_url_or_id):
- return torch.load(file_or_url_or_id, map_location=device)
-
- if os.path.isdir(file_or_url_or_id):
- file = f"{file_or_url_or_id}/{filename}"
- return torch.load(file, map_location=device)
-
- elif file_or_url_or_id.startswith('https://'):
- return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True)
-
- else:
- assert filename is not None, "filename needs to be defined if using HF checkpoints"
-
- file = hf_hub_download(repo_id=file_or_url_or_id, filename=filename, cache_dir=cache_dir)
- return torch.load(file, map_location=device)
-
-
-def load_compression_model_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
- return _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir)
-
-
-def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
- pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)
- if 'pretrained' in pkg:
- return CompressionModel.get_pretrained(pkg['pretrained'], device=device)
- cfg = OmegaConf.create(pkg['xp.cfg'])
- cfg.device = str(device)
- model = builders.get_compression_model(cfg)
- model.load_state_dict(pkg['best_state'])
- model.eval()
- return model
-
-
-def load_lm_model_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
- return _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir)
-
-
-def _delete_param(cfg: DictConfig, full_name: str):
- parts = full_name.split('.')
- for part in parts[:-1]:
- if part in cfg:
- cfg = cfg[part]
- else:
- return
- OmegaConf.set_struct(cfg, False)
- if parts[-1] in cfg:
- del cfg[parts[-1]]
- OmegaConf.set_struct(cfg, True)
-
-
-def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
- pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)
- cfg = OmegaConf.create(pkg['xp.cfg'])
- cfg.device = str(device)
- if cfg.device == 'cpu':
- cfg.dtype = 'float32'
- else:
- cfg.dtype = 'float16'
- _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
- _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
- _delete_param(cfg, 'conditioners.args.drop_desc_p')
- model = builders.get_lm_model(cfg)
- model.load_state_dict(pkg['best_state'])
- model.eval()
- model.cfg = cfg
- return model
-
-
-def load_mbd_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
- return _get_state_dict(file_or_url_or_id, filename="all_in_one.pt", cache_dir=cache_dir)
-
-
-def load_diffusion_models(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
- pkg = load_mbd_ckpt(file_or_url_or_id, cache_dir=cache_dir)
- models = []
- processors = []
- cfgs = []
- sample_rate = pkg['sample_rate']
- for i in range(pkg['n_bands']):
- cfg = pkg[i]['cfg']
- model = builders.get_diffusion_model(cfg)
- model_dict = pkg[i]['model_state']
- model.load_state_dict(model_dict)
- model.to(device)
- processor = builders.get_processor(cfg=cfg.processor, sample_rate=sample_rate)
- processor_dict = pkg[i]['processor_state']
- processor.load_state_dict(processor_dict)
- processor.to(device)
- models.append(model)
- processors.append(processor)
- cfgs.append(cfg)
- return models, processors, cfgs
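
For context, the helpers above are usually combined as follows to rebuild a model from a checkpoint. This is a minimal sketch only; the `facebook/musicgen-small` repo id and the device choice are illustrative assumptions, not something this module mandates.

```python
# Hedged usage sketch for the loader helpers above; the checkpoint id is an assumption.
import torch
from audiocraft.models import loaders

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Rebuilds the EnCodec-style compression model from 'compression_state_dict.bin'.
compression_model = loaders.load_compression_model('facebook/musicgen-small', device=device)

# Rebuilds the language model from 'state_dict.bin'; dtype falls back to float32 on CPU.
lm = loaders.load_lm_model('facebook/musicgen-small', device=device)

print(type(compression_model).__name__, type(lm).__name__)
```
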
diff --git a/spaces/AIDHD/audio-video-transcriber/README.md b/spaces/AIDHD/audio-video-transcriber/README.md
deleted file mode 100644
index ed13bd459052e3b6cc1f73a1e1d96b6225771b9d..0000000000000000000000000000000000000000
--- a/spaces/AIDHD/audio-video-transcriber/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Audio Video Transcriber
-emoji: 🔥
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 3.14.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/fused_bias_act.cpp b/spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/fused_bias_act.cpp
deleted file mode 100644
index 02be898f970bcc8ea297867fcaa4e71b24b3d949..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/fused_bias_act.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-#include <torch/extension.h>
-
-
-torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
- int act, int grad, float alpha, float scale);
-
-#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
-
-torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
- int act, int grad, float alpha, float scale) {
- CHECK_CUDA(input);
- CHECK_CUDA(bias);
-
- return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
-}
\ No newline at end of file
diff --git a/spaces/AIFILMS/generate_human_motion/VQ-Trans/models/modules.py b/spaces/AIFILMS/generate_human_motion/VQ-Trans/models/modules.py
deleted file mode 100644
index 4f06cd98d4f6029bd3df073095cf50498483d54a..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/generate_human_motion/VQ-Trans/models/modules.py
+++ /dev/null
@@ -1,109 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.nn.utils.rnn import pack_padded_sequence
-
-def init_weight(m):
- if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose1d):
- nn.init.xavier_normal_(m.weight)
- # m.bias.data.fill_(0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
-
-class MovementConvEncoder(nn.Module):
- def __init__(self, input_size, hidden_size, output_size):
- super(MovementConvEncoder, self).__init__()
- self.main = nn.Sequential(
- nn.Conv1d(input_size, hidden_size, 4, 2, 1),
- nn.Dropout(0.2, inplace=True),
- nn.LeakyReLU(0.2, inplace=True),
- nn.Conv1d(hidden_size, output_size, 4, 2, 1),
- nn.Dropout(0.2, inplace=True),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.out_net = nn.Linear(output_size, output_size)
- self.main.apply(init_weight)
- self.out_net.apply(init_weight)
-
- def forward(self, inputs):
- inputs = inputs.permute(0, 2, 1)
- outputs = self.main(inputs).permute(0, 2, 1)
- # print(outputs.shape)
- return self.out_net(outputs)
-
-
-
-class TextEncoderBiGRUCo(nn.Module):
- def __init__(self, word_size, pos_size, hidden_size, output_size, device):
- super(TextEncoderBiGRUCo, self).__init__()
- self.device = device
-
- self.pos_emb = nn.Linear(pos_size, word_size)
- self.input_emb = nn.Linear(word_size, hidden_size)
- self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
- self.output_net = nn.Sequential(
- nn.Linear(hidden_size * 2, hidden_size),
- nn.LayerNorm(hidden_size),
- nn.LeakyReLU(0.2, inplace=True),
- nn.Linear(hidden_size, output_size)
- )
-
- self.input_emb.apply(init_weight)
- self.pos_emb.apply(init_weight)
- self.output_net.apply(init_weight)
- self.hidden_size = hidden_size
- self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))
-
- # input(batch_size, seq_len, dim)
- def forward(self, word_embs, pos_onehot, cap_lens):
- num_samples = word_embs.shape[0]
-
- pos_embs = self.pos_emb(pos_onehot)
- inputs = word_embs + pos_embs
- input_embs = self.input_emb(inputs)
- hidden = self.hidden.repeat(1, num_samples, 1)
-
- cap_lens = cap_lens.data.tolist()
- emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True)
-
- gru_seq, gru_last = self.gru(emb, hidden)
-
- gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1)
-
- return self.output_net(gru_last)
-
-
-class MotionEncoderBiGRUCo(nn.Module):
- def __init__(self, input_size, hidden_size, output_size, device):
- super(MotionEncoderBiGRUCo, self).__init__()
- self.device = device
-
- self.input_emb = nn.Linear(input_size, hidden_size)
- self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
- self.output_net = nn.Sequential(
- nn.Linear(hidden_size*2, hidden_size),
- nn.LayerNorm(hidden_size),
- nn.LeakyReLU(0.2, inplace=True),
- nn.Linear(hidden_size, output_size)
- )
-
- self.input_emb.apply(init_weight)
- self.output_net.apply(init_weight)
- self.hidden_size = hidden_size
- self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))
-
- # input(batch_size, seq_len, dim)
- def forward(self, inputs, m_lens):
- num_samples = inputs.shape[0]
-
- input_embs = self.input_emb(inputs)
- hidden = self.hidden.repeat(1, num_samples, 1)
-
- cap_lens = m_lens.data.tolist()
- emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True, enforce_sorted=False)
-
- gru_seq, gru_last = self.gru(emb, hidden)
-
- gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1)
-
- return self.output_net(gru_last)
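
As a rough shape check for the bidirectional GRU encoders above (a minimal sketch; the 263-dim motion features and 512-dim hidden size are placeholder values, not ones fixed by this file):

```python
# Hedged shape sketch for MotionEncoderBiGRUCo; all sizes here are illustrative.
import torch

encoder = MotionEncoderBiGRUCo(input_size=263, hidden_size=512, output_size=512, device='cpu')

inputs = torch.randn(4, 196, 263)            # (batch, seq_len, motion_dim)
m_lens = torch.tensor([196, 180, 150, 120])  # true sequence lengths used for packing

embedding = encoder(inputs, m_lens)          # -> (4, 512), one embedding per sequence
print(embedding.shape)
```
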
diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py
deleted file mode 100644
index e805d7e34921bee436e1e7fd9e1f753c7609186b..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""RAdam optimizer.
-
-This code is derived from https://github.com/LiyuanLucasLiu/RAdam.
-"""
-
-import math
-import torch
-
-from torch.optim.optimizer import Optimizer
-
-
-class RAdam(Optimizer):
- """Rectified Adam optimizer."""
-
- def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
-        """Initialize RAdam optimizer."""
- defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
- self.buffer = [[None, None, None] for ind in range(10)]
- super(RAdam, self).__init__(params, defaults)
-
- def __setstate__(self, state):
- """Set state."""
- super(RAdam, self).__setstate__(state)
-
- def step(self, closure=None):
- """Run one step."""
- loss = None
- if closure is not None:
- loss = closure()
-
- for group in self.param_groups:
-
- for p in group['params']:
- if p.grad is None:
- continue
- grad = p.grad.data.float()
- if grad.is_sparse:
- raise RuntimeError('RAdam does not support sparse gradients')
-
- p_data_fp32 = p.data.float()
-
- state = self.state[p]
-
- if len(state) == 0:
- state['step'] = 0
- state['exp_avg'] = torch.zeros_like(p_data_fp32)
- state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
- else:
- state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
- state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
-
- exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
- beta1, beta2 = group['betas']
-
-                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
-                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
-
- state['step'] += 1
- buffered = self.buffer[int(state['step'] % 10)]
- if state['step'] == buffered[0]:
- N_sma, step_size = buffered[1], buffered[2]
- else:
- buffered[0] = state['step']
- beta2_t = beta2 ** state['step']
- N_sma_max = 2 / (1 - beta2) - 1
- N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
- buffered[1] = N_sma
-
- # more conservative since it's an approximated value
- if N_sma >= 5:
- step_size = math.sqrt(
- (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) # NOQA
- else:
- step_size = 1.0 / (1 - beta1 ** state['step'])
- buffered[2] = step_size
-
- if group['weight_decay'] != 0:
-                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
-
- # more conservative since it's an approximated value
- if N_sma >= 5:
- denom = exp_avg_sq.sqrt().add_(group['eps'])
-                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
- else:
-                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
-
- p.data.copy_(p_data_fp32)
-
- return loss
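
For context, the optimizer drops into a training loop exactly like `torch.optim.Adam`; the toy model, data, and hyper-parameters below are placeholders in this minimal sketch:

```python
# Hedged usage sketch for RAdam; the model, data, and hyper-parameters are illustrative.
import torch
import torch.nn as nn

model = nn.Linear(16, 1)
optimizer = RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-4)

x, y = torch.randn(32, 16), torch.randn(32, 1)
for _ in range(10):
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()
print(loss.item())
```
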
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/linear_probe.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/linear_probe.py
deleted file mode 100644
index bb2841dd4e28201db8b5bd4a215e1b8b9a60d25a..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/linear_probe.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import numpy as np
-import torch.nn.functional as F
-from torch import nn
-from .model import MLPLayers
-
-
-class LinearProbe(nn.Module):
- def __init__(self, model, mlp, freeze, in_ch, out_ch, act=None):
- """
- Args:
- model: nn.Module
- mlp: bool, if True, then use the MLP layer as the linear probe module
-            freeze: bool, if True, then freeze all the CLAP model's layers when training the linear probe
- in_ch: int, the output channel from CLAP model
- out_ch: int, the output channel from linear probe (class_num)
- act: torch.nn.functional, the activation function before the loss function
- """
- super().__init__()
- in_ch = 512
- self.clap_model = model
- self.clap_model.text_branch = None # to save memory
- self.freeze = freeze
- if mlp:
- self.lp_layer = MLPLayers(units=[in_ch, in_ch * 2, out_ch])
- else:
- self.lp_layer = nn.Linear(in_ch, out_ch)
-
- if self.freeze:
- for param in self.clap_model.parameters():
- param.requires_grad = False
-
- if act == 'None':
- self.act = None
- elif act == 'relu':
- self.act = nn.ReLU()
- elif act == 'elu':
- self.act = nn.ELU()
- elif act == 'prelu':
- self.act = nn.PReLU(num_parameters=in_ch)
- elif act == 'softmax':
- self.act = nn.Softmax(dim=-1)
- elif act == 'sigmoid':
- self.act = nn.Sigmoid()
-
- def forward(self, x, mix_lambda=None, device=None):
- """
- Args:
- x: waveform, torch.tensor [batch, t_samples] / batch of mel_spec and longer list
- mix_lambda: torch.tensor [batch], the mixup lambda
- Returns:
- class_prob: torch.tensor [batch, class_num]
-
- """
-        # keep the frozen CLAP backbone in eval mode so batch-norm statistics are not updated
- if self.freeze:
- self.clap_model.eval()
-
- x = self.clap_model.audio_projection(
- self.clap_model.audio_branch(x, mixup_lambda=mix_lambda, device=device)["embedding"])
- out = self.lp_layer(x)
- if self.act is not None:
- out = self.act(out)
- return out
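
To show how the probe is wired without the real CLAP weights, here is a minimal sketch with a stand-in backbone; `DummyCLAP`, its feature sizes, and the 50-class output are assumptions made purely for illustration:

```python
# Hedged sketch of LinearProbe on a stand-in backbone; the real CLAP model and all
# sizes below are assumptions for illustration only.
import torch
from torch import nn


class DummyCLAP(nn.Module):
    # Minimal stand-in exposing only the attributes LinearProbe actually touches.
    def __init__(self):
        super().__init__()
        self.text_branch = nn.Identity()             # LinearProbe discards this to save memory
        self.audio_projection = nn.Linear(768, 512)  # maps into the 512-d space LinearProbe hard-codes
        self._encoder = nn.Linear(1024, 768)

    def audio_branch(self, x, mixup_lambda=None, device=None):
        return {"embedding": self._encoder(x)}


probe = LinearProbe(DummyCLAP(), mlp=False, freeze=True, in_ch=512, out_ch=50, act='sigmoid')
class_prob = probe(torch.randn(8, 1024))  # -> (8, 50) per-class probabilities
print(class_prob.shape)
```
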
diff --git a/spaces/ARTeLab/DTM_Estimation_SRandD/models/modelNetB.py b/spaces/ARTeLab/DTM_Estimation_SRandD/models/modelNetB.py
deleted file mode 100644
index 3497b86dbc7f503622a61ae60762055a52decd93..0000000000000000000000000000000000000000
--- a/spaces/ARTeLab/DTM_Estimation_SRandD/models/modelNetB.py
+++ /dev/null
@@ -1,307 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch import Tensor
-
-__all__ = [
- "ResidualDenseBlock", "ResidualResidualDenseBlock", "Generator",
- "DownSamplingNetwork"
-]
-
-
-class ResidualDenseBlock(nn.Module):
- """Achieves densely connected convolutional layers.
-    "Densely Connected Convolutional Networks" (https://arxiv.org/abs/1608.06993) paper.
-
- Args:
- channels (int): The number of channels in the input image.
- growths (int): The number of channels that increase in each layer of convolution.
- """
-
- def __init__(self, channels: int, growths: int) -> None:
- super(ResidualDenseBlock, self).__init__()
- self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1, 1), (1, 1))
- self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1, 1), (1, 1))
- self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1, 1), (1, 1))
- self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1, 1), (1, 1))
- self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1, 1), (1, 1))
-
- self.leaky_relu = nn.LeakyReLU(0.2, True)
- self.identity = nn.Identity()
-
- def forward(self, x: Tensor) -> Tensor:
- identity = x
-
- out1 = self.leaky_relu(self.conv1(x))
- out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
- out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
- out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
- out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3, out4], 1)))
- out = out5 * 0.2 + identity
-
- return out
-
-
-
-class MiniResidualDenseBlock(nn.Module):
- """Achieves densely connected convolutional layers.
-    "Densely Connected Convolutional Networks" (https://arxiv.org/abs/1608.06993) paper.
-
- Args:
- channels (int): The number of channels in the input image.
- growths (int): The number of channels that increase in each layer of convolution.
- """
-
- def __init__(self, channels: int, growths: int) -> None:
- super(MiniResidualDenseBlock, self).__init__()
- self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1, 1), (1, 1))
- self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1, 1), (1, 1))
- self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1, 1), (1, 1))
- self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1, 1), (1, 1))
- self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1, 1), (1, 1))
-
- self.leaky_relu = nn.LeakyReLU(0.2, True)
-
- def forward(self, x: Tensor) -> Tensor:
- identity = x
-
- out1 = self.leaky_relu(self.conv1(x))
- out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
- out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
- out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
- out5 = self.leaky_relu(self.conv5(torch.cat([x, out1, out2, out3, out4], 1)))
- out = out5 * 0.2 + identity
-
- return out
-
-
-
-class ResidualResidualDenseBlock(nn.Module):
- """Multi-layer residual dense convolution block.
-
- Args:
- channels (int): The number of channels in the input image.
- growths (int): The number of channels that increase in each layer of convolution.
- """
-
- def __init__(self, channels: int, growths: int) -> None:
- super(ResidualResidualDenseBlock, self).__init__()
- self.rdb1 = ResidualDenseBlock(channels, growths)
- self.rdb2 = ResidualDenseBlock(channels, growths)
- self.rdb3 = ResidualDenseBlock(channels, growths)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- identity = x
-
- out = self.rdb1(x)
- out = self.rdb2(out)
- out = self.rdb3(out)
- out = out * 0.2 + identity
-
- return out
-
-
-class MiniResidualResidualDenseBlock(nn.Module):
- """Multi-layer residual dense convolution block.
-
- Args:
- channels (int): The number of channels in the input image.
- growths (int): The number of channels that increase in each layer of convolution.
- """
-
- def __init__(self, channels: int, growths: int) -> None:
- super(MiniResidualResidualDenseBlock, self).__init__()
- self.M_rdb1 = MiniResidualDenseBlock(channels, growths)
- self.M_rdb2 = MiniResidualDenseBlock(channels, growths)
- self.M_rdb3 = MiniResidualDenseBlock(channels, growths)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- identity = x
- out = self.M_rdb1(x)
- out = self.M_rdb2(out)
- out = self.M_rdb3(out)
- out = out * 0.2 + identity
- return out
-
-
-class Generator(nn.Module):
- def __init__(self) -> None:
- super(Generator, self).__init__()
-
- #RLNet
- self.RLNetconv_block1 = nn.Conv2d(1, 64, (3, 3), (1, 1), (1, 1))
- RLNettrunk = []
- for _ in range(4):
- RLNettrunk += [ResidualResidualDenseBlock(64, 32)]
- self.RLNettrunk = nn.Sequential(*RLNettrunk)
- self.RLNetconv_block2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
- self.RLNetconv_block3 = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True)
- )
- self.RLNetconv_block4 = nn.Sequential(
- nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
- nn.Tanh()
- )
-
- #############################################################################
- # Generator
- self.conv_block1 = nn.Conv2d(1, 64, (3, 3), (1, 1), (1, 1))
- trunk = []
- for _ in range(16):
- trunk += [ResidualResidualDenseBlock(64, 32)]
- self.trunk = nn.Sequential(*trunk)
-
- # After the feature extraction network, reconnect a layer of convolutional blocks.
- self.conv_block2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
-
-
- # Upsampling convolutional layer.
- self.upsampling = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True)
- )
-
- # Reconnect a layer of convolution block after upsampling.
- self.conv_block3 = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True)
- )
-
- self.conv_block4 = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
- #nn.Sigmoid()
- )
-
- self.conv_block0_branch0 = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(128, 64, (3, 3), (1, 1), (1, 1)),
- nn.Tanh()
- )
-
- self.conv_block0_branch1 = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(128, 64, (3, 3), (1, 1), (1, 1)),
- nn.Tanh()
- )
-
- self.conv_block1_branch0 = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
- #nn.LeakyReLU(0.2, True),
- #nn.Conv2d(32, 1, (3, 3), (1, 1), (1, 1)),
- nn.Sigmoid()
- )
-
-
-
- self.conv_block1_branch1 = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
- nn.Sigmoid())
-
-
-
-
- def _forward_impl(self, x: Tensor) -> Tensor:
- #RLNet
- out1 = self.RLNetconv_block1(x)
- out = self.RLNettrunk(out1)
- out2 = self.RLNetconv_block2(out)
- out = out1 + out2
- out = self.RLNetconv_block3(out)
- out = self.RLNetconv_block4(out)
- rlNet_out = out + x
-
- #Generator
- out1 = self.conv_block1(rlNet_out)
- out = self.trunk(out1)
- out2 = self.conv_block2(out)
- out = out1 + out2
- out = self.upsampling(F.interpolate(out, scale_factor=2, mode="bicubic"))
- out = self.upsampling(F.interpolate(out, scale_factor=2, mode="bicubic"))
- out = self.conv_block3(out)
- #
- out = self.conv_block4(out)
-
- #demResidual = out[:, 1:2, :, :]
- #grayResidual = out[:, 0:1, :, :]
-
- # out = self.trunkRGB(out_4)
- #
- # out_dem = out[:, 3:4, :, :] * 0.2 + demResidual # DEM images extracted
- # out_rgb = out[:, 0:3, :, :] * 0.2 + rgbResidual # RGB images extracted
-
- #ra0
- #out_rgb= rgbResidual + self.conv_block0_branch0(rgbResidual)
-
- out_dem = out + self.conv_block0_branch1(out) #out+ tanh()
- out_gray = out + self.conv_block0_branch0(out) #out+ tanh()
-
- out_gray = self.conv_block1_branch0(out_gray) #sigmoid()
- out_dem = self.conv_block1_branch1(out_dem) #sigmoid()
-
- return out_gray, out_dem, rlNet_out
-
-
- def forward(self, x: Tensor) -> Tensor:
- return self._forward_impl(x)
-
- def _initialize_weights(self) -> None:
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- nn.init.kaiming_normal_(m.weight)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
- m.weight.data *= 0.1
- elif isinstance(m, nn.BatchNorm2d):
- nn.init.constant_(m.weight, 1)
- m.weight.data *= 0.1
-
-
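
A quick shape check makes the two-headed design concrete: the network returns a 4x-upsampled grayscale prediction, a 4x-upsampled DEM prediction, and the intermediate RLNet restoration at input resolution (the 64x64 input below is an arbitrary choice for this sketch):

```python
# Hedged shape sketch for the Generator above; the input resolution is illustrative.
import torch

generator = Generator()
x = torch.randn(1, 1, 64, 64)                    # single-channel input patch

out_gray, out_dem, rlnet_out = generator(x)
print(out_gray.shape)   # torch.Size([1, 1, 256, 256]) after two 2x bicubic upsamplings
print(out_dem.shape)    # torch.Size([1, 1, 256, 256])
print(rlnet_out.shape)  # torch.Size([1, 1, 64, 64]), the restored input from the RLNet stage
```
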
diff --git a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/quantization/core_vq.py b/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/quantization/core_vq.py
deleted file mode 100644
index e1896bb1788a945a1f7be6369abb255ecf72c7a0..0000000000000000000000000000000000000000
--- a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/quantization/core_vq.py
+++ /dev/null
@@ -1,400 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import typing as tp
-
-from einops import rearrange, repeat
-import flashy
-import torch
-from torch import nn, einsum
-import torch.nn.functional as F
-
-
-def exists(val: tp.Optional[tp.Any]) -> bool:
- return val is not None
-
-
-def default(val: tp.Any, d: tp.Any) -> tp.Any:
- return val if exists(val) else d
-
-
-def l2norm(t):
- return F.normalize(t, p=2, dim=-1)
-
-
-def ema_inplace(moving_avg, new, decay: float):
- moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
-
-
-def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5):
- return (x + epsilon) / (x.sum() + n_categories * epsilon)
-
-
-def uniform_init(*shape: int):
- t = torch.empty(shape)
- nn.init.kaiming_uniform_(t)
- return t
-
-
-def sample_vectors(samples, num: int):
- num_samples, device = samples.shape[0], samples.device
-
- if num_samples >= num:
- indices = torch.randperm(num_samples, device=device)[:num]
- else:
- indices = torch.randint(0, num_samples, (num,), device=device)
-
- return samples[indices]
-
-
-def kmeans(samples, num_clusters: int, num_iters: int = 10):
- dim, dtype = samples.shape[-1], samples.dtype
-
- means = sample_vectors(samples, num_clusters)
-
- for _ in range(num_iters):
- diffs = rearrange(samples, "n d -> n () d") - rearrange(
- means, "c d -> () c d"
- )
- dists = -(diffs ** 2).sum(dim=-1)
-
- buckets = dists.max(dim=-1).indices
- bins = torch.bincount(buckets, minlength=num_clusters)
- zero_mask = bins == 0
- bins_min_clamped = bins.masked_fill(zero_mask, 1)
-
- new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
- new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples)
- new_means = new_means / bins_min_clamped[..., None]
-
- means = torch.where(zero_mask[..., None], means, new_means)
-
- return means, bins
-
-
-def orthgonal_loss_fn(t):
- # eq (2) from https://arxiv.org/abs/2112.00384
- n = t.shape[0]
- normed_codes = l2norm(t)
- identity = torch.eye(n, device=t.device)
- cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes)
- return ((cosine_sim - identity) ** 2).sum() / (n ** 2)
-
-
-class EuclideanCodebook(nn.Module):
- """Codebook with Euclidean distance.
-
- Args:
- dim (int): Dimension.
- codebook_size (int): Codebook size.
- kmeans_init (bool): Whether to use k-means to initialize the codebooks.
- If set to true, run the k-means algorithm on the first training batch and use
- the learned centroids as initialization.
- kmeans_iters (int): Number of iterations used for k-means algorithm at initialization.
- decay (float): Decay for exponential moving average over the codebooks.
- epsilon (float): Epsilon value for numerical stability.
- threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
- that have an exponential moving average cluster size less than the specified threshold with
-            a randomly selected vector from the current batch.
- """
- def __init__(
- self,
- dim: int,
- codebook_size: int,
-        kmeans_init: bool = False,
- kmeans_iters: int = 10,
- decay: float = 0.8,
- epsilon: float = 1e-5,
- threshold_ema_dead_code: int = 2,
- ):
- super().__init__()
- self.decay = decay
- init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros
- embed = init_fn(codebook_size, dim)
-
- self.codebook_size = codebook_size
-
- self.kmeans_iters = kmeans_iters
- self.epsilon = epsilon
- self.threshold_ema_dead_code = threshold_ema_dead_code
-
- self.register_buffer("inited", torch.Tensor([not kmeans_init]))
- self.register_buffer("cluster_size", torch.zeros(codebook_size))
- self.register_buffer("embed", embed)
- self.register_buffer("embed_avg", embed.clone())
-
- @torch.jit.ignore
- def init_embed_(self, data):
- if self.inited:
- return
-
- embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
- self.embed.data.copy_(embed)
- self.embed_avg.data.copy_(embed.clone())
- self.cluster_size.data.copy_(cluster_size)
- self.inited.data.copy_(torch.Tensor([True]))
- # Make sure all buffers across workers are in sync after initialization
- flashy.distrib.broadcast_tensors(self.buffers())
-
- def replace_(self, samples, mask):
- modified_codebook = torch.where(
- mask[..., None], sample_vectors(samples, self.codebook_size), self.embed
- )
- self.embed.data.copy_(modified_codebook)
-
- def expire_codes_(self, batch_samples):
- if self.threshold_ema_dead_code == 0:
- return
-
- expired_codes = self.cluster_size < self.threshold_ema_dead_code
- if not torch.any(expired_codes):
- return
-
- batch_samples = rearrange(batch_samples, "... d -> (...) d")
- self.replace_(batch_samples, mask=expired_codes)
- flashy.distrib.broadcast_tensors(self.buffers())
-
- def preprocess(self, x):
- x = rearrange(x, "... d -> (...) d")
- return x
-
- def quantize(self, x):
- embed = self.embed.t()
- dist = -(
- x.pow(2).sum(1, keepdim=True)
- - 2 * x @ embed
- + embed.pow(2).sum(0, keepdim=True)
- )
- embed_ind = dist.max(dim=-1).indices
- return embed_ind
-
- def postprocess_emb(self, embed_ind, shape):
- return embed_ind.view(*shape[:-1])
-
- def dequantize(self, embed_ind):
- quantize = F.embedding(embed_ind, self.embed)
- return quantize
-
- def encode(self, x):
- shape = x.shape
- # pre-process
- x = self.preprocess(x)
- # quantize
- embed_ind = self.quantize(x)
- # post-process
- embed_ind = self.postprocess_emb(embed_ind, shape)
- return embed_ind
-
- def decode(self, embed_ind):
- quantize = self.dequantize(embed_ind)
- return quantize
-
- def forward(self, x):
- shape, dtype = x.shape, x.dtype
- x = self.preprocess(x)
- self.init_embed_(x)
-
- embed_ind = self.quantize(x)
- embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
- embed_ind = self.postprocess_emb(embed_ind, shape)
- quantize = self.dequantize(embed_ind)
-
- if self.training:
- # We do the expiry of code at that point as buffers are in sync
- # and all the workers will take the same decision.
- self.expire_codes_(x)
- ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay)
- embed_sum = x.t() @ embed_onehot
- ema_inplace(self.embed_avg, embed_sum.t(), self.decay)
- cluster_size = (
- laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon)
- * self.cluster_size.sum()
- )
- embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
- self.embed.data.copy_(embed_normalized)
-
- return quantize, embed_ind
-
-
-class VectorQuantization(nn.Module):
- """Vector quantization implementation.
- Currently supports only euclidean distance.
-
- Args:
- dim (int): Dimension
- codebook_size (int): Codebook size
- codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim.
- decay (float): Decay for exponential moving average over the codebooks.
- epsilon (float): Epsilon value for numerical stability.
- kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
- kmeans_iters (int): Number of iterations used for kmeans initialization.
- channels_last (bool): Channels are the last dimension in the input tensors.
- commitment_weight (float): Weight for commitment loss.
- orthogonal_reg_weight (float): Orthogonal regularization weights.
- orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.
- orthogonal_reg_max_codes (optional int): Maximum number of codes to consider
-            for orthogonal regularization.
- threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
- that have an exponential moving average cluster size less than the specified threshold with
-            a randomly selected vector from the current batch.
- """
- def __init__(
- self,
- dim: int,
- codebook_size: int,
- codebook_dim: tp.Optional[int] = None,
- decay: float = 0.8,
- epsilon: float = 1e-5,
- kmeans_init: bool = False,
- kmeans_iters: int = 10,
- threshold_ema_dead_code: int = 2,
- channels_last: bool = False,
- commitment_weight: float = 1.,
- orthogonal_reg_weight: float = 0.0,
- orthogonal_reg_active_codes_only: bool = False,
- orthogonal_reg_max_codes: tp.Optional[int] = None,
- ):
- super().__init__()
- _codebook_dim: int = default(codebook_dim, dim)
-
- requires_projection = _codebook_dim != dim
- self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity())
- self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity())
-
- self.epsilon = epsilon
- self.commitment_weight = commitment_weight
-
- self.orthogonal_reg_weight = orthogonal_reg_weight
- self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
- self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
-
- self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size,
- kmeans_init=kmeans_init, kmeans_iters=kmeans_iters,
- decay=decay, epsilon=epsilon,
- threshold_ema_dead_code=threshold_ema_dead_code)
- self.codebook_size = codebook_size
-
- self.channels_last = channels_last
-
- @property
- def codebook(self):
- return self._codebook.embed
-
- @property
- def inited(self):
- return self._codebook.inited
-
- def _preprocess(self, x):
- if not self.channels_last:
- x = rearrange(x, "b d n -> b n d")
- return x
-
- def _postprocess(self, quantize):
- if not self.channels_last:
- quantize = rearrange(quantize, "b n d -> b d n")
- return quantize
-
- def encode(self, x):
- x = self._preprocess(x)
- x = self.project_in(x)
- embed_in = self._codebook.encode(x)
- return embed_in
-
- def decode(self, embed_ind):
- quantize = self._codebook.decode(embed_ind)
- quantize = self.project_out(quantize)
- quantize = self._postprocess(quantize)
- return quantize
-
- def forward(self, x):
- device = x.device
- x = self._preprocess(x)
-
- x = self.project_in(x)
- quantize, embed_ind = self._codebook(x)
-
- if self.training:
- quantize = x + (quantize - x).detach()
-
- loss = torch.tensor([0.0], device=device, requires_grad=self.training)
-
- if self.training:
- if self.commitment_weight > 0:
- commit_loss = F.mse_loss(quantize.detach(), x)
- loss = loss + commit_loss * self.commitment_weight
-
- if self.orthogonal_reg_weight > 0:
- codebook = self.codebook
-
- if self.orthogonal_reg_active_codes_only:
- # only calculate orthogonal loss for the activated codes for this batch
- unique_code_ids = torch.unique(embed_ind)
- codebook = codebook[unique_code_ids]
-
- num_codes = codebook.shape[0]
- if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes:
- rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes]
- codebook = codebook[rand_ids]
-
- orthogonal_reg_loss = orthgonal_loss_fn(codebook)
- loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
-
- quantize = self.project_out(quantize)
- quantize = self._postprocess(quantize)
-
- return quantize, embed_ind, loss
-
-
-class ResidualVectorQuantization(nn.Module):
- """Residual vector quantization implementation.
-
- Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf
- """
- def __init__(self, *, num_quantizers, **kwargs):
- super().__init__()
- self.layers = nn.ModuleList(
- [VectorQuantization(**kwargs) for _ in range(num_quantizers)]
- )
-
- def forward(self, x, n_q: tp.Optional[int] = None):
- quantized_out = 0.0
- residual = x
-
- all_losses = []
- all_indices = []
-
- n_q = n_q or len(self.layers)
-
- for i, layer in enumerate(self.layers[:n_q]):
- quantized, indices, loss = layer(residual)
- residual = residual - quantized
- quantized_out = quantized_out + quantized
- all_indices.append(indices)
- all_losses.append(loss)
-
- out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
- return quantized_out, out_indices, out_losses
-
- def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor:
- residual = x
- all_indices = []
- n_q = n_q or len(self.layers)
- for layer in self.layers[:n_q]:
- indices = layer.encode(residual)
- quantized = layer.decode(indices)
- residual = residual - quantized
- all_indices.append(indices)
- out_indices = torch.stack(all_indices)
- return out_indices
-
- def decode(self, q_indices: torch.Tensor) -> torch.Tensor:
- quantized_out = torch.tensor(0.0, device=q_indices.device)
- for i, indices in enumerate(q_indices):
- layer = self.layers[i]
- quantized = layer.decode(indices)
- quantized_out = quantized_out + quantized
- return quantized_out
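
To make the residual scheme concrete, a minimal sketch of quantizing a latent sequence with a small codebook stack follows; the sizes are illustrative and not the values any particular EnCodec or MusicGen checkpoint uses:

```python
# Hedged usage sketch for ResidualVectorQuantization; all sizes are illustrative.
import torch

rvq = ResidualVectorQuantization(num_quantizers=4, dim=128, codebook_size=1024)
rvq.eval()  # skip EMA / dead-code expiry updates in this toy, non-distributed example

x = torch.randn(2, 128, 50)                # (batch, dim, frames); channels-first by default

quantized, codes, losses = rvq(x)          # quantized: (2, 128, 50), codes: (4, 2, 50)
roundtrip = rvq.decode(rvq.encode(x))      # decode(encode(x)) reproduces the quantized output

print(quantized.shape, codes.shape, roundtrip.shape)
```

Each layer quantizes the residual left by the previous layers, so later codebooks refine the reconstruction rather than re-encode the input from scratch.
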
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/InTouching.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/InTouching.d.ts
deleted file mode 100644
index 7ed6b4e2f6280bf9a8e638204180a8764ac9dbf7..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/InTouching.d.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-import InTouching from '../../../plugins/intouching'
-export default InTouching;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenSizers.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenSizers.js
deleted file mode 100644
index 56e20e30c3bf0afaaa2918512b920bb4472ae777..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenSizers.js
+++ /dev/null
@@ -1,10 +0,0 @@
-var GetChildrenSizers = function(out) {
- if (out === undefined) {
- out = [];
- }
- if (this.child && this.child.isRexSizer) {
- out.push(this.child);
- }
- return out;
-}
-export default GetChildrenSizers;
\ No newline at end of file
diff --git a/spaces/Amrrs/DragGan-Inversion/torch_utils/ops/upfirdn2d.cpp b/spaces/Amrrs/DragGan-Inversion/torch_utils/ops/upfirdn2d.cpp
deleted file mode 100644
index 44fa337d8d4c34dfa010a59cd27d86857db671aa..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/torch_utils/ops/upfirdn2d.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include <torch/extension.h>
-#include <ATen/cuda/CUDAContext.h>
-#include <c10/cuda/CUDAGuard.h>
-#include "upfirdn2d.h"
-
-//------------------------------------------------------------------------
-
-static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain)
-{
- // Validate arguments.
- TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
- TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x");
- TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32");
- TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
- TORCH_CHECK(f.numel() <= INT_MAX, "f is too large");
- TORCH_CHECK(x.numel() > 0, "x has zero size");
- TORCH_CHECK(f.numel() > 0, "f has zero size");
- TORCH_CHECK(x.dim() == 4, "x must be rank 4");
- TORCH_CHECK(f.dim() == 2, "f must be rank 2");
- TORCH_CHECK((x.size(0)-1)*x.stride(0) + (x.size(1)-1)*x.stride(1) + (x.size(2)-1)*x.stride(2) + (x.size(3)-1)*x.stride(3) <= INT_MAX, "x memory footprint is too large");
- TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1");
- TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1");
- TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1");
-
- // Create output tensor.
- const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
- int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx;
- int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy;
- TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1");
- torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format());
- TORCH_CHECK(y.numel() <= INT_MAX, "output is too large");
- TORCH_CHECK((y.size(0)-1)*y.stride(0) + (y.size(1)-1)*y.stride(1) + (y.size(2)-1)*y.stride(2) + (y.size(3)-1)*y.stride(3) <= INT_MAX, "output memory footprint is too large");
-
- // Initialize CUDA kernel parameters.
- upfirdn2d_kernel_params p;
- p.x = x.data_ptr();
- p.f = f.data_ptr();
- p.y = y.data_ptr();
- p.up = make_int2(upx, upy);
- p.down = make_int2(downx, downy);
- p.pad0 = make_int2(padx0, pady0);
- p.flip = (flip) ? 1 : 0;
- p.gain = gain;
- p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0));
- p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0));
- p.filterSize = make_int2((int)f.size(1), (int)f.size(0));
- p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0));
- p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0));
- p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0));
- p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z;
- p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1;
-
- // Choose CUDA kernel.
- upfirdn2d_kernel_spec spec;
- AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
- {
- spec = choose_upfirdn2d_kernel(p);
- });
-
- // Set looping options.
- p.loopMajor = (p.sizeMajor - 1) / 16384 + 1;
- p.loopMinor = spec.loopMinor;
- p.loopX = spec.loopX;
- p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1;
- p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1;
-
- // Compute grid size.
- dim3 blockSize, gridSize;
- if (spec.tileOutW < 0) // large
- {
- blockSize = dim3(4, 32, 1);
- gridSize = dim3(
- ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor,
- (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1,
- p.launchMajor);
- }
- else // small
- {
- blockSize = dim3(256, 1, 1);
- gridSize = dim3(
- ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor,
- (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1,
- p.launchMajor);
- }
-
- // Launch CUDA kernel.
- void* args[] = {&p};
- AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
- return y;
-}
-
-//------------------------------------------------------------------------
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
-{
- m.def("upfirdn2d", &upfirdn2d);
-}
-
-//------------------------------------------------------------------------
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py
deleted file mode 100644
index f1f2e2d607db40727ac3665833a366c89c7a6bb6..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py
+++ /dev/null
@@ -1,725 +0,0 @@
-# Copyright 2023 MultiDiffusion Authors and The HuggingFace Team. All rights reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-import inspect
-import warnings
-from typing import Any, Callable, Dict, List, Optional, Union
-
-import torch
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
-from ...image_processor import VaeImageProcessor
-from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
-from ...models import AutoencoderKL, UNet2DConditionModel
-from ...schedulers import DDIMScheduler
-from ...utils import logging, randn_tensor, replace_example_docstring
-from ..pipeline_utils import DiffusionPipeline
-from . import StableDiffusionPipelineOutput
-from .safety_checker import StableDiffusionSafetyChecker
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```py
- >>> import torch
- >>> from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler
-
- >>> model_ckpt = "stabilityai/stable-diffusion-2-base"
- >>> scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
- >>> pipe = StableDiffusionPanoramaPipeline.from_pretrained(
- ... model_ckpt, scheduler=scheduler, torch_dtype=torch.float16
- ... )
-
- >>> pipe = pipe.to("cuda")
-
- >>> prompt = "a photo of the dolomites"
- >>> image = pipe(prompt).images[0]
- ```
-"""
-
-
-class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
- r"""
- Pipeline for text-to-image generation using MultiDiffusion.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
- Args:
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- text_encoder ([`~transformers.CLIPTextModel`]):
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
- tokenizer ([`~transformers.CLIPTokenizer`]):
- A `CLIPTokenizer` to tokenize text.
- unet ([`UNet2DConditionModel`]):
- A `UNet2DConditionModel` to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
- about a model's potential harms.
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
- """
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- scheduler: DDIMScheduler,
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPImageProcessor,
- requires_safety_checker: bool = True,
- ):
- super().__init__()
-
- if safety_checker is None and requires_safety_checker:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
-
- if safety_checker is not None and feature_extractor is None:
- raise ValueError(
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
- )
-
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
- self.register_to_config(requires_safety_checker=requires_safety_checker)
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
- def enable_vae_slicing(self):
- r"""
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
- """
- self.vae.enable_slicing()
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
- def disable_vae_slicing(self):
- r"""
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
- computing decoding in one step.
- """
- self.vae.disable_slicing()
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
- def _encode_prompt(
- self,
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt=None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- lora_scale: Optional[float] = None,
- ):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
- prompt to be encoded
-            device (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
- less than `1`).
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- lora_scale (`float`, *optional*):
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
- """
- # set lora scale so that monkey patched LoRA
- # function of text encoder can correctly access it
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
- self._lora_scale = lora_scale
-
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- if prompt_embeds is None:
-            # textual inversion: process multi-vector tokens if necessary
- if isinstance(self, TextualInversionLoaderMixin):
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
-
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
- text_input_ids, untruncated_ids
- ):
- removed_text = self.tokenizer.batch_decode(
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
- )
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask.to(device)
- else:
- attention_mask = None
-
- prompt_embeds = self.text_encoder(
- text_input_ids.to(device),
- attention_mask=attention_mask,
- )
- prompt_embeds = prompt_embeds[0]
-
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
- bs_embed, seq_len, _ = prompt_embeds.shape
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance and negative_prompt_embeds is None:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif prompt is not None and type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
-            # textual inversion: process multi-vector tokens if necessary
- if isinstance(self, TextualInversionLoaderMixin):
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
-
- max_length = prompt_embeds.shape[1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = uncond_input.attention_mask.to(device)
- else:
- attention_mask = None
-
- negative_prompt_embeds = self.text_encoder(
- uncond_input.input_ids.to(device),
- attention_mask=attention_mask,
- )
- negative_prompt_embeds = negative_prompt_embeds[0]
-
- if do_classifier_free_guidance:
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = negative_prompt_embeds.shape[1]
-
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
- return prompt_embeds
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
- def run_safety_checker(self, image, device, dtype):
- if self.safety_checker is None:
- has_nsfw_concept = None
- else:
- if torch.is_tensor(image):
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
- else:
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
- image, has_nsfw_concept = self.safety_checker(
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
- )
- return image, has_nsfw_concept
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
- def decode_latents(self, latents):
- warnings.warn(
- "The decode_latents method is deprecated and will be removed in a future version. Please"
- " use VaeImageProcessor instead",
- FutureWarning,
- )
- latents = 1 / self.vae.config.scaling_factor * latents
- image = self.vae.decode(latents, return_dict=False)[0]
- image = (image / 2 + 0.5).clamp(0, 1)
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
- return image
-
- def decode_latents_with_padding(self, latents, padding=8):
- # Add padding to latents for circular inference
- # padding is the number of latents to add on each side
-        # it slightly increases memory usage but removes the boundary artifacts
- latents = 1 / self.vae.config.scaling_factor * latents
- latents_left = latents[..., :padding]
- latents_right = latents[..., -padding:]
- latents = torch.cat((latents_right, latents, latents_left), axis=-1)
- image = self.vae.decode(latents, return_dict=False)[0]
- padding_pix = self.vae_scale_factor * padding
- image = image[..., padding_pix:-padding_pix]
- return image
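For intuition, the wrap-around that `decode_latents_with_padding` applies before decoding can be reproduced on a toy tensor (a standalone sketch, not pipeline code):

```py
import torch

latents = torch.arange(12.).reshape(1, 1, 1, 12)   # toy latent row, width 12
padding = 2
# Copy the right edge to the left and the left edge to the right, so the VAE
# sees 360-degree context at both seams.
wrapped = torch.cat((latents[..., -padding:], latents, latents[..., :padding]), dim=-1)
print(wrapped[0, 0, 0])   # tensor([10., 11., 0., 1., ..., 10., 11., 0., 1.])
# After decoding, vae_scale_factor * padding pixels are trimmed from each side,
# exactly as in the method above.
```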
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-        # eta (η) is only used with the DDIMScheduler, and will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
- def check_inputs(
- self,
- prompt,
- height,
- width,
- callback_steps,
- negative_prompt=None,
- prompt_embeds=None,
- negative_prompt_embeds=None,
- ):
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- if prompt is not None and prompt_embeds is not None:
- raise ValueError(
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
- " only forward one of the two."
- )
- elif prompt is None and prompt_embeds is None:
- raise ValueError(
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
- )
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if negative_prompt is not None and negative_prompt_embeds is not None:
- raise ValueError(
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
- )
-
- if prompt_embeds is not None and negative_prompt_embeds is not None:
- if prompt_embeds.shape != negative_prompt_embeds.shape:
- raise ValueError(
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
- f" {negative_prompt_embeds.shape}."
- )
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if latents is None:
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- else:
- latents = latents.to(device)
-
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
- return latents
-
- def get_views(self, panorama_height, panorama_width, window_size=64, stride=8, circular_padding=False):
- # Here, we define the mappings F_i (see Eq. 7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113)
- # if panorama's height/width < window_size, num_blocks of height/width should return 1
- panorama_height /= 8
- panorama_width /= 8
- num_blocks_height = (panorama_height - window_size) // stride + 1 if panorama_height > window_size else 1
- if circular_padding:
- num_blocks_width = panorama_width // stride if panorama_width > window_size else 1
- else:
- num_blocks_width = (panorama_width - window_size) // stride + 1 if panorama_width > window_size else 1
- total_num_blocks = int(num_blocks_height * num_blocks_width)
- views = []
- for i in range(total_num_blocks):
- h_start = int((i // num_blocks_width) * stride)
- h_end = h_start + window_size
- w_start = int((i % num_blocks_width) * stride)
- w_end = w_start + window_size
- views.append((h_start, h_end, w_start, w_end))
- return views
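As a quick sanity check of the tiling arithmetic (a condensed sketch of `get_views` without circular padding): a 512x2048 image maps to a 64x256 latent, which with `window_size=64` and `stride=8` yields a single row of 25 overlapping views.

```py
def views_for(height, width, window_size=64, stride=8):
    h, w = height // 8, width // 8
    rows = (h - window_size) // stride + 1 if h > window_size else 1
    cols = (w - window_size) // stride + 1 if w > window_size else 1
    return [((i // cols) * stride, (i // cols) * stride + window_size,
             (i % cols) * stride, (i % cols) * stride + window_size)
            for i in range(rows * cols)]

views = views_for(512, 2048)
print(len(views))              # 25
print(views[0], views[-1])     # (0, 64, 0, 64) (0, 64, 192, 256)
```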
-
- @torch.no_grad()
- @replace_example_docstring(EXAMPLE_DOC_STRING)
- def __call__(
- self,
- prompt: Union[str, List[str]] = None,
- height: Optional[int] = 512,
- width: Optional[int] = 2048,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- view_batch_size: int = 1,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: Optional[int] = 1,
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- circular_padding: bool = False,
- ):
- r"""
- The call function to the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
- height (`int`, *optional*, defaults to 512):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to 2048):
-                The width in pixels of the generated image. The width is kept high because the pipeline is supposed to
- generate panorama-like images.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- A higher guidance scale value encourages the model to generate images closely linked to the text
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- view_batch_size (`int`, *optional*, defaults to 1):
-                The batch size to denoise split views. On high-performance GPUs, a higher view batch size can speed
-                up generation at the cost of increased VRAM usage.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
- generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor is generated by sampling using the supplied random `generator`.
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
- provided, text embeddings are generated from the `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
-                A function called every `callback_steps` steps during inference. The function is called with the
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function is called. If not specified, the callback is called at
- every step.
- cross_attention_kwargs (`dict`, *optional*):
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
- `self.processor` in
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
- circular_padding (`bool`, *optional*, defaults to `False`):
- If set to `True`, circular padding is applied to ensure there are no stitching artifacts. Circular
- padding allows the model to seamlessly generate a transition from the rightmost part of the image to
- the leftmost part, maintaining consistency in a 360-degree sense.
-
- Examples:
-
- Returns:
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
- second element is a list of `bool`s indicating whether the corresponding generated image contains
- "not-safe-for-work" (nsfw) content.
- """
- # 0. Default height and width to unet
- height = height or self.unet.config.sample_size * self.vae_scale_factor
- width = width or self.unet.config.sample_size * self.vae_scale_factor
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(
- prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
- )
-
- # 2. Define call parameters
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- device = self._execution_device
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 3. Encode input prompt
- text_encoder_lora_scale = (
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
- )
- prompt_embeds = self._encode_prompt(
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_prompt_embeds,
- lora_scale=text_encoder_lora_scale,
- )
-
- # 4. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps = self.scheduler.timesteps
-
- # 5. Prepare latent variables
- num_channels_latents = self.unet.config.in_channels
- latents = self.prepare_latents(
- batch_size * num_images_per_prompt,
- num_channels_latents,
- height,
- width,
- prompt_embeds.dtype,
- device,
- generator,
- latents,
- )
-
- # 6. Define panorama grid and initialize views for synthesis.
- # prepare batch grid
- views = self.get_views(height, width, circular_padding=circular_padding)
- views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
- views_scheduler_status = [copy.deepcopy(self.scheduler.__dict__)] * len(views_batch)
- count = torch.zeros_like(latents)
- value = torch.zeros_like(latents)
-
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 8. Denoising loop
- # Each denoising step also includes refinement of the latents with respect to the
- # views.
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- count.zero_()
- value.zero_()
-
- # generate views
- # Here, we iterate through different spatial crops of the latents and denoise them. These
- # denoised (latent) crops are then averaged to produce the final latent
- # for the current timestep via MultiDiffusion. Please see Sec. 4.1 in the
- # MultiDiffusion paper for more details: https://arxiv.org/abs/2302.08113
- # Batch views denoise
- for j, batch_view in enumerate(views_batch):
- vb_size = len(batch_view)
- # get the latents corresponding to the current view coordinates
- if circular_padding:
- latents_for_view = []
- for h_start, h_end, w_start, w_end in batch_view:
- if w_end > latents.shape[3]:
- # Add circular horizontal padding
- latent_view = torch.cat(
- (
- latents[:, :, h_start:h_end, w_start:],
- latents[:, :, h_start:h_end, : w_end - latents.shape[3]],
- ),
- axis=-1,
- )
- else:
- latent_view = latents[:, :, h_start:h_end, w_start:w_end]
- latents_for_view.append(latent_view)
- latents_for_view = torch.cat(latents_for_view)
- else:
- latents_for_view = torch.cat(
- [
- latents[:, :, h_start:h_end, w_start:w_end]
- for h_start, h_end, w_start, w_end in batch_view
- ]
- )
-
- # rematch block's scheduler status
- self.scheduler.__dict__.update(views_scheduler_status[j])
-
- # expand the latents if we are doing classifier free guidance
- latent_model_input = (
- latents_for_view.repeat_interleave(2, dim=0)
- if do_classifier_free_guidance
- else latents_for_view
- )
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- # repeat prompt_embeds for batch
- prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
-
- # predict the noise residual
- noise_pred = self.unet(
- latent_model_input,
- t,
- encoder_hidden_states=prompt_embeds_input,
- cross_attention_kwargs=cross_attention_kwargs,
- ).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents_denoised_batch = self.scheduler.step(
- noise_pred, t, latents_for_view, **extra_step_kwargs
- ).prev_sample
-
- # save views scheduler status after sample
- views_scheduler_status[j] = copy.deepcopy(self.scheduler.__dict__)
-
- # extract value from batch
- for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
- latents_denoised_batch.chunk(vb_size), batch_view
- ):
- if circular_padding and w_end > latents.shape[3]:
- # Case for circular padding
- value[:, :, h_start:h_end, w_start:] += latents_view_denoised[
- :, :, h_start:h_end, : latents.shape[3] - w_start
- ]
- value[:, :, h_start:h_end, : w_end - latents.shape[3]] += latents_view_denoised[
- :, :, h_start:h_end, latents.shape[3] - w_start :
- ]
- count[:, :, h_start:h_end, w_start:] += 1
- count[:, :, h_start:h_end, : w_end - latents.shape[3]] += 1
- else:
- value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
- count[:, :, h_start:h_end, w_start:w_end] += 1
-
- # take the MultiDiffusion step. Eq. 5 in MultiDiffusion paper: https://arxiv.org/abs/2302.08113
- latents = torch.where(count > 0, value / count, value)
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- if not output_type == "latent":
- if circular_padding:
- image = self.decode_latents_with_padding(latents)
- else:
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
- else:
- image = latents
- has_nsfw_concept = None
-
- if has_nsfw_concept is None:
- do_denormalize = [True] * image.shape[0]
- else:
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
-
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
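Stripped of the diffusion machinery, the MultiDiffusion fusion inside the loop above (Eq. 5) is just a per-pixel average of the denoised crops; a toy sketch makes the bookkeeping explicit:

```py
import torch

latents = torch.zeros(1, 4, 64, 256)
value, count = torch.zeros_like(latents), torch.zeros_like(latents)

views = [(0, 64, 0, 64), (0, 64, 8, 72)]                 # two overlapping windows
for h0, h1, w0, w1 in views:
    denoised_view = torch.randn(1, 4, h1 - h0, w1 - w0)  # stand-in for the scheduler output
    value[:, :, h0:h1, w0:w1] += denoised_view
    count[:, :, h0:h1, w0:w1] += 1

latents = torch.where(count > 0, value / count, value)
print(count[0, 0, 0, 10].item())  # 2.0 -> this column was averaged over both views
```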
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
deleted file mode 100644
index 158b702c234f5c10c4f5f03e08e8794ac7b8dcad..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
+++ /dev/null
@@ -1,159 +0,0 @@
-import torch
-
-from .builder import IOU_CALCULATORS
-
-
-@IOU_CALCULATORS.register_module()
-class BboxOverlaps2D(object):
- """2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
-
- def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
- """Calculate IoU between 2D bboxes.
-
- Args:
-            bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
-                format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
-            bboxes2 (Tensor): bboxes have shape (n, 4) in <x1, y1, x2, y2>
-                format, shape (n, 5) in <x1, y1, x2, y2, score> format, or be
-                empty. If ``is_aligned`` is ``True``, then m and n must be
-                equal.
- mode (str): "iou" (intersection over union), "iof" (intersection
- over foreground), or "giou" (generalized intersection over
- union).
- is_aligned (bool, optional): If True, then m and n must be equal.
- Default False.
-
- Returns:
-            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
- """
- assert bboxes1.size(-1) in [0, 4, 5]
- assert bboxes2.size(-1) in [0, 4, 5]
- if bboxes2.size(-1) == 5:
- bboxes2 = bboxes2[..., :4]
- if bboxes1.size(-1) == 5:
- bboxes1 = bboxes1[..., :4]
- return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
-
- def __repr__(self):
- """str: a string describing the module"""
- repr_str = self.__class__.__name__ + '()'
- return repr_str
-
-
-def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
- """Calculate overlap between two set of bboxes.
-
-    If ``is_aligned`` is ``False``, then calculate the overlaps between each
- bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
- pair of bboxes1 and bboxes2.
-
- Args:
-        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
-        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
-            B indicates the batch dim, in shape (B1, B2, ..., Bn).
-            If ``is_aligned`` is ``True``, then m and n must be equal.
- mode (str): "iou" (intersection over union), "iof" (intersection over
- foreground) or "giou" (generalized intersection over union).
- Default "iou".
- is_aligned (bool, optional): If True, then m and n must be equal.
- Default False.
- eps (float, optional): A value added to the denominator for numerical
- stability. Default 1e-6.
-
- Returns:
-        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
-
- Example:
- >>> bboxes1 = torch.FloatTensor([
- >>> [0, 0, 10, 10],
- >>> [10, 10, 20, 20],
- >>> [32, 32, 38, 42],
- >>> ])
- >>> bboxes2 = torch.FloatTensor([
- >>> [0, 0, 10, 20],
- >>> [0, 10, 10, 19],
- >>> [10, 10, 20, 20],
- >>> ])
- >>> overlaps = bbox_overlaps(bboxes1, bboxes2)
- >>> assert overlaps.shape == (3, 3)
- >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
- >>> assert overlaps.shape == (3, )
-
- Example:
- >>> empty = torch.empty(0, 4)
- >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
- >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
- >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
- >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
- """
-
- assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
- # Either the boxes are empty or the length of boxes' last dimension is 4
- assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
- assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
-
- # Batch dim must be the same
- # Batch dim: (B1, B2, ... Bn)
- assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
- batch_shape = bboxes1.shape[:-2]
-
- rows = bboxes1.size(-2)
- cols = bboxes2.size(-2)
- if is_aligned:
- assert rows == cols
-
- if rows * cols == 0:
- if is_aligned:
- return bboxes1.new(batch_shape + (rows, ))
- else:
- return bboxes1.new(batch_shape + (rows, cols))
-
- area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
- bboxes1[..., 3] - bboxes1[..., 1])
- area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
- bboxes2[..., 3] - bboxes2[..., 1])
-
- if is_aligned:
- lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
- rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
-
- wh = (rb - lt).clamp(min=0) # [B, rows, 2]
- overlap = wh[..., 0] * wh[..., 1]
-
- if mode in ['iou', 'giou']:
- union = area1 + area2 - overlap
- else:
- union = area1
- if mode == 'giou':
- enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
- enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
- else:
- lt = torch.max(bboxes1[..., :, None, :2],
- bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
- rb = torch.min(bboxes1[..., :, None, 2:],
- bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
-
- wh = (rb - lt).clamp(min=0) # [B, rows, cols, 2]
- overlap = wh[..., 0] * wh[..., 1]
-
- if mode in ['iou', 'giou']:
- union = area1[..., None] + area2[..., None, :] - overlap
- else:
- union = area1[..., None]
- if mode == 'giou':
- enclosed_lt = torch.min(bboxes1[..., :, None, :2],
- bboxes2[..., None, :, :2])
- enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
- bboxes2[..., None, :, 2:])
-
- eps = union.new_tensor([eps])
- union = torch.max(union, eps)
- ious = overlap / union
- if mode in ['iou', 'iof']:
- return ious
- # calculate gious
- enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
- enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
- enclose_area = torch.max(enclose_area, eps)
- gious = ious - (enclose_area - union) / enclose_area
- return gious
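A quick numeric check of the formulas above (assuming `bbox_overlaps` as defined here is importable): two 10x10 boxes offset by 5 pixels overlap in a 5x5 region, so IoU = 25 / 175, and GIoU subtracts the empty share of the 15x15 enclosing box.

```py
import torch

b1 = torch.tensor([[0., 0., 10., 10.]])
b2 = torch.tensor([[5., 5., 15., 15.]])

print(bbox_overlaps(b1, b2))                         # tensor([[0.1429]])  -> 25 / 175
print(bbox_overlaps(b1, b2, mode='giou'))            # tensor([[-0.0794]]) -> 0.1429 - 50 / 225
print(bbox_overlaps(b1, b2, is_aligned=True).shape)  # torch.Size([1])
```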
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/instaboost.py b/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/instaboost.py
deleted file mode 100644
index 38b6819f60587a6e0c0f6d57bfda32bb3a7a4267..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/instaboost.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import numpy as np
-
-from ..builder import PIPELINES
-
-
-@PIPELINES.register_module()
-class InstaBoost(object):
- r"""Data augmentation method in `InstaBoost: Boosting Instance
- Segmentation Via Probability Map Guided Copy-Pasting
-    <https://arxiv.org/abs/1908.07801>`_.
-
- Refer to https://github.com/GothicAi/Instaboost for implementation details.
- """
-
- def __init__(self,
- action_candidate=('normal', 'horizontal', 'skip'),
- action_prob=(1, 0, 0),
- scale=(0.8, 1.2),
- dx=15,
- dy=15,
- theta=(-1, 1),
- color_prob=0.5,
- hflag=False,
- aug_ratio=0.5):
- try:
- import instaboostfast as instaboost
- except ImportError:
- raise ImportError(
- 'Please run "pip install instaboostfast" '
- 'to install instaboostfast first for instaboost augmentation.')
- self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
- scale, dx, dy, theta,
- color_prob, hflag)
- self.aug_ratio = aug_ratio
-
- def _load_anns(self, results):
- labels = results['ann_info']['labels']
- masks = results['ann_info']['masks']
- bboxes = results['ann_info']['bboxes']
- n = len(labels)
-
- anns = []
- for i in range(n):
- label = labels[i]
- bbox = bboxes[i]
- mask = masks[i]
- x1, y1, x2, y2 = bbox
- # assert (x2 - x1) >= 1 and (y2 - y1) >= 1
- bbox = [x1, y1, x2 - x1, y2 - y1]
- anns.append({
- 'category_id': label,
- 'segmentation': mask,
- 'bbox': bbox
- })
-
- return anns
-
- def _parse_anns(self, results, anns, img):
- gt_bboxes = []
- gt_labels = []
- gt_masks_ann = []
- for ann in anns:
- x1, y1, w, h = ann['bbox']
- # TODO: more essential bug need to be fixed in instaboost
- if w <= 0 or h <= 0:
- continue
- bbox = [x1, y1, x1 + w, y1 + h]
- gt_bboxes.append(bbox)
- gt_labels.append(ann['category_id'])
- gt_masks_ann.append(ann['segmentation'])
- gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
- gt_labels = np.array(gt_labels, dtype=np.int64)
- results['ann_info']['labels'] = gt_labels
- results['ann_info']['bboxes'] = gt_bboxes
- results['ann_info']['masks'] = gt_masks_ann
- results['img'] = img
- return results
-
- def __call__(self, results):
- img = results['img']
- orig_type = img.dtype
- anns = self._load_anns(results)
- if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
- try:
- import instaboostfast as instaboost
- except ImportError:
- raise ImportError('Please run "pip install instaboostfast" '
- 'to install instaboostfast first.')
- anns, img = instaboost.get_new_data(
- anns, img.astype(np.uint8), self.cfg, background=None)
-
- results = self._parse_anns(results, anns, img.astype(orig_type))
- return results
-
- def __repr__(self):
- repr_str = self.__class__.__name__
- repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
- return repr_str
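Most of the pipeline's bookkeeping is converting between the two box conventions around the `instaboostfast` call and applying the augmentation stochastically; a small sketch (helper names are ours, not part of the module):

```py
import numpy as np

def xyxy_to_xywh(bbox):
    """mmdet (x1, y1, x2, y2) -> COCO-style (x, y, w, h), as in _load_anns."""
    x1, y1, x2, y2 = bbox
    return [x1, y1, x2 - x1, y2 - y1]

def xywh_to_xyxy(bbox):
    """COCO-style (x, y, w, h) -> mmdet (x1, y1, x2, y2), as in _parse_anns."""
    x, y, w, h = bbox
    return [x, y, x + w, y + h]

print(xyxy_to_xywh([10, 20, 50, 80]))   # [10, 20, 40, 60]
print(xywh_to_xyxy([10, 20, 40, 60]))   # [10, 20, 50, 80]

# The augmentation itself fires with probability aug_ratio, exactly like __call__:
aug_ratio = 0.5
apply_aug = np.random.choice([0, 1], p=[1 - aug_ratio, aug_ratio])
print(bool(apply_aug))
```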
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py
deleted file mode 100644
index a2183fc2db1ff188b0ad5418e55f71005da926cc..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './gcnet_r50-d8_512x512_80k_ade20k.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
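This two-line config only overrides the backbone depth and pretrained weights; everything else (decode head, dataset, schedule) is inherited from the `_base_` ResNet-50 file. Conceptually the merge is a recursive dict update; a rough illustration with made-up base values (the real base config has many more keys):

```py
base = {'model': {'pretrained': 'open-mmlab://resnet50_v1c',
                  'backbone': {'type': 'ResNetV1c', 'depth': 50}}}
override = {'model': {'pretrained': 'open-mmlab://resnet101_v1c',
                      'backbone': {'depth': 101}}}

def merge(a, b):
    """Recursive dict update, roughly what mmcv's Config does for _base_ files."""
    out = dict(a)
    for k, v in b.items():
        out[k] = merge(out[k], v) if isinstance(v, dict) and isinstance(out.get(k), dict) else v
    return out

print(merge(base, override)['model']['backbone'])
# {'type': 'ResNetV1c', 'depth': 101}
```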
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/colorspace.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/colorspace.py
deleted file mode 100644
index 814533952fdfda23d67cb6a3073692d8c1156add..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/colorspace.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import cv2
-import numpy as np
-
-
-def imconvert(img, src, dst):
- """Convert an image from the src colorspace to dst colorspace.
-
- Args:
- img (ndarray): The input image.
- src (str): The source colorspace, e.g., 'rgb', 'hsv'.
- dst (str): The destination colorspace, e.g., 'rgb', 'hsv'.
-
- Returns:
- ndarray: The converted image.
- """
- code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
- out_img = cv2.cvtColor(img, code)
- return out_img
-
-
-def bgr2gray(img, keepdim=False):
- """Convert a BGR image to grayscale image.
-
- Args:
- img (ndarray): The input image.
- keepdim (bool): If False (by default), then return the grayscale image
- with 2 dims, otherwise 3 dims.
-
- Returns:
- ndarray: The converted grayscale image.
- """
- out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
- if keepdim:
- out_img = out_img[..., None]
- return out_img
-
-
-def rgb2gray(img, keepdim=False):
- """Convert a RGB image to grayscale image.
-
- Args:
- img (ndarray): The input image.
- keepdim (bool): If False (by default), then return the grayscale image
- with 2 dims, otherwise 3 dims.
-
- Returns:
- ndarray: The converted grayscale image.
- """
- out_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
- if keepdim:
- out_img = out_img[..., None]
- return out_img
-
-
-def gray2bgr(img):
- """Convert a grayscale image to BGR image.
-
- Args:
- img (ndarray): The input image.
-
- Returns:
- ndarray: The converted BGR image.
- """
- img = img[..., None] if img.ndim == 2 else img
- out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
- return out_img
-
-
-def gray2rgb(img):
- """Convert a grayscale image to RGB image.
-
- Args:
- img (ndarray): The input image.
-
- Returns:
- ndarray: The converted RGB image.
- """
- img = img[..., None] if img.ndim == 2 else img
- out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
- return out_img
-
-
-def _convert_input_type_range(img):
- """Convert the type and range of the input image.
-
- It converts the input image to np.float32 type and range of [0, 1].
- It is mainly used for pre-processing the input image in colorspace
- conversion functions such as rgb2ycbcr and ycbcr2rgb.
-
- Args:
- img (ndarray): The input image. It accepts:
- 1. np.uint8 type with range [0, 255];
- 2. np.float32 type with range [0, 1].
-
- Returns:
- (ndarray): The converted image with type of np.float32 and range of
- [0, 1].
- """
- img_type = img.dtype
- img = img.astype(np.float32)
- if img_type == np.float32:
- pass
- elif img_type == np.uint8:
- img /= 255.
- else:
- raise TypeError('The img type should be np.float32 or np.uint8, '
- f'but got {img_type}')
- return img
-
-
-def _convert_output_type_range(img, dst_type):
- """Convert the type and range of the image according to dst_type.
-
- It converts the image to desired type and range. If `dst_type` is np.uint8,
- images will be converted to np.uint8 type with range [0, 255]. If
- `dst_type` is np.float32, it converts the image to np.float32 type with
- range [0, 1].
- It is mainly used for post-processing images in colorspace conversion
- functions such as rgb2ycbcr and ycbcr2rgb.
-
- Args:
- img (ndarray): The image to be converted with np.float32 type and
- range [0, 255].
- dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
- converts the image to np.uint8 type with range [0, 255]. If
- dst_type is np.float32, it converts the image to np.float32 type
- with range [0, 1].
-
- Returns:
- (ndarray): The converted image with desired type and range.
- """
- if dst_type not in (np.uint8, np.float32):
- raise TypeError('The dst_type should be np.float32 or np.uint8, '
- f'but got {dst_type}')
- if dst_type == np.uint8:
- img = img.round()
- else:
- img /= 255.
- return img.astype(dst_type)
-
-
-def rgb2ycbcr(img, y_only=False):
- """Convert a RGB image to YCbCr image.
-
- This function produces the same results as Matlab's `rgb2ycbcr` function.
- It implements the ITU-R BT.601 conversion for standard-definition
- television. See more details in
- https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
- It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`.
- In OpenCV, it implements a JPEG conversion. See more details in
- https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
- Args:
- img (ndarray): The input image. It accepts:
- 1. np.uint8 type with range [0, 255];
- 2. np.float32 type with range [0, 1].
- y_only (bool): Whether to only return Y channel. Default: False.
-
- Returns:
- ndarray: The converted YCbCr image. The output image has the same type
- and range as input image.
- """
- img_type = img.dtype
- img = _convert_input_type_range(img)
- if y_only:
- out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
- else:
- out_img = np.matmul(
- img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
- [24.966, 112.0, -18.214]]) + [16, 128, 128]
- out_img = _convert_output_type_range(out_img, img_type)
- return out_img
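A quick check of the BT.601 coefficients above on a pure-red uint8 pixel (assuming `rgb2ycbcr` from this module): Y = 65.481 * 1.0 + 16 ≈ 81, Cb = -37.797 + 128 ≈ 90, Cr = 112 + 128 = 240.

```py
import numpy as np

red = np.array([[[255, 0, 0]]], dtype=np.uint8)
print(rgb2ycbcr(red))               # [[[ 81  90 240]]]  (uint8, rounded)
print(rgb2ycbcr(red, y_only=True))  # [[81]] -> only the luma channel
```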
-
-
-def bgr2ycbcr(img, y_only=False):
- """Convert a BGR image to YCbCr image.
-
- The bgr version of rgb2ycbcr.
- It implements the ITU-R BT.601 conversion for standard-definition
- television. See more details in
- https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
- It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
- In OpenCV, it implements a JPEG conversion. See more details in
- https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
- Args:
- img (ndarray): The input image. It accepts:
- 1. np.uint8 type with range [0, 255];
- 2. np.float32 type with range [0, 1].
- y_only (bool): Whether to only return Y channel. Default: False.
-
- Returns:
- ndarray: The converted YCbCr image. The output image has the same type
- and range as input image.
- """
- img_type = img.dtype
- img = _convert_input_type_range(img)
- if y_only:
- out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
- else:
- out_img = np.matmul(
- img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
- [65.481, -37.797, 112.0]]) + [16, 128, 128]
- out_img = _convert_output_type_range(out_img, img_type)
- return out_img
-
-
-def ycbcr2rgb(img):
- """Convert a YCbCr image to RGB image.
-
- This function produces the same results as Matlab's ycbcr2rgb function.
- It implements the ITU-R BT.601 conversion for standard-definition
- television. See more details in
- https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
- It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`.
- In OpenCV, it implements a JPEG conversion. See more details in
- https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
- Args:
- img (ndarray): The input image. It accepts:
- 1. np.uint8 type with range [0, 255];
- 2. np.float32 type with range [0, 1].
-
- Returns:
- ndarray: The converted RGB image. The output image has the same type
- and range as input image.
- """
- img_type = img.dtype
- img = _convert_input_type_range(img) * 255
- out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
- [0, -0.00153632, 0.00791071],
- [0.00625893, -0.00318811, 0]]) * 255.0 + [
- -222.921, 135.576, -276.836
- ]
- out_img = _convert_output_type_range(out_img, img_type)
- return out_img
-
-
-def ycbcr2bgr(img):
- """Convert a YCbCr image to BGR image.
-
- The bgr version of ycbcr2rgb.
- It implements the ITU-R BT.601 conversion for standard-definition
- television. See more details in
- https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
- It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
- In OpenCV, it implements a JPEG conversion. See more details in
- https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
- Args:
- img (ndarray): The input image. It accepts:
- 1. np.uint8 type with range [0, 255];
- 2. np.float32 type with range [0, 1].
-
- Returns:
- ndarray: The converted BGR image. The output image has the same type
- and range as input image.
- """
- img_type = img.dtype
- img = _convert_input_type_range(img) * 255
- out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
- [0.00791071, -0.00153632, 0],
- [0, -0.00318811, 0.00625893]]) * 255.0 + [
- -276.836, 135.576, -222.921
- ]
- out_img = _convert_output_type_range(out_img, img_type)
- return out_img
-
-
-def convert_color_factory(src, dst):
-
- code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
-
- def convert_color(img):
- out_img = cv2.cvtColor(img, code)
- return out_img
-
- convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()}
- image.
-
- Args:
- img (ndarray or str): The input image.
-
- Returns:
- ndarray: The converted {dst.upper()} image.
- """
-
- return convert_color
-
-
-bgr2rgb = convert_color_factory('bgr', 'rgb')
-
-rgb2bgr = convert_color_factory('rgb', 'bgr')
-
-bgr2hsv = convert_color_factory('bgr', 'hsv')
-
-hsv2bgr = convert_color_factory('hsv', 'bgr')
-
-bgr2hls = convert_color_factory('bgr', 'hls')
-
-hls2bgr = convert_color_factory('hls', 'bgr')
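The factory generates thin `cv2.cvtColor` wrappers, so usage is symmetric across all the pairs defined above; a small check on a pure-red BGR image (assuming the converters from this module):

```py
import numpy as np

img_bgr = np.zeros((4, 4, 3), dtype=np.uint8)
img_bgr[..., 2] = 255                  # pure red in BGR channel order

img_rgb = bgr2rgb(img_bgr)             # channels swapped
assert img_rgb[0, 0, 0] == 255 and img_rgb[0, 0, 2] == 0

img_hsv = bgr2hsv(img_bgr)             # OpenCV uint8 hue range is [0, 179]
print(img_hsv[0, 0])                   # [  0 255 255] for pure red
```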
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/ipython.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/ipython.py
deleted file mode 100644
index 7df727cd0ba5eb481a9e2568ffdd063bfce90314..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/ipython.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from IPython.core.magic import Magics, line_magic, magics_class # type: ignore
-from IPython.core.magic_arguments import (argument, magic_arguments, # type: ignore
- parse_argstring) # type: ignore
-
-from .main import find_dotenv, load_dotenv
-
-
-@magics_class
-class IPythonDotEnv(Magics):
-
- @magic_arguments()
- @argument(
- '-o', '--override', action='store_true',
- help="Indicate to override existing variables"
- )
- @argument(
- '-v', '--verbose', action='store_true',
- help="Indicate function calls to be verbose"
- )
- @argument('dotenv_path', nargs='?', type=str, default='.env',
- help='Search in increasingly higher folders for the `dotenv_path`')
- @line_magic
- def dotenv(self, line):
- args = parse_argstring(self.dotenv, line)
- # Locate the .env file
- dotenv_path = args.dotenv_path
- try:
- dotenv_path = find_dotenv(dotenv_path, True, True)
- except IOError:
- print("cannot find .env file")
- return
-
- # Load the .env file
- load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
-
-
-def load_ipython_extension(ipython):
- """Register the %dotenv magic."""
- ipython.register_magics(IPythonDotEnv)
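Once python-dotenv is installed, the magic is used from an IPython or Jupyter session roughly like this (a usage sketch mirroring the arguments declared above):

```py
# In [1]: %load_ext dotenv
# In [2]: %dotenv                       # load ./.env, searching parent folders
# In [3]: %dotenv -o -v path/to/.env    # override existing vars, verbose output
```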
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/status_codes.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/status_codes.py
deleted file mode 100644
index 4bd072be9769748a852740d037d5c63021472c9d..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/status_codes.py
+++ /dev/null
@@ -1,128 +0,0 @@
-r"""
-The ``codes`` object defines a mapping from common names for HTTP statuses
-to their numerical codes, accessible either as attributes or as dictionary
-items.
-
-Example::
-
- >>> import requests
- >>> requests.codes['temporary_redirect']
- 307
- >>> requests.codes.teapot
- 418
- >>> requests.codes['\o/']
- 200
-
-Some codes have multiple names, and both upper- and lower-case versions of
-the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
-``codes.okay`` all correspond to the HTTP status code 200.
-"""
-
-from .structures import LookupDict
-
-_codes = {
- # Informational.
- 100: ("continue",),
- 101: ("switching_protocols",),
- 102: ("processing",),
- 103: ("checkpoint",),
- 122: ("uri_too_long", "request_uri_too_long"),
- 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
- 201: ("created",),
- 202: ("accepted",),
- 203: ("non_authoritative_info", "non_authoritative_information"),
- 204: ("no_content",),
- 205: ("reset_content", "reset"),
- 206: ("partial_content", "partial"),
- 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
- 208: ("already_reported",),
- 226: ("im_used",),
- # Redirection.
- 300: ("multiple_choices",),
- 301: ("moved_permanently", "moved", "\\o-"),
- 302: ("found",),
- 303: ("see_other", "other"),
- 304: ("not_modified",),
- 305: ("use_proxy",),
- 306: ("switch_proxy",),
- 307: ("temporary_redirect", "temporary_moved", "temporary"),
- 308: (
- "permanent_redirect",
- "resume_incomplete",
- "resume",
- ), # "resume" and "resume_incomplete" to be removed in 3.0
- # Client Error.
- 400: ("bad_request", "bad"),
- 401: ("unauthorized",),
- 402: ("payment_required", "payment"),
- 403: ("forbidden",),
- 404: ("not_found", "-o-"),
- 405: ("method_not_allowed", "not_allowed"),
- 406: ("not_acceptable",),
- 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
- 408: ("request_timeout", "timeout"),
- 409: ("conflict",),
- 410: ("gone",),
- 411: ("length_required",),
- 412: ("precondition_failed", "precondition"),
- 413: ("request_entity_too_large",),
- 414: ("request_uri_too_large",),
- 415: ("unsupported_media_type", "unsupported_media", "media_type"),
- 416: (
- "requested_range_not_satisfiable",
- "requested_range",
- "range_not_satisfiable",
- ),
- 417: ("expectation_failed",),
- 418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
- 421: ("misdirected_request",),
- 422: ("unprocessable_entity", "unprocessable"),
- 423: ("locked",),
- 424: ("failed_dependency", "dependency"),
- 425: ("unordered_collection", "unordered"),
- 426: ("upgrade_required", "upgrade"),
- 428: ("precondition_required", "precondition"),
- 429: ("too_many_requests", "too_many"),
- 431: ("header_fields_too_large", "fields_too_large"),
- 444: ("no_response", "none"),
- 449: ("retry_with", "retry"),
- 450: ("blocked_by_windows_parental_controls", "parental_controls"),
- 451: ("unavailable_for_legal_reasons", "legal_reasons"),
- 499: ("client_closed_request",),
- # Server Error.
- 500: ("internal_server_error", "server_error", "/o\\", "✗"),
- 501: ("not_implemented",),
- 502: ("bad_gateway",),
- 503: ("service_unavailable", "unavailable"),
- 504: ("gateway_timeout",),
- 505: ("http_version_not_supported", "http_version"),
- 506: ("variant_also_negotiates",),
- 507: ("insufficient_storage",),
- 509: ("bandwidth_limit_exceeded", "bandwidth"),
- 510: ("not_extended",),
- 511: ("network_authentication_required", "network_auth", "network_authentication"),
-}
-
-codes = LookupDict(name="status_codes")
-
-
-def _init():
- for code, titles in _codes.items():
- for title in titles:
- setattr(codes, title, code)
- if not title.startswith(("\\", "/")):
- setattr(codes, title.upper(), code)
-
- def doc(code):
- names = ", ".join(f"``{n}``" for n in _codes[code])
- return "* %d: %s" % (code, names)
-
- global __doc__
- __doc__ = (
- __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes))
- if __doc__ is not None
- else None
- )
-
-
-_init()
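After `_init()` runs, every name in `_codes` is available in lower case, in upper case (except the slash/backslash aliases), and as a dictionary key:

```py
>>> import requests
>>> requests.codes.ok, requests.codes.OK, requests.codes["okay"]
(200, 200, 200)
>>> requests.codes.not_found, requests.codes["-o-"]
(404, 404)
```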
diff --git a/spaces/AtomdffAI/wechatgpt4atom/docker/build.alpine.sh b/spaces/AtomdffAI/wechatgpt4atom/docker/build.alpine.sh
deleted file mode 100644
index 6fda600d2d6cac087c5798a53788e8d3da8e17d8..0000000000000000000000000000000000000000
--- a/spaces/AtomdffAI/wechatgpt4atom/docker/build.alpine.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-CHATGPT_ON_WECHAT_TAG=1.0.2
-
-docker build -f Dockerfile.alpine \
- --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \
- -t zhayujie/chatgpt-on-wechat .
-
-docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-alpine
-
\ No newline at end of file
diff --git a/spaces/AvaterClasher/Food_Classifier_Refined_MONI/app.py b/spaces/AvaterClasher/Food_Classifier_Refined_MONI/app.py
deleted file mode 100644
index 14d52fc5293483a1931a0eb575b3cbe3983087d1..0000000000000000000000000000000000000000
--- a/spaces/AvaterClasher/Food_Classifier_Refined_MONI/app.py
+++ /dev/null
@@ -1,70 +0,0 @@
-### 1. Imports and class names setup ###
-import gradio as gr
-import os
-import torch
-
-from model import create_effnetb2_model
-from timeit import default_timer as timer
-from typing import Tuple, Dict
-
-# Setup class names
-with open("class_names.txt", "r") as f:
- class_names = [food_name.strip() for food_name in f.readlines()]
-
-### 2. Model and transforms preparation ###
-# Create model and transforms
-effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=101)
-
-# Load saved weights
-effnetb2.load_state_dict(
- torch.load(f="food101.pth",
- map_location=torch.device("cpu")) # load to CPU
-)
-
-### 3. Predict function ###
-
-def predict(img) -> Tuple[Dict, float]:
- # Start a timer
- start_time = timer()
-
- # Transform the input image for use with EffNetB2
- img = effnetb2_transforms(img).unsqueeze(0) # unsqueeze = add batch dimension on 0th index
-
- # Put model into eval mode, make prediction
- effnetb2.eval()
- with torch.inference_mode():
-        # Pass transformed image through the model and turn the prediction logits into probabilities
- pred_probs = torch.softmax(effnetb2(img), dim=1)
-
- # Create a prediction label and prediction probability dictionary
- pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
-
- # Calculate pred time
- end_time = timer()
- pred_time = round(end_time - start_time, 4)
-
- # Return pred dict and pred time
- return pred_labels_and_probs, pred_time
-
-### 4. Gradio app ###
-
-# Create title, description and article
-title = "Food Classifier [Food 101] 🍥🍥🍥"
-description = ""
-article = ""
-
-# Create example list
-example_list = [["examples/" + example] for example in os.listdir("examples")]
-
-# Create the Gradio demo
-demo = gr.Interface(fn=predict, # maps inputs to outputs
- inputs=gr.Image(type="pil"),
- outputs=[gr.Label(num_top_classes=5, label="Predictions"),
- gr.Number(label="Prediction time (s)")],
- examples=example_list,
- title=title,
- description=description,
- article=article)
-
-# Launch the demo!
-demo.launch()
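For a quick local smoke test of `predict()` outside Gradio (a sketch; it assumes the bundled `examples/` folder contains at least one image and is run at the bottom of this app.py):

```py
from PIL import Image

img = Image.open(example_list[0][0])
pred_labels_and_probs, pred_time = predict(img)

top5 = sorted(pred_labels_and_probs.items(), key=lambda kv: kv[1], reverse=True)[:5]
print(top5)       # the 5 classes gr.Label will display, highest probability first
print(pred_time)  # seconds, shown in the "Prediction time (s)" number box
```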
diff --git a/spaces/BLACKHOST/timer/README.md b/spaces/BLACKHOST/timer/README.md
deleted file mode 100644
index a61f53cad7c71056704a816332bf9225cf0c6b71..0000000000000000000000000000000000000000
--- a/spaces/BLACKHOST/timer/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Timer
-emoji: 💩
-colorFrom: pink
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Bambicita/rvc-models/README.md b/spaces/Bambicita/rvc-models/README.md
deleted file mode 100644
index 6c2e0c6e7f06e04e1f9de072175ac17c9dd63081..0000000000000000000000000000000000000000
--- a/spaces/Bambicita/rvc-models/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Rvc Models
-emoji: 🎤
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: ArkanDash/rvc-models
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Benson/text-generation/Examples/Cuerda Hroe 1.3.3 Mod Apk.md b/spaces/Benson/text-generation/Examples/Cuerda Hroe 1.3.3 Mod Apk.md
deleted file mode 100644
index aea6933b80d452487c6ba1cc071458bbb5abf657..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cuerda Hroe 1.3.3 Mod Apk.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-
-Rope Hero 1.3.3 Mod Apk: a superhero game with unlimited money and fun
-
-If you are looking for a superhero game that lets you swing around a city on a rope, fight crime, and have unlimited money and resources, you should try Rope Hero 1.3.3 Mod Apk. It is a modified version of the popular action game Rope Hero: Vice Town, which has been downloaded more than 100 million times from the Google Play Store. In this article, we explain what Rope Hero 1.3.3 Mod Apk is, why you should play it, how to play it, and answer some frequently asked questions about it.
-
-What is Rope Hero 1.3.3 Mod Apk?
-
-A brief introduction to the game and its features
-
-Rope Hero 1.3.3 Mod Apk is a third-person action game that puts you in the role of a blue superhero armed with a rope that grants unlimited superpowers. You can use the rope to leap from building to building like a spider, climb walls, fly through the air, and land with force. You can also use it to grab enemies, vehicles, objects, and even helicopters. The game has a large open world that you can explore freely, with different districts, missions, activities, and secrets, and you can customize your hero with different skins, weapons, vehicles, and abilities. It features realistic physics, impressive graphics, and smooth gameplay.
-
-How to download and install the mod apk on your device
-
-To download and install Rope Hero 1.3.3 Mod Apk on your device, follow these simple steps:
-
-1. Click this link to download the mod apk file. Make sure you have enough storage space on your device.
-2. Go to your device settings and allow the installation of apps from unknown sources.
-3. Find the downloaded file in your file manager and tap it to install.
-4. Wait for the installation to finish, then launch the game.
-
-Why play Rope Hero 1.3.3 Mod Apk?
-
-The benefits of playing with unlimited money and other mod features
-
-One of the main reasons to play Rope Hero 1.3.3 Mod Apk is that you can enjoy the game with unlimited money and other mod features. With unlimited money you can buy any weapon, vehicle, skin, or ability you want without worrying about the cost, upgrade your hero to the maximum level, and unlock every skill and perk. The other mod features give you unlimited health, ammo, and energy, remove ads, and let you enable god mode, one-hit kills, and free shopping. These features will make you invincible and unstoppable in the game.
-
-The challenges and missions you can enjoy in the game
-
-Another reason to play Rope Hero 1.3.3 Mod Apk is the variety of challenges and missions that keep you entertained and engaged. The main storyline has you fighting a criminal organization called the Dark Clan, facing different enemies, bosses, and missions as the story progresses. There are also side missions you can complete for extra rewards and fun: helping citizens in need, stopping robberies, chasing criminals, rescuing hostages, and more. Daily missions grant bonus money and items. The game has plenty of content and variety to keep you hooked for hours.
-
-Tips and tricks to master the game and become a superhero
-
-The last reason to play Rope Hero 1.3.3 Mod Apk is that, with a few tips and tricks, you can master the game and become a superhero. Here are some of them:
-
-Choose your weapons carefully. The game has a wide range of weapons you can use against your enemies: guns, grenades, rockets, lasers, swords, hammers, and more. Each weapon has its own advantages and disadvantages, so pick the one that suits your style and situation, and switch between weapons during combat for extra flexibility.
-
-Upgrade your hero regularly. The game lets you upgrade your hero with different abilities and perks that improve your performance: health, damage, speed, energy, defense, and more. You can also unlock new skills that grant special powers such as fireballs, lightning, and telekinesis. Upgrading your hero makes you stronger and more versatile.
-
-How do you play Rope Hero 1.3.3 Mod Apk?
-
-The basic controls and gameplay mechanics
-
-Rope Hero 1.3.3 Mod Apk is easy to play thanks to its simple controls and mechanics. A virtual joystick on the left side of the screen moves your hero, while buttons on the right side let you jump, shoot, use the rope, or switch weapons. A mini-map in the top-left corner shows your location, objectives, enemies, and allies, and a menu button in the top-right corner gives access to your inventory, settings, missions, map, shop, and more. The simple interface makes the game easy to navigate and play.
-
-The best weapons and vehicles to use in the game
-
-Rope Hero 1.3.3 Mod Apk has plenty of weapons and vehicles you can use in the game. Here are some of the best:
-
-
-| Weapon | Description |
-| --- | --- |
-| Laser gun | A futuristic weapon that fires energy beams that can pierce through enemies and objects. |
-| Sword | A melee weapon that lets you slice through your enemies with style and precision. |
-
-| Vehicle | Description |
-| --- | --- |
-| Motorcycle | A fast and agile vehicle that lets you speed through the streets and perform stunts. |
-| Tank | A heavy, armored vehicle that lets you destroy your enemies and crush obstacles. |
-| Helicopter | A versatile flying vehicle that lets you fly over the city and shoot from the air. |
-
-
-The different modes and districts to explore in the game
-
-Rope Hero 1.3.3 Mod Apk has different modes and districts that you can explore in the game. Here are some of them:
-
-Story mode: the main mode of the game, where you follow the plot and complete missions to defeat the Dark Clan. You will encounter different characters, locations, and events in this mode.
-
-Free mode: the mode where you can roam the city freely and do whatever you want. You can find side missions, activities, secrets, and challenges, and interact with NPCs, vehicles, and objects.
-
-Survival mode: the mode where you have to survive as long as possible against waves of enemies attacking from every direction. You can use your weapons, vehicles, and abilities to fend them off, and earn money and items along the way.
-
-Districts: the game has different districts to explore across the city, each with its own theme, atmosphere, and features, including Chinatown, Downtown, the Industrial Zone, the Military Base, and the Airport. Each district has its own enemies, missions, secrets, and landmarks.
-
-Conclusion
-
-A summary of the main points and a call to action
-
-
-Frequently asked questions
-
-Is Rope Hero 1.3.3 Mod Apk safe to download and play?
-
-Yes, Rope Hero 1.3.3 Mod Apk is safe to download and play. The mod apk file is scanned for viruses and malware before being uploaded to our site, and it does not require root or jailbreak to run on your device. However, we recommend downloading the mod apk only from our site, since other sources may contain harmful or fake files.
-
-What are the minimum requirements to play Rope Hero 1.3.3 Mod Apk?
-
-The minimum requirements to play Rope Hero 1.3.3 Mod Apk are as follows:
-
-Android 4.4 or higher
-
-At least 100 MB of free storage space
-
-A stable Internet connection
-
-How do I update Rope Hero 1.3.3 Mod Apk to the latest version?
-
-To update Rope Hero 1.3.3 Mod Apk to the latest version, follow these steps:
-
-1. Delete the previous version of the mod apk from your device.
-2. Download the latest version of the mod apk from our site.
-3. Install the new version of the mod apk on your device.
-4. Launch the game and enjoy the new features.
-
-How do I contact the developers of Rope Hero 1.3.3 Mod Apk for information or support?
-
-To contact the developers of Rope Hero 1.3.3 Mod Apk for information or support, you can use one of these methods:
 If you check the box below, the GPU attribution will automatically be removed after training is done and the model is uploaded. If not, don't forget to come back here and swap the hardware back to CPU.
'''
- else:
- summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps.
'''
-
- return([gr.update(visible=True), gr.update(visible=True, value=summary_sentence)])
-
-def update_steps(*files_list):
- file_counter = 0
- for i, files in enumerate(files_list):
- if(files):
- file_counter+=len(files)
- return(gr.update(value=file_counter*200))
-
-def visualise_progress_bar():
- return gr.update(visible=True)
-
-def pad_image(image):
- w, h = image.size
- if w == h:
- return image
- elif w > h:
- new_image = Image.new(image.mode, (w, w), (0, 0, 0))
- new_image.paste(image, (0, (w - h) // 2))
- return new_image
- else:
- new_image = Image.new(image.mode, (h, h), (0, 0, 0))
- new_image.paste(image, ((h - w) // 2, 0))
- return new_image
-
-def validate_model_upload(hf_token, model_name):
- if(hf_token != ''):
- api = HfApi()
- try:
- _ = api.whoami(hf_token)
- except:
- raise gr.Error("You have inserted an invalid Hugging Face token")
- try:
- if(is_spaces):
- update_repo_visibility(repo_id=os.environ['SPACE_ID'], private=True, token=hf_token, repo_type="space")
- except:
- raise gr.Error("Oops, you created a Hugging Face token with read permissions only. You need one with write permissions")
- else:
- raise gr.Error("Please insert a Hugging Face Token (make sure to create it with write permissions)")
- if(model_name == ""):
- raise gr.Error("Please fill in your model's name")
-
-def swap_hardware(hf_token, hardware="cpu-basic"):
- hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
- headers = { "authorization" : f"Bearer {hf_token}"}
- body = {'flavor': hardware}
- requests.post(hardware_url, json = body, headers=headers)
-
-def swap_sleep_time(hf_token,sleep_time):
- sleep_time_url = f"https://huggingface.co/api/spaces/{os.environ['SPACE_ID']}/sleeptime"
- headers = { "authorization" : f"Bearer {hf_token}"}
- body = {'seconds':sleep_time}
- requests.post(sleep_time_url,json=body,headers=headers)
-
-def get_sleep_time(hf_token):
- sleep_time_url = f"https://huggingface.co/api/spaces/{os.environ['SPACE_ID']}"
- headers = { "authorization" : f"Bearer {hf_token}"}
- response = requests.get(sleep_time_url,headers=headers)
- try:
- gcTimeout = response.json()['runtime']['gcTimeout']
- except:
- gcTimeout = None
- return gcTimeout
-
-def write_to_community(title, description,hf_token):
- from huggingface_hub import HfApi
- api = HfApi()
- api.create_discussion(repo_id=os.environ['SPACE_ID'], title=title, description=description,repo_type="space", token=hf_token)
-
-def train(progress=gr.Progress(track_tqdm=True), *inputs):
- which_model = inputs[-10]
- if(which_model == ""):
- raise gr.Error("You forgot to select a base model to use")
-
- if is_shared_ui:
- raise gr.Error("This Space only works in duplicated instances")
- if not is_gpu_associated:
- raise gr.Error("Please associate a T4 or A10G GPU for this Space")
- hf_token = inputs[-5]
- model_name = inputs[-7]
- if(is_spaces):
- sleep_time = get_sleep_time(hf_token)
- if sleep_time:
- swap_sleep_time(hf_token, -1)
- remove_attribution_after = inputs[-6]
- else:
- remove_attribution_after = False
-
- if(remove_attribution_after):
- validate_model_upload(hf_token, model_name)
-
- torch.cuda.empty_cache()
- if 'pipe' in globals():
- global pipe, pipe_is_set
- del pipe
- pipe_is_set = False
- gc.collect()
-
- if os.path.exists("output_model"): shutil.rmtree('output_model')
- if os.path.exists("instance_images"): shutil.rmtree('instance_images')
- if os.path.exists("diffusers_model.tar"): os.remove("diffusers_model.tar")
- if os.path.exists("model.ckpt"): os.remove("model.ckpt")
- if os.path.exists("hastrained.success"): os.remove("hastrained.success")
- file_counter = 0
- resolution = 512 if which_model != "v2-1-768" else 768
- for i, input in enumerate(inputs):
- if(i < maximum_concepts-1):
- if(input):
- os.makedirs('instance_images',exist_ok=True)
- files = inputs[i+(maximum_concepts*2)]
- prompt = inputs[i+maximum_concepts]
- if(prompt == "" or prompt == None):
- raise gr.Error("You forgot to define your concept prompt")
- for j, file_temp in enumerate(files):
- file = Image.open(file_temp.name)
- image = pad_image(file)
- image = image.resize((resolution, resolution))
- extension = file_temp.name.split(".")[1]
- image = image.convert('RGB')
- image.save(f'instance_images/{prompt}_({j+1}).jpg', format="JPEG", quality = 100)
- file_counter += 1
-
- os.makedirs('output_model',exist_ok=True)
- uses_custom = inputs[-1]
- type_of_thing = inputs[-4]
- experimental_face_improvement = inputs[-9]
-
- if(uses_custom):
- Training_Steps = int(inputs[-3])
- Train_text_encoder_for = int(inputs[-2])
- else:
- if(type_of_thing == "object"):
- Train_text_encoder_for=30
-
- elif(type_of_thing == "style"):
- Train_text_encoder_for=15
-
- elif(type_of_thing == "person"):
- Train_text_encoder_for=70
-
- Training_Steps = file_counter*150
- if(type_of_thing == "person" and Training_Steps > 2600):
- Training_Steps = 2600 #Avoid overfitting on people's faces
- stptxt = int((Training_Steps*Train_text_encoder_for)/100)
- gradient_checkpointing = True if (experimental_face_improvement or which_model != "v1-5") else False
- cache_latents = True if which_model != "v1-5" else False
- if (type_of_thing == "object" or type_of_thing == "style" or (type_of_thing == "person" and not experimental_face_improvement)):
- args_general = argparse.Namespace(
- image_captions_filename = True,
- train_text_encoder = True if stptxt > 0 else False,
- stop_text_encoder_training = stptxt,
- save_n_steps = 0,
- pretrained_model_name_or_path = model_to_load,
- instance_data_dir="instance_images",
- class_data_dir=None,
- output_dir="output_model",
- instance_prompt="",
- seed=42,
- resolution=resolution,
- mixed_precision="fp16",
- train_batch_size=1,
- gradient_accumulation_steps=1,
- use_8bit_adam=True,
- learning_rate=2e-6,
- lr_scheduler="polynomial",
- lr_warmup_steps = 0,
- max_train_steps=Training_Steps,
- gradient_checkpointing=gradient_checkpointing,
- cache_latents=cache_latents,
- )
- print("Starting single training...")
- lock_file = open("intraining.lock", "w")
- lock_file.close()
- try:
- run_training(args_general)
- except Exception as e:
- if(is_spaces):
- title="There was an error on during your training"
- description=f'''
- Unfortunately there was an error during training your {model_name} model.
- Please check it out below. Feel free to report this issue to [Dreambooth Training](https://huggingface.co/spaces/multimodalart/dreambooth-training):
- ```
- {str(e)}
- ```
- '''
- swap_hardware(hf_token, "cpu-basic")
- write_to_community(title,description,hf_token)
-
-
- gc.collect()
- torch.cuda.empty_cache()
- if(which_model == "v1-5"):
- print("Adding Safety Checker to the model...")
- shutil.copytree(f"{safety_checker}/feature_extractor", "output_model/feature_extractor", dirs_exist_ok=True)
- shutil.copytree(f"{safety_checker}/safety_checker", "output_model/safety_checker", dirs_exist_ok=True)
- shutil.copy(f"model_index.json", "output_model/model_index.json")
-
- if(not remove_attribution_after):
- swap_sleep_time(hf_token, sleep_time)
- print("Archiving model file...")
- with tarfile.open("diffusers_model.tar", "w") as tar:
- tar.add("output_model", arcname=os.path.basename("output_model"))
- if os.path.exists("intraining.lock"): os.remove("intraining.lock")
- trained_file = open("hastrained.success", "w")
- trained_file.close()
- print("Training completed!")
- return [
- gr.update(visible=False), #progress_bar
- gr.update(visible=True, value=["diffusers_model.tar"]), #result
- gr.update(visible=True), #try_your_model
- gr.update(visible=True), #push_to_hub
- gr.update(visible=True), #convert_button
- gr.update(visible=False), #training_ongoing
- gr.update(visible=True) #completed_training
- ]
- else:
- where_to_upload = inputs[-8]
- push(model_name, where_to_upload, hf_token, which_model, True)
- swap_hardware(hf_token, "cpu-basic")
-
-pipe_is_set = False
-def generate(prompt, steps):
- torch.cuda.empty_cache()
- from diffusers import StableDiffusionPipeline
- global pipe_is_set
- if(not pipe_is_set):
- global pipe
- pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
- pipe = pipe.to("cuda")
- pipe_is_set = True
-
- image = pipe(prompt, num_inference_steps=steps).images[0]
- return(image)
-
-def push(model_name, where_to_upload, hf_token, which_model, comes_from_automated=False):
- validate_model_upload(hf_token, model_name)
- if(not os.path.exists("model.ckpt")):
- convert("output_model", "model.ckpt")
- from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
- from huggingface_hub import create_repo
- model_name_slug = slugify(model_name)
- api = HfApi()
- your_username = api.whoami(token=hf_token)["name"]
- if(where_to_upload == "My personal profile"):
- model_id = f"{your_username}/{model_name_slug}"
- else:
- model_id = f"sd-dreambooth-library/{model_name_slug}"
- headers = {"Authorization" : f"Bearer: {hf_token}", "Content-Type": "application/json"}
- response = requests.post("https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", headers=headers)
-
- print(f"Starting to upload the model {model_id}...")
- images_upload = os.listdir("instance_images")
- image_string = ""
- instance_prompt_list = []
- previous_instance_prompt = ''
- for i, image in enumerate(images_upload):
- instance_prompt = image.split("_")[0]
- if(instance_prompt != previous_instance_prompt):
- title_instance_prompt_string = instance_prompt
- instance_prompt_list.append(instance_prompt)
- else:
- title_instance_prompt_string = ''
- previous_instance_prompt = instance_prompt
- image_string = f'''{title_instance_prompt_string} {"(use that on your prompt)" if title_instance_prompt_string != "" else ""}
-{image_string}})'''
- readme_text = f'''---
-license: creativeml-openrail-m
-tags:
-- text-to-image
-widget:
-- text: {instance_prompt_list[0]}
----
-### {model_name} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the {which_model} base model
-
-You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts!
-
-Sample pictures of:
-{image_string}
-'''
- #Save the readme to a file
- readme_file = open("model.README.md", "w")
- readme_file.write(readme_text)
- readme_file.close()
- #Save the token identifier to a file
- text_file = open("token_identifier.txt", "w")
- text_file.write(', '.join(instance_prompt_list))
- text_file.close()
- try:
- create_repo(model_id,private=True, token=hf_token)
- except:
- import time
- epoch_time = str(int(time.time()))
- create_repo(f"{model_id}-{epoch_time}", private=True,token=hf_token)
- operations = [
- CommitOperationAdd(path_in_repo="token_identifier.txt", path_or_fileobj="token_identifier.txt"),
- CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="model.README.md"),
- CommitOperationAdd(path_in_repo=f"model.ckpt",path_or_fileobj="model.ckpt")
- ]
- api.create_commit(
- repo_id=model_id,
- operations=operations,
- commit_message=f"Upload the model {model_name}",
- token=hf_token
- )
- api.upload_folder(
- folder_path="output_model",
- repo_id=model_id,
- token=hf_token
- )
- api.upload_folder(
- folder_path="instance_images",
- path_in_repo="concept_images",
- repo_id=model_id,
- token=hf_token
- )
- if is_spaces:
- if(not comes_from_automated):
- extra_message = "Don't forget to remove the GPU attribution after you play with it."
- else:
- extra_message = "The GPU has been removed automatically as requested, and you can try the model via the model page"
- title=f"Your model {model_name} has finished trained from the Dreambooth Train Spaces!"
- description=f"Your model has been successfully uploaded to: https://huggingface.co/{model_id}. {extra_message}"
- write_to_community(title, description, hf_token)
- #api.create_discussion(repo_id=os.environ['SPACE_ID'], title=f"Your model {model_name} has finished trained from the Dreambooth Train Spaces!", description=f"Your model has been successfully uploaded to: https://huggingface.co/{model_id}. {extra_message}",repo_type="space", token=hf_token)
- print("Model uploaded successfully!")
- return [gr.update(visible=True, value=f"Successfully uploaded your model. Access it [here](https://huggingface.co/{model_id})"), gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"])]
-
-def convert_to_ckpt():
- if 'pipe' in globals():
- global pipe, pipe_is_set
- del pipe
- pipe_is_set = False
- gc.collect()
- convert("output_model", "model.ckpt")
- return gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"])
-
-def check_status(top_description):
- if os.path.exists("hastrained.success"):
- if is_spaces:
- update_top_tag = gr.update(value=f'''
-
-
Your model has finished training ✅
-
 Yay, congratulations on training your model. Scroll down to play with it and save it (either by downloading it or pushing it to the Hugging Face Hub). Once you are done and your model is safe, if you don't want to train a new one, go to the settings page and downgrade your Space to a CPU Basic
You closed the tab while your model was training, but it's all good! It is still training right now. You can click the "Open logs" button above here to check the training status. Once training is done, reload this tab to interact with your model
Attention - This Space doesn't work in this shared UI
-
For it to work, you can either run locally or duplicate the Space and run it on your own profile using a (paid) private T4-small or A10G-small GPU for training. A T4 costs US$0.60/h, so it should cost < US$1 to train most models using default settings with it!
You have successfully cloned the Dreambooth Training Space locally 🎉
-
 Run `pip install -r requirements-local.txt`
-
- ''')
- gr.Markdown("# Dreambooth Training UI 💭")
- gr.Markdown("Customize Stable Diffusion v1 or v2 (ⁿᵉʷ!) by giving it a few examples of a concept. Based on the [🧨 diffusers](https://github.com/huggingface/diffusers) implementation, additional techniques from [TheLastBen](https://github.com/TheLastBen/diffusers) and [ShivamShrirao](https://github.com/ShivamShrirao/diffusers)")
-
- with gr.Row() as what_are_you_training:
- type_of_thing = gr.Dropdown(label="What would you like to train?", choices=["object", "person", "style"], value="object", interactive=True)
- with gr.Column():
- base_model_to_use = gr.Dropdown(label="Which base model would you like to use?", choices=["v1-5", "v2-1-512", "v2-1-768"], value="v1-5", interactive=True)
-
- #Very hacky approach to emulate dynamically created Gradio components
- with gr.Row() as upload_your_concept:
- with gr.Column():
- thing_description = gr.Markdown("You are going to train an `object`, please upload 5-10 images of the object you are planning on training on from different angles/perspectives. You must have the right to do so and you are liable for the images you use, example")
- thing_experimental = gr.Checkbox(label="Improve faces (prior preservation) - can take longer training but can improve faces", visible=False, value=False)
- thing_image_example = gr.HTML('''''')
- things_naming = gr.Markdown("You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `cttoy` here). Images will be automatically cropped to 512x512.")
-
- with gr.Column():
- file_collection = []
- concept_collection = []
- buttons_collection = []
- delete_collection = []
- is_visible = []
-
- row = [None] * maximum_concepts
- for x in range(maximum_concepts):
- ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
- if(x == 0):
- visible = True
- is_visible.append(gr.State(value=True))
- else:
- visible = False
- is_visible.append(gr.State(value=False))
-
- file_collection.append(gr.File(file_types=["image"], label=f'''Upload the images for your {ordinal(x+1) if (x>0) else ""} concept''', file_count="multiple", interactive=True, visible=visible))
- with gr.Column(visible=visible) as row[x]:
- concept_collection.append(gr.Textbox(label=f'''{ordinal(x+1) if (x>0) else ""} concept prompt - use a unique, made up word to avoid collisions'''))
- with gr.Row():
- if(x < maximum_concepts-1):
- buttons_collection.append(gr.Button(value="Add +1 concept", visible=visible))
- if(x > 0):
- delete_collection.append(gr.Button(value=f"Delete {ordinal(x+1)} concept"))
-
- counter_add = 1
- for button in buttons_collection:
- if(counter_add < len(buttons_collection)):
- button.click(lambda:
- [gr.update(visible=True),gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), True, None],
- None,
- [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], buttons_collection[counter_add], is_visible[counter_add], file_collection[counter_add]], queue=False)
- else:
- button.click(lambda:[gr.update(visible=True),gr.update(visible=True), gr.update(visible=False), True], None, [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], is_visible[counter_add]], queue=False)
- counter_add += 1
-
- counter_delete = 1
- for delete_button in delete_collection:
- if(counter_delete < len(delete_collection)+1):
- delete_button.click(lambda:[gr.update(visible=False),gr.update(visible=False), gr.update(visible=True), False], None, [file_collection[counter_delete], row[counter_delete], buttons_collection[counter_delete-1], is_visible[counter_delete]], queue=False)
- counter_delete += 1
-
- with gr.Accordion("Custom Settings", open=False):
- swap_auto_calculated = gr.Checkbox(label="Use custom settings")
- gr.Markdown("If not checked, the % of frozen encoder will be tuned automatically to whether you are training an `object`, `person` or `style`. The text-encoder is frozen after 10% of the steps for a style, 30% of the steps for an object and 75% trained for persons. The number of steps varies between 1400 and 2400 depending on how many images uploaded. If you see too many artifacts in your output, it means it may have overfit and you need less steps. If your results aren't really what you wanted, it may be underfitting and you need more steps.")
- steps = gr.Number(label="How many steps", value=2400)
- perc_txt_encoder = gr.Number(label="Percentage of the training steps the text-encoder should be trained as well", value=30)
-
- with gr.Box(visible=False) as training_summary:
- training_summary_text = gr.HTML("", visible=True, label="Training Summary")
- is_advanced_visible = True if is_spaces else False
- training_summary_checkbox = gr.Checkbox(label="Automatically remove paid GPU attribution and upload model to the Hugging Face Hub after training", value=True, visible=is_advanced_visible)
- training_summary_model_name = gr.Textbox(label="Name of your model", visible=True)
- training_summary_where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], value="My personal profile", label="Upload to", visible=True)
- training_summary_token_message = gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.", visible=True)
- training_summary_token = gr.Textbox(label="Hugging Face Write Token", type="password", visible=True)
-
- train_btn = gr.Button("Start Training")
- progress_bar = gr.Textbox(visible=False)
- if(is_shared_ui):
- training_ongoing = gr.Markdown("## This Space only works in duplicated instances. Please duplicate it and try again!", visible=False)
- elif(not is_gpu_associated):
- training_ongoing = gr.Markdown("## Oops, you haven't associated your T4 or A10G GPU to this Space. Visit the Settings tab, associate and try again.", visible=False)
- else:
- training_ongoing = gr.Markdown("## Training is ongoing ⌛... You can close this tab if you like or just wait. If you did not check the `Remove GPU After training`, you can come back here to try your model and upload it after training. Don't forget to remove the GPU attribution after you are done. ", visible=False)
-
-
- #Post-training UI
- completed_training = gr.Markdown('''# ✅ Training completed.
- ### Don't forget to remove the GPU attribution after you are done trying and uploading your model''', visible=False)
-
- with gr.Row():
- with gr.Box(visible=False) as try_your_model:
- gr.Markdown("## Try your model")
- prompt = gr.Textbox(label="Type your prompt")
- result_image = gr.Image()
- inference_steps = gr.Slider(minimum=1, maximum=150, value=50, step=1)
- generate_button = gr.Button("Generate Image")
-
- with gr.Box(visible=False) as push_to_hub:
- gr.Markdown("## Push to Hugging Face Hub")
- model_name = gr.Textbox(label="Name of your model", placeholder="Tarsila do Amaral Style")
- where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], label="Upload to")
- gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.")
- hf_token = gr.Textbox(label="Hugging Face Write Token", type="password")
-
- push_button = gr.Button("Push to the Hub")
-
- result = gr.File(label="Download the uploaded models in the diffusers format", visible=True)
- success_message_upload = gr.Markdown(visible=False)
- convert_button = gr.Button("Convert to CKPT", visible=False)
-
- #Swap the examples and the % of text encoder trained depending if it is an object, person or style
- type_of_thing.change(fn=swap_text, inputs=[type_of_thing, base_model_to_use], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False)
-
- #Swap the base model
-
- base_model_to_use.change(fn=swap_text, inputs=[type_of_thing, base_model_to_use], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False)
- #base_model_to_use.change(fn=visualise_progress_bar, inputs=[], outputs=progress_bar)
- base_model_to_use.change(fn=swap_base_model, inputs=base_model_to_use, outputs=[])
- #Update the summary box below the UI according to how many images are uploaded and whether users are using custom settings or not
- for file in file_collection:
- #file.change(fn=update_steps,inputs=file_collection, outputs=steps)
- file.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
-
- thing_experimental.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
- base_model_to_use.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
- steps.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
- perc_txt_encoder.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
-
- #Give more options if the user wants to finish everything after training
- if(is_spaces):
- training_summary_checkbox.change(fn=checkbox_swap, inputs=training_summary_checkbox, outputs=[training_summary_token_message, training_summary_token, training_summary_model_name, training_summary_where_to_upload],queue=False, show_progress=False)
- #Add a message for while it is in training
-
- #train_btn.click(lambda:gr.update(visible=True), inputs=None, outputs=training_ongoing)
-
- #The main train function
- train_btn.click(lambda:gr.update(visible=True), inputs=[], outputs=progress_bar)
- train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[base_model_to_use]+[thing_experimental]+[training_summary_where_to_upload]+[training_summary_model_name]+[training_summary_checkbox]+[training_summary_token]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[progress_bar, result, try_your_model, push_to_hub, convert_button, training_ongoing, completed_training], queue=False)
-
- #Button to generate an image from your trained model after training
- generate_button.click(fn=generate, inputs=[prompt, inference_steps], outputs=result_image, queue=False)
- #Button to push the model to the Hugging Face Hub
- push_button.click(fn=push, inputs=[model_name, where_to_upload, hf_token, base_model_to_use], outputs=[success_message_upload, result], queue=False)
- #Button to convert the model to ckpt format
- convert_button.click(fn=convert_to_ckpt, inputs=[], outputs=result, queue=False)
-
- #Checks if the training is running
- demo.load(fn=check_status, inputs=top_description, outputs=[top_description, try_your_model, push_to_hub, result, convert_button], queue=False, show_progress=False)
-
-demo.queue(default_enabled=False).launch(debug=True)
\ No newline at end of file
diff --git a/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/models/panoptic_fpn_r101_fpn_psg.py b/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/models/panoptic_fpn_r101_fpn_psg.py
deleted file mode 100644
index 449ec6c9ff81c8447bc74029fad68d1bb3dc9598..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/models/panoptic_fpn_r101_fpn_psg.py
+++ /dev/null
@@ -1,8 +0,0 @@
-_base_ = './panoptic_fpn_r50_fpn_psg.py'
-
-model = dict(backbone=dict(
- depth=101,
- init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
-
-expt_name = 'panoptic_fpn_r101_fpn_psg'
-load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
diff --git a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/lapjv.h b/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/lapjv.h
deleted file mode 100644
index 0e34385a647bec225827370ff0041a391e628477..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/lapjv.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef LAPJV_H
-#define LAPJV_H
-
-#define LARGE 1000000
-
-#if !defined TRUE
-#define TRUE 1
-#endif
-#if !defined FALSE
-#define FALSE 0
-#endif
-
-#define NEW(x, t, n) if ((x = (t *)malloc(sizeof(t) * (n))) == 0) { return -1; }
-#define FREE(x) if (x != 0) { free(x); x = 0; }
-#define SWAP_INDICES(a, b) { int_t _temp_index = a; a = b; b = _temp_index; }
-
-#if 0
-#include <assert.h>
-#define ASSERT(cond) assert(cond)
-#define PRINTF(fmt, ...) printf(fmt, ##__VA_ARGS__)
-#define PRINT_COST_ARRAY(a, n) \
- while (1) { \
- printf(#a" = ["); \
- if ((n) > 0) { \
- printf("%f", (a)[0]); \
- for (uint_t j = 1; j < n; j++) { \
- printf(", %f", (a)[j]); \
- } \
- } \
- printf("]\n"); \
- break; \
- }
-#define PRINT_INDEX_ARRAY(a, n) \
- while (1) { \
- printf(#a" = ["); \
- if ((n) > 0) { \
- printf("%d", (a)[0]); \
- for (uint_t j = 1; j < n; j++) { \
- printf(", %d", (a)[j]); \
- } \
- } \
- printf("]\n"); \
- break; \
- }
-#else
-#define ASSERT(cond)
-#define PRINTF(fmt, ...)
-#define PRINT_COST_ARRAY(a, n)
-#define PRINT_INDEX_ARRAY(a, n)
-#endif
-
-
-typedef signed int int_t;
-typedef unsigned int uint_t;
-typedef double cost_t;
-typedef char boolean;
-typedef enum fp_t { FP_1 = 1, FP_2 = 2, FP_DYNAMIC = 3 } fp_t;
-
-extern int_t lapjv_internal(
- const uint_t n, cost_t *cost[],
- int_t *x, int_t *y);
-
-#endif // LAPJV_H
\ No newline at end of file
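
For orientation, `lapjv_internal` declared above solves a square linear assignment problem: it fills `x` with the column assigned to each row and `y` with the row assigned to each column. A hedged Python sketch of the same computation, using SciPy's `linear_sum_assignment` as a stand-in solver (SciPy is not used by this header; it is only an illustration), looks like this:

```python
# Sketch: the assignment problem that lapjv_internal solves, done with SciPy.
import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([
    [4.0, 1.0, 3.0],
    [2.0, 0.0, 5.0],
    [3.0, 2.0, 2.0],
])

row_ind, col_ind = linear_sum_assignment(cost)  # minimize total cost

# Mirror lapjv_internal's outputs: x[i] = column assigned to row i,
# y[j] = row assigned to column j (the inverse permutation).
x = col_ind
y = np.empty_like(col_ind)
y[col_ind] = row_ind

print(x, y, cost[row_ind, col_ind].sum())  # here: [1 0 2] [1 0 2] 5.0
```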
diff --git a/spaces/ECCV2022/bytetrack/yolox/data/samplers.py b/spaces/ECCV2022/bytetrack/yolox/data/samplers.py
deleted file mode 100644
index 064b13376b3c813ad6f9e5745496dd5027b65f0f..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/yolox/data/samplers.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-import torch
-import torch.distributed as dist
-from torch.utils.data.sampler import BatchSampler as torchBatchSampler
-from torch.utils.data.sampler import Sampler
-
-import itertools
-from typing import Optional
-
-
-class YoloBatchSampler(torchBatchSampler):
- """
- This batch sampler will generate mini-batches of (dim, index) tuples from another sampler.
- It works just like the :class:`torch.utils.data.sampler.BatchSampler`,
- but it will prepend a dimension, whilst ensuring it stays the same across one mini-batch.
- """
-
- def __init__(self, *args, input_dimension=None, mosaic=True, **kwargs):
- super().__init__(*args, **kwargs)
- self.input_dim = input_dimension
- self.new_input_dim = None
- self.mosaic = mosaic
-
- def __iter__(self):
- self.__set_input_dim()
- for batch in super().__iter__():
- yield [(self.input_dim, idx, self.mosaic) for idx in batch]
- self.__set_input_dim()
-
- def __set_input_dim(self):
- """ This function randomly changes the the input dimension of the dataset. """
- if self.new_input_dim is not None:
- self.input_dim = (self.new_input_dim[0], self.new_input_dim[1])
- self.new_input_dim = None
-
-
-class InfiniteSampler(Sampler):
- """
- In training, we only care about the "infinite stream" of training data.
- So this sampler produces an infinite stream of indices and
- all workers cooperate to correctly shuffle the indices and sample different indices.
- The samplers in each worker effectively produce `indices[worker_id::num_workers]`
- where `indices` is an infinite stream of indices consisting of
- `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
- or `range(size) + range(size) + ...` (if shuffle is False)
- """
-
- def __init__(
- self,
- size: int,
- shuffle: bool = True,
- seed: Optional[int] = 0,
- rank=0,
- world_size=1,
- ):
- """
- Args:
- size (int): the total number of data of the underlying dataset to sample from
- shuffle (bool): whether to shuffle the indices or not
- seed (int): the initial seed of the shuffle. Must be the same
- across all workers. If None, will use a random seed shared
- among workers (require synchronization among all workers).
- """
- self._size = size
- assert size > 0
- self._shuffle = shuffle
- self._seed = int(seed)
-
- if dist.is_available() and dist.is_initialized():
- self._rank = dist.get_rank()
- self._world_size = dist.get_world_size()
- else:
- self._rank = rank
- self._world_size = world_size
-
- def __iter__(self):
- start = self._rank
- yield from itertools.islice(
- self._infinite_indices(), start, None, self._world_size
- )
-
- def _infinite_indices(self):
- g = torch.Generator()
- g.manual_seed(self._seed)
- while True:
- if self._shuffle:
- yield from torch.randperm(self._size, generator=g)
- else:
- yield from torch.arange(self._size)
-
- def __len__(self):
- return self._size // self._world_size
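
Since the docstrings above describe the samplers only in prose, here is a minimal usage sketch (the import path, dataset size, and batch size are illustrative assumptions; in the real repo these samplers are wired up by the YOLOX data loader):

```python
# Sketch: composing InfiniteSampler with YoloBatchSampler (illustrative values).
import itertools
from yolox.data.samplers import InfiniteSampler, YoloBatchSampler

sampler = InfiniteSampler(size=10, shuffle=True, seed=0, rank=0, world_size=1)

# (sampler, batch_size, drop_last) come from torch's BatchSampler base class;
# input_dimension and mosaic are the extra keyword arguments defined above.
batch_sampler = YoloBatchSampler(
    sampler, batch_size=4, drop_last=False,
    input_dimension=(640, 640), mosaic=True,
)

# The stream is infinite, so only take a couple of batches for the demo.
# Every element is an (input_dim, index, mosaic) tuple, which lets the dataset
# resize all samples of one mini-batch to the same resolution.
for batch in itertools.islice(iter(batch_sampler), 2):
    print(batch)
```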
diff --git a/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_coco_stuff_10k.py b/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_coco_stuff_10k.py
deleted file mode 100644
index a1ec0375858ada8e4270b534fcd58106254c7fa9..0000000000000000000000000000000000000000
--- a/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_coco_stuff_10k.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import os
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.data.datasets import load_sem_seg
-
-COCO_CATEGORIES = [
- {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
- {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
- {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
- {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
- {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
- {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
- {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
- {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
- {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
- {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
- {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
- {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
- {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
- {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
- {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
- {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
- {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
- {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
- {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
- {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
- {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
- {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
- {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
- {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
- {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
- {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
- {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
- {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
- {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
- {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
- {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
- {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
- {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
- {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
- {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
- {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
- {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
- {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
- {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
- {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
- {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
- {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
- {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
- {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
- {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
- {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
- {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
- {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
- {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
- {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
- {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
- {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
- {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
- {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
- {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
- {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
- {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
- {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
- {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
- {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
- {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
- {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
- {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
- {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
- {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
- {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
- {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
- {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
- {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
- {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
- {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
- {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
- {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
- {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
- {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
- {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
- {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
- {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
- {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
- {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
- {"id": 92, "name": "banner", "supercategory": "textile"},
- {"id": 93, "name": "blanket", "supercategory": "textile"},
- {"id": 94, "name": "branch", "supercategory": "plant"},
- {"id": 95, "name": "bridge", "supercategory": "building"},
- {"id": 96, "name": "building-other", "supercategory": "building"},
- {"id": 97, "name": "bush", "supercategory": "plant"},
- {"id": 98, "name": "cabinet", "supercategory": "furniture-stuff"},
- {"id": 99, "name": "cage", "supercategory": "structural"},
- {"id": 100, "name": "cardboard", "supercategory": "raw-material"},
- {"id": 101, "name": "carpet", "supercategory": "floor"},
- {"id": 102, "name": "ceiling-other", "supercategory": "ceiling"},
- {"id": 103, "name": "ceiling-tile", "supercategory": "ceiling"},
- {"id": 104, "name": "cloth", "supercategory": "textile"},
- {"id": 105, "name": "clothes", "supercategory": "textile"},
- {"id": 106, "name": "clouds", "supercategory": "sky"},
- {"id": 107, "name": "counter", "supercategory": "furniture-stuff"},
- {"id": 108, "name": "cupboard", "supercategory": "furniture-stuff"},
- {"id": 109, "name": "curtain", "supercategory": "textile"},
- {"id": 110, "name": "desk-stuff", "supercategory": "furniture-stuff"},
- {"id": 111, "name": "dirt", "supercategory": "ground"},
- {"id": 112, "name": "door-stuff", "supercategory": "furniture-stuff"},
- {"id": 113, "name": "fence", "supercategory": "structural"},
- {"id": 114, "name": "floor-marble", "supercategory": "floor"},
- {"id": 115, "name": "floor-other", "supercategory": "floor"},
- {"id": 116, "name": "floor-stone", "supercategory": "floor"},
- {"id": 117, "name": "floor-tile", "supercategory": "floor"},
- {"id": 118, "name": "floor-wood", "supercategory": "floor"},
- {"id": 119, "name": "flower", "supercategory": "plant"},
- {"id": 120, "name": "fog", "supercategory": "water"},
- {"id": 121, "name": "food-other", "supercategory": "food-stuff"},
- {"id": 122, "name": "fruit", "supercategory": "food-stuff"},
- {"id": 123, "name": "furniture-other", "supercategory": "furniture-stuff"},
- {"id": 124, "name": "grass", "supercategory": "plant"},
- {"id": 125, "name": "gravel", "supercategory": "ground"},
- {"id": 126, "name": "ground-other", "supercategory": "ground"},
- {"id": 127, "name": "hill", "supercategory": "solid"},
- {"id": 128, "name": "house", "supercategory": "building"},
- {"id": 129, "name": "leaves", "supercategory": "plant"},
- {"id": 130, "name": "light", "supercategory": "furniture-stuff"},
- {"id": 131, "name": "mat", "supercategory": "textile"},
- {"id": 132, "name": "metal", "supercategory": "raw-material"},
- {"id": 133, "name": "mirror-stuff", "supercategory": "furniture-stuff"},
- {"id": 134, "name": "moss", "supercategory": "plant"},
- {"id": 135, "name": "mountain", "supercategory": "solid"},
- {"id": 136, "name": "mud", "supercategory": "ground"},
- {"id": 137, "name": "napkin", "supercategory": "textile"},
- {"id": 138, "name": "net", "supercategory": "structural"},
- {"id": 139, "name": "paper", "supercategory": "raw-material"},
- {"id": 140, "name": "pavement", "supercategory": "ground"},
- {"id": 141, "name": "pillow", "supercategory": "textile"},
- {"id": 142, "name": "plant-other", "supercategory": "plant"},
- {"id": 143, "name": "plastic", "supercategory": "raw-material"},
- {"id": 144, "name": "platform", "supercategory": "ground"},
- {"id": 145, "name": "playingfield", "supercategory": "ground"},
- {"id": 146, "name": "railing", "supercategory": "structural"},
- {"id": 147, "name": "railroad", "supercategory": "ground"},
- {"id": 148, "name": "river", "supercategory": "water"},
- {"id": 149, "name": "road", "supercategory": "ground"},
- {"id": 150, "name": "rock", "supercategory": "solid"},
- {"id": 151, "name": "roof", "supercategory": "building"},
- {"id": 152, "name": "rug", "supercategory": "textile"},
- {"id": 153, "name": "salad", "supercategory": "food-stuff"},
- {"id": 154, "name": "sand", "supercategory": "ground"},
- {"id": 155, "name": "sea", "supercategory": "water"},
- {"id": 156, "name": "shelf", "supercategory": "furniture-stuff"},
- {"id": 157, "name": "sky-other", "supercategory": "sky"},
- {"id": 158, "name": "skyscraper", "supercategory": "building"},
- {"id": 159, "name": "snow", "supercategory": "ground"},
- {"id": 160, "name": "solid-other", "supercategory": "solid"},
- {"id": 161, "name": "stairs", "supercategory": "furniture-stuff"},
- {"id": 162, "name": "stone", "supercategory": "solid"},
- {"id": 163, "name": "straw", "supercategory": "plant"},
- {"id": 164, "name": "structural-other", "supercategory": "structural"},
- {"id": 165, "name": "table", "supercategory": "furniture-stuff"},
- {"id": 166, "name": "tent", "supercategory": "building"},
- {"id": 167, "name": "textile-other", "supercategory": "textile"},
- {"id": 168, "name": "towel", "supercategory": "textile"},
- {"id": 169, "name": "tree", "supercategory": "plant"},
- {"id": 170, "name": "vegetable", "supercategory": "food-stuff"},
- {"id": 171, "name": "wall-brick", "supercategory": "wall"},
- {"id": 172, "name": "wall-concrete", "supercategory": "wall"},
- {"id": 173, "name": "wall-other", "supercategory": "wall"},
- {"id": 174, "name": "wall-panel", "supercategory": "wall"},
- {"id": 175, "name": "wall-stone", "supercategory": "wall"},
- {"id": 176, "name": "wall-tile", "supercategory": "wall"},
- {"id": 177, "name": "wall-wood", "supercategory": "wall"},
- {"id": 178, "name": "water-other", "supercategory": "water"},
- {"id": 179, "name": "waterdrops", "supercategory": "water"},
- {"id": 180, "name": "window-blind", "supercategory": "window"},
- {"id": 181, "name": "window-other", "supercategory": "window"},
- {"id": 182, "name": "wood", "supercategory": "solid"},
-]
-
-
-def _get_coco_stuff_meta():
- # Id 0 is reserved for ignore_label; we change ignore_label from 0
- # to 255 in our pre-processing.
- stuff_ids = [k["id"] for k in COCO_CATEGORIES]
- assert len(stuff_ids) == 171, len(stuff_ids)
-
- # For semantic segmentation, this mapping maps from dataset category ids
- # to contiguous ids in [0, 170] (used in models; the dataset ids are used for processing results)
- stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
- stuff_classes = [k["name"] for k in COCO_CATEGORIES]
-
- ret = {
- "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
- "stuff_classes": stuff_classes,
- }
- return ret
-
-
-def register_all_coco_stuff_10k(root):
- root = os.path.join(root, "coco", "coco_stuff_10k")
- meta = _get_coco_stuff_meta()
- for name, image_dirname, sem_seg_dirname in [
- ("train", "images_detectron2/train", "annotations_detectron2/train"),
- ("test", "images_detectron2/test", "annotations_detectron2/test"),
- ]:
- image_dir = os.path.join(root, image_dirname)
- gt_dir = os.path.join(root, sem_seg_dirname)
- name = f"coco_2017_{name}_stuff_10k_sem_seg"
- DatasetCatalog.register(
- name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
- )
- MetadataCatalog.get(name).set(
- image_root=image_dir,
- sem_seg_root=gt_dir,
- evaluator_type="sem_seg",
- ignore_label=255,
- **meta,
- )
-
-
-_root = os.getenv("DETECTRON2_DATASETS", "datasets")
-register_all_coco_stuff_10k(_root)
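
Registration only records loader functions and metadata; a short sketch of consuming one registered split may clarify what the code above sets up (it assumes detectron2 is installed and that `$DETECTRON2_DATASETS/coco/coco_stuff_10k` has been prepared locally, which is an assumption about the environment):

```python
# Sketch: reading back a split registered by register_all_coco_stuff_10k above.
from detectron2.data import DatasetCatalog, MetadataCatalog

name = "coco_2017_train_stuff_10k_sem_seg"

meta = MetadataCatalog.get(name)
print(len(meta.stuff_classes))  # 171 class names taken from COCO_CATEGORIES
print(meta.ignore_label)        # 255, as set during registration

# DatasetCatalog.get() invokes the registered load_sem_seg lambda, so the
# image/annotation folders must actually exist on disk for this to succeed.
dicts = DatasetCatalog.get(name)
print(dicts[0]["file_name"], dicts[0]["sem_seg_file_name"])
```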
diff --git a/spaces/Felix123456/bingo/src/lib/hooks/chat-history.ts b/spaces/Felix123456/bingo/src/lib/hooks/chat-history.ts
deleted file mode 100644
index c6fbf3fecfa86fe553f56acc8253236b8f22a775..0000000000000000000000000000000000000000
--- a/spaces/Felix123456/bingo/src/lib/hooks/chat-history.ts
+++ /dev/null
@@ -1,62 +0,0 @@
-import { zip } from 'lodash-es'
-import { ChatMessageModel, BotId } from '@/lib/bots/bing/types'
-import { Storage } from '../storage'
-
-/**
- * conversations:$botId => Conversation[]
- * conversation:$botId:$cid:messages => ChatMessageModel[]
- */
-
-interface Conversation {
- id: string
- createdAt: number
-}
-
-type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] }
-
-async function loadHistoryConversations(botId: BotId): Promise<Conversation[]> {
- const key = `conversations:${botId}`
- const { [key]: value } = await Storage.get(key)
- return value || []
-}
-
-async function deleteHistoryConversation(botId: BotId, cid: string) {
- const conversations = await loadHistoryConversations(botId)
- const newConversations = conversations.filter((c) => c.id !== cid)
- await Storage.set({ [`conversations:${botId}`]: newConversations })
-}
-
-async function loadConversationMessages(botId: BotId, cid: string): Promise<ChatMessageModel[]> {
- const key = `conversation:${botId}:${cid}:messages`
- const { [key]: value } = await Storage.get(key)
- return value || []
-}
-
-export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) {
- const conversations = await loadHistoryConversations(botId)
- if (!conversations.some((c) => c.id === cid)) {
- conversations.unshift({ id: cid, createdAt: Date.now() })
- await Storage.set({ [`conversations:${botId}`]: conversations })
- }
- const key = `conversation:${botId}:${cid}:messages`
- await Storage.set({ [key]: messages })
-}
-
-export async function loadHistoryMessages(botId: BotId): Promise<ConversationWithMessages[]> {
- const conversations = await loadHistoryConversations(botId)
- const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id)))
- return zip(conversations, messagesList).map(([c, messages]) => ({
- id: c!.id,
- createdAt: c!.createdAt,
- messages: messages!,
- }))
-}
-
-export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) {
- const messages = await loadConversationMessages(botId, conversationId)
- const newMessages = messages.filter((m) => m.id !== messageId)
- await setConversationMessages(botId, conversationId, newMessages)
- if (!newMessages.length) {
- await deleteHistoryConversation(botId, conversationId)
- }
-}
diff --git a/spaces/Fr33d0m21/stabilityai-stable-diffusion-2-1/README.md b/spaces/Fr33d0m21/stabilityai-stable-diffusion-2-1/README.md
deleted file mode 100644
index f8e133b76e6e83810fcfbf532c9d551b2b3fbc12..0000000000000000000000000000000000000000
--- a/spaces/Fr33d0m21/stabilityai-stable-diffusion-2-1/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stabilityai Stable Diffusion 2 1
-emoji: 🦀
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/GeemiW/pdb_answers/app.py b/spaces/GeemiW/pdb_answers/app.py
deleted file mode 100644
index 8b882edf8783691bbd3fb2a1c91d7235abb12885..0000000000000000000000000000000000000000
--- a/spaces/GeemiW/pdb_answers/app.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import streamlit as st #Web App
-import os
-import stqdm
-import time
-from utils import *
-
-api_key = ' '
-
-st.write(
- """""",
- unsafe_allow_html=True,
-)
-
-
-st.title("PDB Scientist 🧑‍🔬")
-st.markdown("Geemi Wellawatte ([@GWellawatte](https://twitter.com/GWellawatte))")
-st.markdown("#### This tool will allow you information related to a protein. You only have to enter the PDB ID of the related protein. It uses OpenAI's GPT models, and you must have your own API key. Each query is about 10k tokens, which costs about only $0.20 on your own API key, charged by OpenAI.")
-st.markdown("##### Current version queries articles listed in the PDB database with relevance to the PDB ID, from the PubMed database. ")
-st.markdown("##### Currently data is extracted from the abstracts only as this is a ✨FREE✨ paper-scraper!")
-st.markdown("Used libraries:\n * [PaperQA](https://github.com/whitead/paper-qa)")
-
-api_key_url = 'https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key'
-
-api_key = st.text_input('OpenAI API Key',
- placeholder='sk-...',
- help=f"['What is that?']({api_key_url})",
- type="password",
- value = '')
-
-os.environ["OPENAI_API_KEY"] = f"{api_key}"
-if len(api_key) != 51:
- st.warning('Please enter a valid OpenAI API key.', icon="⚠️")
-
-
-with st.form(key='pdbid_form', clear_on_submit = False):
- pdbid = st.text_input("Input search query here:", placeholder='PDB ID', value= '')
-
- submitButton1 = st.form_submit_button('Submit')
-
-if submitButton1:
- st.write("PDB ID submitted! ✅")
- pdb_query = PDBQuery(pdbid)
- citations = pdb_query.create_citation()
- if 'citations' not in st.session_state:
- st.session_state.key = 'citations'
- st.session_state['citations'] = citations
-
- pdb_query.write_webdata()
-
-def answer_query(question):
- import paperqa
-
- citations = st.session_state['citations']
-
- docs = paperqa.Docs()
- docs.add('web_data.txt', ''.join(citations))
-
- answer = docs.query(question)
-
- st.success('Found answer 🥳')
- return answer.formatted_answer
-
-with st.form(key='question_form', clear_on_submit = False):
- question = st.text_input("What do you wanna know from these papers?", placeholder='Input questions here...',
- value='')
-
- submitButton2 = st.form_submit_button('Submit')
-
-if submitButton2:
- with st.spinner('⏳ Please wait...'):
- start = time.time()
- paperqa_answer = answer_query(question)
- length_answer = len(paperqa_answer)
- st.text_area("Answer:", paperqa_answer, height=max(length_answer//4, 100))
- end = time.time()
- clock_time = end - start
- with st.empty():
- st.write(f"⏰ Task completed in {clock_time:.2f} seconds.")
-
-
-
-clearButton = st.button('Clear data!')
-if clearButton:
-    st.write('Extracted data is removed 😶‍🌫️')
- if os.path.exists('web_data.txt'):
- os.remove('web_data.txt')
\ No newline at end of file
diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_zone_arrangement.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_zone_arrangement.py
deleted file mode 100644
index 9b4f9cf4b9e61bb387bece0cb0fa6db56dd25b27..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_zone_arrangement.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import numpy as np
-import os
-import pybullet as p
-import random
-from cliport.tasks import primitives
-from cliport.tasks.grippers import Spatula
-from cliport.tasks.task import Task
-from cliport.utils import utils
-
-class ColorCoordinatedZoneArrangement(Task):
- """Pick up blocks of different colors and place them on the pallets of the same color."""
-
- def __init__(self):
- super().__init__()
- self.max_steps = 15
- self.lang_template = "place the {color} blocks on the {color} pallet"
- self.task_completed_desc = "done arranging blocks on pallets."
- self.additional_reset()
-
- def reset(self, env):
- super().reset(env)
-
- # Add pallets.
- # x, y, z dimensions for the asset size
- pallet_size = (0.12, 0.12, 0.02)
- pallet_urdf = 'pallet/pallet.urdf'
- pallet_colors = ['red', 'blue', 'green']
- pallet_poses = []
- for color in pallet_colors:
- pallet_pose = self.get_random_pose(env, pallet_size)
- env.add_object(pallet_urdf, pallet_pose, category='fixed', color=utils.COLORS[color])
- pallet_poses.append(pallet_pose)
-
- # Add blocks.
- # x, y, z dimensions for the asset size
- blocks = []
- block_size = (0.04, 0.04, 0.04)
- block_urdf = 'block/block.urdf'
- for color in pallet_colors:
- for _ in range(3):
- block_pose = self.get_random_pose(env, block_size)
- block_id = env.add_object(block_urdf, block_pose, color=utils.COLORS[color])
- blocks.append(block_id)
-
- # Add small blocks as obstacles.
- small_block_size = (0.02, 0.02, 0.02)
- small_block_urdf = 'block/small.urdf'
- for _ in range(5):
- small_block_pose = self.get_random_pose(env, small_block_size)
- env.add_object(small_block_urdf, small_block_pose)
-
- # Goal: each block is on the pallet of the same color.
- for i in range(9):
- self.add_goal(objs=[blocks[i]], matches=np.ones((1, 1)), targ_poses=[pallet_poses[i // 3]], replace=False,
- rotations=True, metric='pose', params=None, step_max_reward=1 / 9,
- language_goal=self.lang_template.format(color=pallet_colors[i // 3]))
\ No newline at end of file
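
The goal loop above depends on blocks being appended three per color, in the same order as pallet_colors, so block i maps to pallet i // 3; a purely illustrative sketch of that index mapping:

pallet_colors = ['red', 'blue', 'green']
for i in range(9):
    # blocks 0-2 -> red pallet, 3-5 -> blue pallet, 6-8 -> green pallet
    print(i, pallet_colors[i // 3])
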
diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/vocoder/models/deepmind_version.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/vocoder/models/deepmind_version.py
deleted file mode 100644
index 1d973d9b8b9ab547571abc5a3f5ea86226a25924..0000000000000000000000000000000000000000
--- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/vocoder/models/deepmind_version.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import time
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from utils.display import *
-from utils.dsp import *
-
-
-class WaveRNN(nn.Module) :
- def __init__(self, hidden_size=896, quantisation=256) :
- super(WaveRNN, self).__init__()
-
- self.hidden_size = hidden_size
- self.split_size = hidden_size // 2
-
- # The main matmul
- self.R = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
-
- # Output fc layers
- self.O1 = nn.Linear(self.split_size, self.split_size)
- self.O2 = nn.Linear(self.split_size, quantisation)
- self.O3 = nn.Linear(self.split_size, self.split_size)
- self.O4 = nn.Linear(self.split_size, quantisation)
-
- # Input fc layers
- self.I_coarse = nn.Linear(2, 3 * self.split_size, bias=False)
- self.I_fine = nn.Linear(3, 3 * self.split_size, bias=False)
-
- # biases for the gates
- self.bias_u = nn.Parameter(torch.zeros(self.hidden_size))
- self.bias_r = nn.Parameter(torch.zeros(self.hidden_size))
- self.bias_e = nn.Parameter(torch.zeros(self.hidden_size))
-
- # display num params
- self.num_params()
-
-
- def forward(self, prev_y, prev_hidden, current_coarse) :
-
- # Main matmul - the projection is split 3 ways
- R_hidden = self.R(prev_hidden)
- R_u, R_r, R_e, = torch.split(R_hidden, self.hidden_size, dim=1)
-
- # Project the prev input
- coarse_input_proj = self.I_coarse(prev_y)
- I_coarse_u, I_coarse_r, I_coarse_e = \
- torch.split(coarse_input_proj, self.split_size, dim=1)
-
- # Project the prev input and current coarse sample
- fine_input = torch.cat([prev_y, current_coarse], dim=1)
- fine_input_proj = self.I_fine(fine_input)
- I_fine_u, I_fine_r, I_fine_e = \
- torch.split(fine_input_proj, self.split_size, dim=1)
-
- # concatenate for the gates
- I_u = torch.cat([I_coarse_u, I_fine_u], dim=1)
- I_r = torch.cat([I_coarse_r, I_fine_r], dim=1)
- I_e = torch.cat([I_coarse_e, I_fine_e], dim=1)
-
- # Compute all gates for coarse and fine
- u = F.sigmoid(R_u + I_u + self.bias_u)
- r = F.sigmoid(R_r + I_r + self.bias_r)
- e = F.tanh(r * R_e + I_e + self.bias_e)
- hidden = u * prev_hidden + (1. - u) * e
-
- # Split the hidden state
- hidden_coarse, hidden_fine = torch.split(hidden, self.split_size, dim=1)
-
- # Compute outputs
- out_coarse = self.O2(F.relu(self.O1(hidden_coarse)))
- out_fine = self.O4(F.relu(self.O3(hidden_fine)))
-
- return out_coarse, out_fine, hidden
-
-
- def generate(self, seq_len):
- with torch.no_grad():
- # First split up the biases for the gates
- b_coarse_u, b_fine_u = torch.split(self.bias_u, self.split_size)
- b_coarse_r, b_fine_r = torch.split(self.bias_r, self.split_size)
- b_coarse_e, b_fine_e = torch.split(self.bias_e, self.split_size)
-
- # Lists for the two output seqs
- c_outputs, f_outputs = [], []
-
- # Some initial inputs
- out_coarse = torch.LongTensor([0]).cuda()
- out_fine = torch.LongTensor([0]).cuda()
-
-            # We'll need a hidden state
- hidden = self.init_hidden()
-
- # Need a clock for display
- start = time.time()
-
- # Loop for generation
- for i in range(seq_len) :
-
- # Split into two hidden states
- hidden_coarse, hidden_fine = \
- torch.split(hidden, self.split_size, dim=1)
-
- # Scale and concat previous predictions
- out_coarse = out_coarse.unsqueeze(0).float() / 127.5 - 1.
- out_fine = out_fine.unsqueeze(0).float() / 127.5 - 1.
- prev_outputs = torch.cat([out_coarse, out_fine], dim=1)
-
- # Project input
- coarse_input_proj = self.I_coarse(prev_outputs)
- I_coarse_u, I_coarse_r, I_coarse_e = \
- torch.split(coarse_input_proj, self.split_size, dim=1)
-
- # Project hidden state and split 6 ways
- R_hidden = self.R(hidden)
- R_coarse_u , R_fine_u, \
- R_coarse_r, R_fine_r, \
- R_coarse_e, R_fine_e = torch.split(R_hidden, self.split_size, dim=1)
-
- # Compute the coarse gates
- u = F.sigmoid(R_coarse_u + I_coarse_u + b_coarse_u)
- r = F.sigmoid(R_coarse_r + I_coarse_r + b_coarse_r)
- e = F.tanh(r * R_coarse_e + I_coarse_e + b_coarse_e)
- hidden_coarse = u * hidden_coarse + (1. - u) * e
-
- # Compute the coarse output
- out_coarse = self.O2(F.relu(self.O1(hidden_coarse)))
- posterior = F.softmax(out_coarse, dim=1)
- distrib = torch.distributions.Categorical(posterior)
- out_coarse = distrib.sample()
- c_outputs.append(out_coarse)
-
- # Project the [prev outputs and predicted coarse sample]
- coarse_pred = out_coarse.float() / 127.5 - 1.
- fine_input = torch.cat([prev_outputs, coarse_pred.unsqueeze(0)], dim=1)
- fine_input_proj = self.I_fine(fine_input)
- I_fine_u, I_fine_r, I_fine_e = \
- torch.split(fine_input_proj, self.split_size, dim=1)
-
- # Compute the fine gates
- u = F.sigmoid(R_fine_u + I_fine_u + b_fine_u)
- r = F.sigmoid(R_fine_r + I_fine_r + b_fine_r)
- e = F.tanh(r * R_fine_e + I_fine_e + b_fine_e)
- hidden_fine = u * hidden_fine + (1. - u) * e
-
- # Compute the fine output
- out_fine = self.O4(F.relu(self.O3(hidden_fine)))
- posterior = F.softmax(out_fine, dim=1)
- distrib = torch.distributions.Categorical(posterior)
- out_fine = distrib.sample()
- f_outputs.append(out_fine)
-
- # Put the hidden state back together
- hidden = torch.cat([hidden_coarse, hidden_fine], dim=1)
-
- # Display progress
- speed = (i + 1) / (time.time() - start)
- stream('Gen: %i/%i -- Speed: %i', (i + 1, seq_len, speed))
-
- coarse = torch.stack(c_outputs).squeeze(1).cpu().data.numpy()
- fine = torch.stack(f_outputs).squeeze(1).cpu().data.numpy()
- output = combine_signal(coarse, fine)
-
- return output, coarse, fine
-
- def init_hidden(self, batch_size=1) :
- return torch.zeros(batch_size, self.hidden_size).cuda()
-
- def num_params(self) :
- parameters = filter(lambda p: p.requires_grad, self.parameters())
- parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
- print('Trainable Parameters: %.3f million' % parameters)
\ No newline at end of file
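
A rough shape-only smoke test of the forward pass above; the tensors and batch size here are illustrative assumptions (the class itself only needs CUDA in generate() and init_hidden()):

import torch

model = WaveRNN(hidden_size=896, quantisation=256)
prev_y = torch.zeros(1, 2)          # previous coarse + fine samples, scaled to [-1, 1]
prev_hidden = torch.zeros(1, 896)
current_coarse = torch.zeros(1, 1)  # current coarse sample
out_coarse, out_fine, hidden = model(prev_y, prev_hidden, current_coarse)
print(out_coarse.shape, out_fine.shape, hidden.shape)  # (1, 256) (1, 256) (1, 896)
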
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py
deleted file mode 100644
index fa4b6f12f36be74c6e1f7182db110893f9f4f0c4..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py
+++ /dev/null
@@ -1,11 +0,0 @@
-_base_ = '../dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py'
-model = dict(
- backbone=dict(
- norm_cfg=dict(type='SyncBN', requires_grad=True),
- norm_eval=False,
- plugins=[
- dict(
- cfg=dict(type='ContextBlock', ratio=1. / 16),
- stages=(False, True, True, True),
- position='after_conv3')
- ]))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/legacy_1.x/ssd300_coco_v1.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/legacy_1.x/ssd300_coco_v1.py
deleted file mode 100644
index b194e7651ede006c5101bff1056749edf4d249cd..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/legacy_1.x/ssd300_coco_v1.py
+++ /dev/null
@@ -1,79 +0,0 @@
-_base_ = [
- '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
- '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
-]
-# model settings
-input_size = 300
-model = dict(
- bbox_head=dict(
- type='SSDHead',
- anchor_generator=dict(
- type='LegacySSDAnchorGenerator',
- scale_major=False,
- input_size=input_size,
- basesize_ratio_range=(0.15, 0.9),
- strides=[8, 16, 32, 64, 100, 300],
- ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
- bbox_coder=dict(
- type='LegacyDeltaXYWHBBoxCoder',
- target_means=[.0, .0, .0, .0],
- target_stds=[0.1, 0.1, 0.2, 0.2])))
-# dataset settings
-dataset_type = 'CocoDataset'
-data_root = 'data/coco/'
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile', to_float32=True),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- type='PhotoMetricDistortion',
- brightness_delta=32,
- contrast_range=(0.5, 1.5),
- saturation_range=(0.5, 1.5),
- hue_delta=18),
- dict(
- type='Expand',
- mean=img_norm_cfg['mean'],
- to_rgb=img_norm_cfg['to_rgb'],
- ratio_range=(1, 4)),
- dict(
- type='MinIoURandomCrop',
- min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
- min_crop_size=0.3),
- dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(300, 300),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=False),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=8,
- workers_per_gpu=3,
- train=dict(
- _delete_=True,
- type='RepeatDataset',
- times=5,
- dataset=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_train2017.json',
- img_prefix=data_root + 'train2017/',
- pipeline=train_pipeline)),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
-# optimizer
-optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
-optimizer_config = dict(_delete_=True)
-dist_params = dict(backend='nccl', port=29555)
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/base.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/base.py
deleted file mode 100644
index 7ce9c36c1d6f60c8567a72c44cc9eee0323ae2a2..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/base.py
+++ /dev/null
@@ -1,355 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from collections import OrderedDict
-
-import mmcv
-import numpy as np
-import torch
-import torch.distributed as dist
-import torch.nn as nn
-from mmcv.runner import auto_fp16
-from mmcv.utils import print_log
-
-from mmdet.core.visualization import imshow_det_bboxes
-from mmdet.utils import get_root_logger
-
-
-class BaseDetector(nn.Module, metaclass=ABCMeta):
- """Base class for detectors."""
-
- def __init__(self):
- super(BaseDetector, self).__init__()
- self.fp16_enabled = False
-
- @property
- def with_neck(self):
- """bool: whether the detector has a neck"""
- return hasattr(self, 'neck') and self.neck is not None
-
- # TODO: these properties need to be carefully handled
- # for both single stage & two stage detectors
- @property
- def with_shared_head(self):
- """bool: whether the detector has a shared head in the RoI Head"""
- return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
-
- @property
- def with_bbox(self):
- """bool: whether the detector has a bbox head"""
- return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
- or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
-
- @property
- def with_mask(self):
- """bool: whether the detector has a mask head"""
- return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
- or (hasattr(self, 'mask_head') and self.mask_head is not None))
-
- @abstractmethod
- def extract_feat(self, imgs):
- """Extract features from images."""
- pass
-
- def extract_feats(self, imgs):
- """Extract features from multiple images.
-
- Args:
- imgs (list[torch.Tensor]): A list of images. The images are
- augmented from the same image but in different ways.
-
- Returns:
- list[torch.Tensor]: Features of different images
- """
- assert isinstance(imgs, list)
- return [self.extract_feat(img) for img in imgs]
-
- def forward_train(self, imgs, img_metas, **kwargs):
- """
- Args:
-            imgs (list[Tensor]): List of tensors of shape (1, C, H, W).
- Typically these should be mean centered and std scaled.
- img_metas (list[dict]): List of image info dict where each dict
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
- For details on the values of these keys, see
- :class:`mmdet.datasets.pipelines.Collect`.
- kwargs (keyword arguments): Specific to concrete implementation.
- """
- # NOTE the batched image size information may be useful, e.g.
- # in DETR, this is needed for the construction of masks, which is
- # then used for the transformer_head.
- batch_input_shape = tuple(imgs[0].size()[-2:])
- for img_meta in img_metas:
- img_meta['batch_input_shape'] = batch_input_shape
-
- async def async_simple_test(self, img, img_metas, **kwargs):
- raise NotImplementedError
-
- @abstractmethod
- def simple_test(self, img, img_metas, **kwargs):
- pass
-
- @abstractmethod
- def aug_test(self, imgs, img_metas, **kwargs):
- """Test function with test time augmentation."""
- pass
-
- def init_weights(self, pretrained=None):
- """Initialize the weights in detector.
-
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
- if pretrained is not None:
- logger = get_root_logger()
- print_log(f'load model from: {pretrained}', logger=logger)
-
- async def aforward_test(self, *, img, img_metas, **kwargs):
- for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
- if not isinstance(var, list):
- raise TypeError(f'{name} must be a list, but got {type(var)}')
-
- num_augs = len(img)
- if num_augs != len(img_metas):
- raise ValueError(f'num of augmentations ({len(img)}) '
- f'!= num of image metas ({len(img_metas)})')
- # TODO: remove the restriction of samples_per_gpu == 1 when prepared
- samples_per_gpu = img[0].size(0)
- assert samples_per_gpu == 1
-
- if num_augs == 1:
- return await self.async_simple_test(img[0], img_metas[0], **kwargs)
- else:
- raise NotImplementedError
-
- def forward_test(self, imgs, img_metas, **kwargs):
- """
- Args:
- imgs (List[Tensor]): the outer list indicates test-time
- augmentations and inner Tensor should have a shape NxCxHxW,
- which contains all images in the batch.
- img_metas (List[List[dict]]): the outer list indicates test-time
- augs (multiscale, flip, etc.) and the inner list indicates
- images in a batch.
- """
- for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
- if not isinstance(var, list):
- raise TypeError(f'{name} must be a list, but got {type(var)}')
-
- num_augs = len(imgs)
- if num_augs != len(img_metas):
- raise ValueError(f'num of augmentations ({len(imgs)}) '
- f'!= num of image meta ({len(img_metas)})')
-
- # NOTE the batched image size information may be useful, e.g.
- # in DETR, this is needed for the construction of masks, which is
- # then used for the transformer_head.
- for img, img_meta in zip(imgs, img_metas):
- batch_size = len(img_meta)
- for img_id in range(batch_size):
- img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
-
- if num_augs == 1:
- # proposals (List[List[Tensor]]): the outer list indicates
- # test-time augs (multiscale, flip, etc.) and the inner list
- # indicates images in a batch.
- # The Tensor should have a shape Px4, where P is the number of
- # proposals.
- if 'proposals' in kwargs:
- kwargs['proposals'] = kwargs['proposals'][0]
- return self.simple_test(imgs[0], img_metas[0], **kwargs)
- else:
- assert imgs[0].size(0) == 1, 'aug test does not support ' \
- 'inference with batch size ' \
- f'{imgs[0].size(0)}'
- # TODO: support test augmentation for predefined proposals
- assert 'proposals' not in kwargs
- return self.aug_test(imgs, img_metas, **kwargs)
-
- @auto_fp16(apply_to=('img', ))
- def forward(self, img, img_metas, return_loss=True, **kwargs):
- """Calls either :func:`forward_train` or :func:`forward_test` depending
- on whether ``return_loss`` is ``True``.
-
- Note this setting will change the expected inputs. When
- ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
-        and List[dict]), and when ``return_loss=False``, img and img_meta
- should be double nested (i.e. List[Tensor], List[List[dict]]), with
- the outer list indicating test time augmentations.
- """
- if return_loss:
- return self.forward_train(img, img_metas, **kwargs)
- else:
- return self.forward_test(img, img_metas, **kwargs)
-
- def _parse_losses(self, losses):
- """Parse the raw outputs (losses) of the network.
-
- Args:
-            losses (dict): Raw output of the network, which usually contains
-                losses and other necessary information.
-
- Returns:
- tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
- which may be a weighted sum of all losses, log_vars contains \
- all the variables to be sent to the logger.
- """
- log_vars = OrderedDict()
- for loss_name, loss_value in losses.items():
- if isinstance(loss_value, torch.Tensor):
- log_vars[loss_name] = loss_value.mean()
- elif isinstance(loss_value, list):
- log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
- else:
- raise TypeError(
- f'{loss_name} is not a tensor or list of tensors')
-
- loss = sum(_value for _key, _value in log_vars.items()
- if 'loss' in _key)
-
- log_vars['loss'] = loss
- for loss_name, loss_value in log_vars.items():
- # reduce loss when distributed training
- if dist.is_available() and dist.is_initialized():
- loss_value = loss_value.data.clone()
- dist.all_reduce(loss_value.div_(dist.get_world_size()))
- log_vars[loss_name] = loss_value.item()
-
- return loss, log_vars
-
- def train_step(self, data, optimizer):
- """The iteration step during training.
-
- This method defines an iteration step during training, except for the
- back propagation and optimizer updating, which are done in an optimizer
- hook. Note that in some complicated cases or models, the whole process
- including back propagation and optimizer updating is also defined in
- this method, such as GAN.
-
- Args:
- data (dict): The output of dataloader.
- optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
- runner is passed to ``train_step()``. This argument is unused
- and reserved.
-
- Returns:
- dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \
- ``num_samples``.
-
- - ``loss`` is a tensor for back propagation, which can be a \
- weighted sum of multiple losses.
- - ``log_vars`` contains all the variables to be sent to the
- logger.
- - ``num_samples`` indicates the batch size (when the model is \
- DDP, it means the batch size on each GPU), which is used for \
- averaging the logs.
- """
- losses = self(**data)
- loss, log_vars = self._parse_losses(losses)
-
- outputs = dict(
- loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
-
- return outputs
-
- def val_step(self, data, optimizer):
- """The iteration step during validation.
-
- This method shares the same signature as :func:`train_step`, but used
- during val epochs. Note that the evaluation after training epochs is
- not implemented with this method, but an evaluation hook.
- """
- losses = self(**data)
- loss, log_vars = self._parse_losses(losses)
-
- outputs = dict(
- loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
-
- return outputs
-
- def show_result(self,
- img,
- result,
- score_thr=0.3,
- bbox_color=(72, 101, 241),
- text_color=(72, 101, 241),
- mask_color=None,
- thickness=2,
- font_size=13,
- win_name='',
- show=False,
- wait_time=0,
- out_file=None):
- """Draw `result` over `img`.
-
- Args:
- img (str or Tensor): The image to be displayed.
- result (Tensor or tuple): The results to draw over `img`
- bbox_result or (bbox_result, segm_result).
- score_thr (float, optional): Minimum score of bboxes to be shown.
- Default: 0.3.
-            bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox lines.
-               The tuple of color should be in BGR order. Default: (72, 101, 241)
-            text_color (str or tuple(int) or :obj:`Color`): Color of texts.
-               The tuple of color should be in BGR order. Default: (72, 101, 241)
- mask_color (None or str or tuple(int) or :obj:`Color`):
- Color of masks. The tuple of color should be in BGR order.
- Default: None
- thickness (int): Thickness of lines. Default: 2
- font_size (int): Font size of texts. Default: 13
- win_name (str): The window name. Default: ''
- wait_time (float): Value of waitKey param.
- Default: 0.
- show (bool): Whether to show the image.
- Default: False.
- out_file (str or None): The filename to write the image.
- Default: None.
-
- Returns:
- img (Tensor): Only if not `show` or `out_file`
- """
- img = mmcv.imread(img)
- img = img.copy()
- if isinstance(result, tuple):
- bbox_result, segm_result = result
- if isinstance(segm_result, tuple):
- segm_result = segm_result[0] # ms rcnn
- else:
- bbox_result, segm_result = result, None
- bboxes = np.vstack(bbox_result)
- labels = [
- np.full(bbox.shape[0], i, dtype=np.int32)
- for i, bbox in enumerate(bbox_result)
- ]
- labels = np.concatenate(labels)
- # draw segmentation masks
- segms = None
- if segm_result is not None and len(labels) > 0: # non empty
- segms = mmcv.concat_list(segm_result)
- if isinstance(segms[0], torch.Tensor):
- segms = torch.stack(segms, dim=0).detach().cpu().numpy()
- else:
- segms = np.stack(segms, axis=0)
- # if out_file specified, do not show image in window
- if out_file is not None:
- show = False
- # draw bounding boxes
- img = imshow_det_bboxes(
- img,
- bboxes,
- labels,
- segms,
- class_names=self.CLASSES,
- score_thr=score_thr,
- bbox_color=bbox_color,
- text_color=text_color,
- mask_color=mask_color,
- thickness=thickness,
- font_size=font_size,
- win_name=win_name,
- show=show,
- wait_time=wait_time,
- out_file=out_file)
-
- # if not (show or out_file):
- return img
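
To illustrate the loss bookkeeping in _parse_losses, here is a hedged sketch; the subclass, loss names, and values below are made up for the example, and no distributed reduction is assumed:

import torch

class _DummyDetector(BaseDetector):
    # minimal concrete subclass so that _parse_losses can be exercised
    def extract_feat(self, imgs):
        return imgs
    def simple_test(self, img, img_metas, **kwargs):
        return []
    def aug_test(self, imgs, img_metas, **kwargs):
        return []

detector = _DummyDetector()
raw_losses = {
    "loss_cls": torch.tensor(0.5),
    "loss_bbox": [torch.tensor(0.2), torch.tensor(0.1)],  # list entries are mean-summed
    "acc": torch.tensor(0.9),                             # logged but excluded from the total
}
loss, log_vars = detector._parse_losses(raw_losses)
# loss == 0.5 + (0.2 + 0.1) == 0.8; log_vars additionally gains a combined "loss" entry
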
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py
deleted file mode 100644
index bc25d6aaf67ccb7e9fcb44ba2d803bebfa31b160..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './psanet_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/quantization_options.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/quantization_options.py
deleted file mode 100644
index b46d682c0edaeaaf2a230e51d50da2a32d4bda98..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/quantization_options.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-def parse_config_yaml(yaml_data):
- # Initialize to default options.
- quantization_options = {
- "n_centroids": {
- "Linear": ["in_features", {"*": 256}],
- "Embedding": ["embedding_dim", {"*": 256}],
- },
- "block_sizes": {
- "Linear": ["fuzzy_name", {"fc": 8, "attn": 4, "emb": 4}],
- "Embedding": ["fuzzy_name", {"emb": 8}],
- },
- "layers_to_quantize": [
- "decoder\\.layers\\.\\d+\\.fc[12]",
- "decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01]",
- "decoder\\.layers\\.\\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj)",
- ],
- }
-
- if "n_centroids" in yaml_data:
- quantization_options["n_centroids"] = {
- layer: convert_yaml_to_tuple(layer_data)
- for layer, layer_data in yaml_data["n_centroids"].items()
- }
- if "block_sizes" in yaml_data:
- quantization_options["block_sizes"] = {
- layer: convert_yaml_to_tuple(layer_data)
- for layer, layer_data in yaml_data["block_sizes"].items()
- }
- if "layers_to_quantize" in yaml_data:
- quantization_options["layers_to_quantize"] = yaml_data["layers_to_quantize"]
-
- return quantization_options
-
-
-def convert_yaml_to_tuple(yaml_dictionary):
- """Converts a yaml dictionary with two keys: `key` and `value` into a two
- argument tuple of those values."""
- return (yaml_dictionary["key"], yaml_dictionary["value"])
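
A hedged example of the YAML layout parse_config_yaml expects once it has been loaded into a Python dict (values are illustrative, not a recommended configuration):

yaml_data = {
    "n_centroids": {
        "Linear": {"key": "in_features", "value": {"*": 256}},
        "Embedding": {"key": "embedding_dim", "value": {"*": 256}},
    },
    "block_sizes": {
        "Linear": {"key": "fuzzy_name", "value": {"fc": 8, "attn": 4, "emb": 4}},
    },
    "layers_to_quantize": [r"decoder\.layers\.\d+\.fc[12]"],
}
options = parse_config_yaml(yaml_data)
assert options["n_centroids"]["Linear"] == ("in_features", {"*": 256})
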
diff --git a/spaces/Hila/RobustViT/SegmentationTest/utils/parallel.py b/spaces/Hila/RobustViT/SegmentationTest/utils/parallel.py
deleted file mode 100644
index c14ef5c0d8e3f84606c339ce513b46d4bc9e4a70..0000000000000000000000000000000000000000
--- a/spaces/Hila/RobustViT/SegmentationTest/utils/parallel.py
+++ /dev/null
@@ -1,260 +0,0 @@
-##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-## Created by: Hang Zhang
-## ECE Department, Rutgers University
-## Email: zhang.hang@rutgers.edu
-## Copyright (c) 2017
-##
-## This source code is licensed under the MIT-style license found in the
-## LICENSE file in the root directory of this source tree
-##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-"""Encoding Data Parallel"""
-import threading
-import functools
-import torch
-from torch.autograd import Variable, Function
-import torch.cuda.comm as comm
-from torch.nn.parallel.data_parallel import DataParallel
-from torch.nn.parallel.parallel_apply import get_a_var
-from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
-
-torch_ver = torch.__version__[:3]
-
-__all__ = ['allreduce', 'DataParallelModel', 'DataParallelCriterion',
- 'patch_replication_callback']
-
-def allreduce(*inputs):
-    """Cross GPU all reduce autograd operation for calculating the mean and
-    variance in SyncBN.
- """
- return AllReduce.apply(*inputs)
-
-class AllReduce(Function):
- @staticmethod
- def forward(ctx, num_inputs, *inputs):
- ctx.num_inputs = num_inputs
- ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]
- inputs = [inputs[i:i + num_inputs]
- for i in range(0, len(inputs), num_inputs)]
- # sort before reduce sum
- inputs = sorted(inputs, key=lambda i: i[0].get_device())
- results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
- outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
- return tuple([t for tensors in outputs for t in tensors])
-
- @staticmethod
- def backward(ctx, *inputs):
- inputs = [i.data for i in inputs]
- inputs = [inputs[i:i + ctx.num_inputs]
- for i in range(0, len(inputs), ctx.num_inputs)]
- results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
- outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
- return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors])
-
-
-class Reduce(Function):
- @staticmethod
- def forward(ctx, *inputs):
- ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]
- inputs = sorted(inputs, key=lambda i: i.get_device())
- return comm.reduce_add(inputs)
-
- @staticmethod
- def backward(ctx, gradOutput):
- return Broadcast.apply(ctx.target_gpus, gradOutput)
-
-
-class DataParallelModel(DataParallel):
- """Implements data parallelism at the module level.
-
- This container parallelizes the application of the given module by
- splitting the input across the specified devices by chunking in the
- batch dimension.
- In the forward pass, the module is replicated on each device,
- and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module.
- Note that the outputs are not gathered, please use compatible
- :class:`encoding.parallel.DataParallelCriterion`.
-
- The batch size should be larger than the number of GPUs used. It should
- also be an integer multiple of the number of GPUs so that each chunk is
- the same size (so that each GPU processes the same number of samples).
-
- Args:
- module: module to be parallelized
- device_ids: CUDA devices (default: all devices)
-
- Reference:
- Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
-        Amit Agrawal. “Context Encoding for Semantic Segmentation.”
- *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
-
- Example::
-
- >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
- >>> y = net(x)
- """
- def gather(self, outputs, output_device):
- return outputs
-
- def replicate(self, module, device_ids):
- modules = super(DataParallelModel, self).replicate(module, device_ids)
- execute_replication_callbacks(modules)
- return modules
-
-
-class DataParallelCriterion(DataParallel):
- """
-    Calculate loss across multiple GPUs, which balances the memory usage for
-    Semantic Segmentation.
-
-    The targets are split across the specified devices by chunking in
-    the batch dimension. Please use it together with :class:`encoding.parallel.DataParallelModel`.
-
- Reference:
- Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
-        Amit Agrawal. “Context Encoding for Semantic Segmentation.”
- *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
-
- Example::
-
- >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
- >>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])
- >>> y = net(x)
- >>> loss = criterion(y, target)
- """
- def forward(self, inputs, *targets, **kwargs):
-        # inputs should already be scattered,
-        # so scatter the targets instead
- if not self.device_ids:
- return self.module(inputs, *targets, **kwargs)
- targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
- if len(self.device_ids) == 1:
- return self.module(inputs, *targets[0], **kwargs[0])
- replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
- outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
- return Reduce.apply(*outputs) / len(outputs)
- #return self.gather(outputs, self.output_device).mean()
-
-
-def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
- assert len(modules) == len(inputs)
- assert len(targets) == len(inputs)
- if kwargs_tup:
- assert len(modules) == len(kwargs_tup)
- else:
- kwargs_tup = ({},) * len(modules)
- if devices is not None:
- assert len(modules) == len(devices)
- else:
- devices = [None] * len(modules)
-
- lock = threading.Lock()
- results = {}
- if torch_ver != "0.3":
- grad_enabled = torch.is_grad_enabled()
-
- def _worker(i, module, input, target, kwargs, device=None):
- if torch_ver != "0.3":
- torch.set_grad_enabled(grad_enabled)
- if device is None:
- device = get_a_var(input).get_device()
- try:
- with torch.cuda.device(device):
- # this also avoids accidental slicing of `input` if it is a Tensor
- if not isinstance(input, (list, tuple)):
- input = (input,)
- if type(input) != type(target):
- if isinstance(target, tuple):
- input = tuple(input)
- elif isinstance(target, list):
- input = list(input)
- else:
- raise Exception("Types problem")
-
- output = module(*(input + target), **kwargs)
- with lock:
- results[i] = output
- except Exception as e:
- with lock:
- results[i] = e
-
- if len(modules) > 1:
- threads = [threading.Thread(target=_worker,
- args=(i, module, input, target,
- kwargs, device),)
- for i, (module, input, target, kwargs, device) in
- enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]
-
- for thread in threads:
- thread.start()
- for thread in threads:
- thread.join()
- else:
-        _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])
-
- outputs = []
- for i in range(len(inputs)):
- output = results[i]
- if isinstance(output, Exception):
- raise output
- outputs.append(output)
- return outputs
-
-
-###########################################################################
-# Adapted from Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-#
-class CallbackContext(object):
- pass
-
-
-def execute_replication_callbacks(modules):
- """
-    Execute a replication callback `__data_parallel_replicate__` on each module created
-    by the original replication.
-
- The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
-
-    Note that, as all modules are isomorphic, we assign each sub-module a context
- (shared among multiple copies of this module on different devices).
- Through this context, different copies can share some information.
-
-    We guarantee that the callback on the master copy (the first copy) will be called ahead
-    of the callbacks on any slave copies.
- """
- master_copy = modules[0]
- nr_modules = len(list(master_copy.modules()))
- ctxs = [CallbackContext() for _ in range(nr_modules)]
-
- for i, module in enumerate(modules):
- for j, m in enumerate(module.modules()):
- if hasattr(m, '__data_parallel_replicate__'):
- m.__data_parallel_replicate__(ctxs[j], i)
-
-
-def patch_replication_callback(data_parallel):
- """
- Monkey-patch an existing `DataParallel` object. Add the replication callback.
-    Useful when you have a customized `DataParallel` implementation.
-
- Examples:
- > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
- > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
- > patch_replication_callback(sync_bn)
- # this is equivalent to
- > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
- > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
- """
-
- assert isinstance(data_parallel, DataParallel)
-
- old_replicate = data_parallel.replicate
-
- @functools.wraps(old_replicate)
- def new_replicate(module, device_ids):
- modules = old_replicate(module, device_ids)
- execute_replication_callbacks(modules)
- return modules
-
- data_parallel.replicate = new_replicate
diff --git a/spaces/Hina4867/bingo/src/lib/bots/bing/types.ts b/spaces/Hina4867/bingo/src/lib/bots/bing/types.ts
deleted file mode 100644
index 02cd5e8b01e3529642d28dc1539bf958f4ac420b..0000000000000000000000000000000000000000
--- a/spaces/Hina4867/bingo/src/lib/bots/bing/types.ts
+++ /dev/null
@@ -1,259 +0,0 @@
-export type Author = 'user' | 'system' | 'bot'
-
-export type BotId = 'bing'
-
-export enum BingConversationStyle {
- Creative = 'Creative',
- Balanced = 'Balanced',
- Precise = 'Precise'
-}
-
-export enum ErrorCode {
- CONVERSATION_LIMIT = 'CONVERSATION_LIMIT',
- BING_UNAUTHORIZED = 'BING_UNAUTHORIZED',
- BING_FORBIDDEN = 'BING_FORBIDDEN',
- BING_CAPTCHA = 'BING_CAPTCHA',
- THROTTLE_LIMIT = 'THROTTLE_LIMIT',
- NOTFOUND_ERROR = 'NOT_FOUND_ERROR',
- UNKOWN_ERROR = 'UNKOWN_ERROR',
- NETWORK_ERROR = 'NETWORK_ERROR',
-}
-
-export class ChatError extends Error {
- code: ErrorCode
- constructor(message: string, code: ErrorCode) {
- super(message)
- this.code = code
- }
-}
-
-export type ChatMessageModel = {
- id: string
- author: Author
- text: string
- error?: ChatError
- throttling?: Throttling
- sourceAttributions?: SourceAttribution[]
- suggestedResponses?: SuggestedResponse[]
-}
-
-export interface ConversationModel {
- messages: ChatMessageModel[]
-}
-
-export type Event =
- | {
- type: 'UPDATE_ANSWER'
- data: {
- text: string
- spokenText?: string
- sourceAttributions?: SourceAttribution[]
- suggestedResponses?: SuggestedResponse[]
- throttling?: Throttling
- }
- }
- | {
- type: 'DONE'
- }
- | {
- type: 'ERROR'
- error: ChatError
- }
-
-export interface SendMessageParams<T> {
- prompt: string
- imageUrl?: string
- options: T
- onEvent: (event: Event) => void
- signal?: AbortSignal
-}
-
-export interface ConversationResponse {
- conversationId: string
- clientId: string
- conversationSignature: string
- result: {
- value: string
- message?: string
- }
-}
-
-export interface Telemetry {
- metrics?: null
- startTime: string
-}
-
-export interface ChatUpdateArgument {
- messages?: ChatResponseMessage[]
- throttling?: Throttling
- requestId: string
- result: null
-}
-
-export type ChatUpdateCompleteResponse = {
- type: 2
- invocationId: string
- item: ChatResponseItem
-} | {
- type: 1
- target: string
- arguments: ChatUpdateArgument[]
-} | {
- type: 3
- invocationId: string
-} | {
- type: 6 | 7
-}
-
-export interface ChatRequestResult {
- value: string
- serviceVersion: string
- error?: string
-}
-
-export interface ChatResponseItem {
- messages: ChatResponseMessage[]
- firstNewMessageIndex: number
- suggestedResponses: null
- conversationId: string
- requestId: string
- conversationExpiryTime: string
- telemetry: Telemetry
- result: ChatRequestResult
- throttling: Throttling
-}
-export enum InvocationEventType {
- Invocation = 1,
- StreamItem = 2,
- Completion = 3,
- StreamInvocation = 4,
- CancelInvocation = 5,
- Ping = 6,
- Close = 7,
-}
-
-// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts
-
-export interface ConversationInfo {
- conversationId: string
- clientId: string
- conversationSignature: string
- invocationId: number
- conversationStyle: BingConversationStyle
- prompt: string
- imageUrl?: string
-}
-
-export interface BingChatResponse {
- conversationSignature: string
- conversationId: string
- clientId: string
- invocationId: number
- conversationExpiryTime: Date
- response: string
- details: ChatResponseMessage
-}
-
-export interface Throttling {
- maxNumLongDocSummaryUserMessagesInConversation: number
- maxNumUserMessagesInConversation: number
- numLongDocSummaryUserMessagesInConversation: number
- numUserMessagesInConversation: number
-}
-
-export interface ChatResponseMessage {
- text: string
- spokenText?: string
- author: string
- createdAt: Date
- timestamp: Date
- messageId: string
- requestId: string
- offense: string
- adaptiveCards: AdaptiveCard[]
- sourceAttributions: SourceAttribution[]
- feedback: Feedback
- contentOrigin: string
- messageType?: string
- contentType?: string
- privacy: null
- suggestedResponses: SuggestedResponse[]
-}
-
-export interface AdaptiveCard {
- type: string
- version: string
- body: Body[]
-}
-
-export interface Body {
- type: string
- text: string
- wrap: boolean
- size?: string
-}
-
-export interface Feedback {
- tag: null
- updatedOn: null
- type: string
-}
-
-export interface SourceAttribution {
- providerDisplayName: string
- seeMoreUrl: string
- searchQuery: string
-}
-
-export interface SuggestedResponse {
- text: string
- author?: Author
- createdAt?: Date
- timestamp?: Date
- messageId?: string
- messageType?: string
- offense?: string
- feedback?: Feedback
- contentOrigin?: string
- privacy?: null
-}
-
-export interface KBlobRequest {
- knowledgeRequest: KnowledgeRequestContext
- imageBase64?: string
-}
-
-export interface KBlobResponse {
- blobId: string
- processedBlobId?: string
-}
-
-export interface KnowledgeRequestContext {
- imageInfo: ImageInfo;
- knowledgeRequest: KnowledgeRequest;
-}
-
-export interface ImageInfo {
- url?: string;
-}
-
-export interface KnowledgeRequest {
- invokedSkills: string[];
- subscriptionId: string;
- invokedSkillsRequestData: InvokedSkillsRequestData;
- convoData: ConvoData;
-}
-
-export interface ConvoData {
- convoid: string;
- convotone: BingConversationStyle;
-}
-
-export interface InvokedSkillsRequestData {
- enableFaceBlur: boolean;
-}
-
-export interface FileItem {
- url: string;
- status?: 'loading' | 'error' | 'loaded'
-}
diff --git a/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/midas_net_custom.py b/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/midas_net_custom.py
deleted file mode 100644
index 50e4acb5e53d5fabefe3dde16ab49c33c2b7797c..0000000000000000000000000000000000000000
--- a/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/midas_net_custom.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
-This file contains code that is adapted from
-https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
-"""
-import torch
-import torch.nn as nn
-
-from .base_model import BaseModel
-from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
-
-
-class MidasNet_small(BaseModel):
- """Network for monocular depth estimation.
- """
-
- def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
- blocks={'expand': True}):
- """Init.
-
- Args:
- path (str, optional): Path to saved model. Defaults to None.
-            features (int, optional): Number of features. Defaults to 64.
-            backbone (str, optional): Backbone network for encoder. Defaults to efficientnet_lite3.
- """
- print("Loading weights: ", path)
-
- super(MidasNet_small, self).__init__()
-
- use_pretrained = False if path else True
-
- self.channels_last = channels_last
- self.blocks = blocks
- self.backbone = backbone
-
- self.groups = 1
-
- features1=features
- features2=features
- features3=features
- features4=features
- self.expand = False
- if "expand" in self.blocks and self.blocks['expand'] == True:
- self.expand = True
- features1=features
- features2=features*2
- features3=features*4
- features4=features*8
-
- self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
-
- self.scratch.activation = nn.ReLU(False)
-
- self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
- self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
- self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
- self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
-
-
- self.scratch.output_conv = nn.Sequential(
- nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
- Interpolate(scale_factor=2, mode="bilinear"),
- nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
- self.scratch.activation,
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
- nn.ReLU(True) if non_negative else nn.Identity(),
- nn.Identity(),
- )
-
- if path:
- self.load(path)
-
-
- def forward(self, x):
- """Forward pass.
-
- Args:
- x (tensor): input data (image)
-
- Returns:
- tensor: depth
- """
- if self.channels_last==True:
- print("self.channels_last = ", self.channels_last)
- x.contiguous(memory_format=torch.channels_last)
-
-
- layer_1 = self.pretrained.layer1(x)
- layer_2 = self.pretrained.layer2(layer_1)
- layer_3 = self.pretrained.layer3(layer_2)
- layer_4 = self.pretrained.layer4(layer_3)
-
- layer_1_rn = self.scratch.layer1_rn(layer_1)
- layer_2_rn = self.scratch.layer2_rn(layer_2)
- layer_3_rn = self.scratch.layer3_rn(layer_3)
- layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-
- path_4 = self.scratch.refinenet4(layer_4_rn)
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
- out = self.scratch.output_conv(path_1)
-
- return torch.squeeze(out, dim=1)
-
-
-
-def fuse_model(m):
- prev_previous_type = nn.Identity()
- prev_previous_name = ''
- previous_type = nn.Identity()
- previous_name = ''
- for name, module in m.named_modules():
- if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
- # print("FUSED ", prev_previous_name, previous_name, name)
- torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
- elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
- # print("FUSED ", prev_previous_name, previous_name)
- torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
- # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
- # print("FUSED ", previous_name, name)
- # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
-
- prev_previous_type = previous_type
- prev_previous_name = previous_name
- previous_type = type(module)
- previous_name = name
\ No newline at end of file
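
A hypothetical way fuse_model might be applied when preparing the network above for quantization; this assumes the timm weights for the efficientnet_lite3 backbone are available, and fusion is done in eval mode:

model = MidasNet_small(path=None).eval()
fuse_model(model)  # fuses Conv2d + BatchNorm2d (+ ReLU) runs in place via torch.quantization
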
diff --git a/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/modules/losses/lpips.py b/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/modules/losses/lpips.py
deleted file mode 100644
index a7280447694ffc302a7636e7e4d6183408e0aa95..0000000000000000000000000000000000000000
--- a/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/modules/losses/lpips.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
-
-import torch
-import torch.nn as nn
-from torchvision import models
-from collections import namedtuple
-
-from taming.util import get_ckpt_path
-
-
-class LPIPS(nn.Module):
- # Learned perceptual metric
- def __init__(self, use_dropout=True):
- super().__init__()
- self.scaling_layer = ScalingLayer()
-        self.chns = [64, 128, 256, 512, 512]  # vgg16 features
- self.net = vgg16(pretrained=True, requires_grad=False)
- self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
- self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
- self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
- self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
- self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
- self.load_from_pretrained()
- for param in self.parameters():
- param.requires_grad = False
-
- def load_from_pretrained(self, name="vgg_lpips"):
- ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips")
- self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
- print("loaded pretrained LPIPS loss from {}".format(ckpt))
-
- @classmethod
- def from_pretrained(cls, name="vgg_lpips"):
- if name != "vgg_lpips":
- raise NotImplementedError
- model = cls()
- ckpt = get_ckpt_path(name)
- model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
- return model
-
- def forward(self, input, target):
- in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
- outs0, outs1 = self.net(in0_input), self.net(in1_input)
- feats0, feats1, diffs = {}, {}, {}
- lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
- for kk in range(len(self.chns)):
- feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
- diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
-
- res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
- val = res[0]
- for l in range(1, len(self.chns)):
- val += res[l]
- return val
-
-
-class ScalingLayer(nn.Module):
- def __init__(self):
- super(ScalingLayer, self).__init__()
- self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
- self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])
-
- def forward(self, inp):
- return (inp - self.shift) / self.scale
-
-
-class NetLinLayer(nn.Module):
- """ A single linear layer which does a 1x1 conv """
- def __init__(self, chn_in, chn_out=1, use_dropout=False):
- super(NetLinLayer, self).__init__()
- layers = [nn.Dropout(), ] if (use_dropout) else []
- layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
- self.model = nn.Sequential(*layers)
-
-
-class vgg16(torch.nn.Module):
- def __init__(self, requires_grad=False, pretrained=True):
- super(vgg16, self).__init__()
- vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
- self.slice1 = torch.nn.Sequential()
- self.slice2 = torch.nn.Sequential()
- self.slice3 = torch.nn.Sequential()
- self.slice4 = torch.nn.Sequential()
- self.slice5 = torch.nn.Sequential()
- self.N_slices = 5
- for x in range(4):
- self.slice1.add_module(str(x), vgg_pretrained_features[x])
- for x in range(4, 9):
- self.slice2.add_module(str(x), vgg_pretrained_features[x])
- for x in range(9, 16):
- self.slice3.add_module(str(x), vgg_pretrained_features[x])
- for x in range(16, 23):
- self.slice4.add_module(str(x), vgg_pretrained_features[x])
- for x in range(23, 30):
- self.slice5.add_module(str(x), vgg_pretrained_features[x])
- if not requires_grad:
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, X):
- h = self.slice1(X)
- h_relu1_2 = h
- h = self.slice2(h)
- h_relu2_2 = h
- h = self.slice3(h)
- h_relu3_3 = h
- h = self.slice4(h)
- h_relu4_3 = h
- h = self.slice5(h)
- h_relu5_3 = h
- vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
- out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
- return out
-
-
-def normalize_tensor(x,eps=1e-10):
- norm_factor = torch.sqrt(torch.sum(x**2,dim=1,keepdim=True))
- return x/(norm_factor+eps)
-
-
-def spatial_average(x, keepdim=True):
- return x.mean([2,3],keepdim=keepdim)
-
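
A rough usage sketch of the LPIPS metric above; it assumes the pretrained VGG-LPIPS checkpoint can be fetched via taming.util.get_ckpt_path and that inputs are scaled to [-1, 1]:

import torch

lpips = LPIPS().eval()
x = torch.rand(1, 3, 256, 256) * 2 - 1
y = torch.rand(1, 3, 256, 256) * 2 - 1
with torch.no_grad():
    d = lpips(x, y)  # shape (1, 1, 1, 1); larger means perceptually more different
print(d.item())
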
diff --git a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/widgets/duplicates.py b/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/widgets/duplicates.py
deleted file mode 100644
index d140c20a036f7e1c1655178e8488d3e7d49fc221..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/widgets/duplicates.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import gradio as gr
-
-from widgets.widget_base import Widget
-from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
-import utils
-import utils.dataset_utils as ds_utils
-
-logs = utils.prepare_logging(__file__)
-
-
-class Duplicates(Widget):
- def __init__(self):
- duplicates_text = f"""
- Use this widget to identify text strings that appear more than once.
-
- A model's training and testing may be negatively affected by unwarranted duplicates ([Lee et al., 2021](https://arxiv.org/abs/2107.06499))
-
- ------
-
- ### Here is the list of all the duplicated items and their counts in the dataset.
- """
- self.duplicates_intro = gr.Markdown(render=False, value=duplicates_text)
- self.duplicates_df = gr.DataFrame(render=False)
- self.duplicates_text = gr.Markdown(render=False)
-
- def render(self):
-        with gr.TabItem("Duplicates"):
- self.duplicates_intro.render()
- self.duplicates_text.render()
- self.duplicates_df.render()
-
- def update(self, dstats: dmt_cls):
- output = {}
-
- if not dstats.duplicates_results:
- output[self.duplicates_df] = gr.DataFrame.update(visible=False)
- output[self.duplicates_text] = gr.Markdown.update(visible=True,
- value="There are no duplicates in this dataset! 🥳")
- else:
- dupes_df_tmp = ds_utils.counter_dict_to_df(dstats.dups_dict, key_as_column=True)
- dupes_df_tmp.columns = ["instance", "count"]
- # Nice to have the counts show up first, because the instances
- # can be quite long (and run off the page)
- dupes_df = dupes_df_tmp[["count", "instance"]]
- output[self.duplicates_df] = gr.DataFrame.update(visible=True, value=dupes_df)
-
- duplicates_text = f"The fraction of data that is duplicate is {str(round(dstats.dups_frac, 4))}"
- output[self.duplicates_text] = gr.Markdown.update(value=duplicates_text, visible=True)
-
- return output
-
-
- @property
- def output_components(self):
- return [
- self.duplicates_text,
- self.duplicates_df,
- ]
-
- def add_events(self, state: gr.State):
- pass
\ No newline at end of file
diff --git a/spaces/HugoDzz/spaceship_drift/build/_app/immutable/nodes/1.dac78f11.js b/spaces/HugoDzz/spaceship_drift/build/_app/immutable/nodes/1.dac78f11.js
deleted file mode 100644
index 2b37e112a475c6450c223f618816264c8841474f..0000000000000000000000000000000000000000
--- a/spaces/HugoDzz/spaceship_drift/build/_app/immutable/nodes/1.dac78f11.js
+++ /dev/null
@@ -1 +0,0 @@
-import{S as x,i as H,s as S,k as u,q as h,a as g,l as d,m as v,r as b,h as m,c as k,b as _,G as E,u as $,H as q,I as y}from"../chunks/index.0d3f7c7a.js";import{p as C}from"../chunks/stores.bd2e29f1.js";function G(l){var f;let a,t=l[0].status+"",r,o,n,p=((f=l[0].error)==null?void 0:f.message)+"",c;return{c(){a=u("h1"),r=h(t),o=g(),n=u("p"),c=h(p)},l(e){a=d(e,"H1",{});var s=v(a);r=b(s,t),s.forEach(m),o=k(e),n=d(e,"P",{});var i=v(n);c=b(i,p),i.forEach(m)},m(e,s){_(e,a,s),E(a,r),_(e,o,s),_(e,n,s),E(n,c)},p(e,[s]){var i;s&1&&t!==(t=e[0].status+"")&&$(r,t),s&1&&p!==(p=((i=e[0].error)==null?void 0:i.message)+"")&&$(c,p)},i:q,o:q,d(e){e&&m(a),e&&m(o),e&&m(n)}}}function I(l,a,t){let r;return y(l,C,o=>t(0,r=o)),[r]}class w extends x{constructor(a){super(),H(this,a,I,G,S,{})}}export{w as component};
diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/utils.py b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/utils.py
deleted file mode 100644
index 734d047f1bb8e3aa98c88e152eee7f91fea3d814..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/utils.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-# author: adefossez
-
-import functools
-import logging
-from contextlib import contextmanager
-import inspect
-import time
-
-logger = logging.getLogger(__name__)
-
-EPS = 1e-8
-
-
-def capture_init(init):
- """capture_init.
-
- Decorate `__init__` with this, and you can then
- recover the *args and **kwargs passed to it in `self._init_args_kwargs`
- """
- @functools.wraps(init)
- def __init__(self, *args, **kwargs):
- self._init_args_kwargs = (args, kwargs)
- init(self, *args, **kwargs)
-
- return __init__
-
-
-def deserialize_model(package, strict=False):
- """deserialize_model.
-
- """
- klass = package['class']
- if strict:
- model = klass(*package['args'], **package['kwargs'])
- else:
- sig = inspect.signature(klass)
- kw = package['kwargs']
- for key in list(kw):
- if key not in sig.parameters:
- logger.warning("Dropping inexistant parameter %s", key)
- del kw[key]
- model = klass(*package['args'], **kw)
- model.load_state_dict(package['state'])
- return model
-
-
-def copy_state(state):
- return {k: v.cpu().clone() for k, v in state.items()}
-
-
-def serialize_model(model):
- args, kwargs = model._init_args_kwargs
- state = copy_state(model.state_dict())
- return {"class": model.__class__, "args": args, "kwargs": kwargs, "state": state}
-
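The `capture_init` decorator and the `serialize_model` / `deserialize_model` pair above work together: the decorator records the constructor arguments so the module can be rebuilt later. A minimal sketch of the round trip, using a hypothetical `TinyDenoiser` module that is not part of this file:

```python
import torch.nn as nn

class TinyDenoiser(nn.Module):
    @capture_init                      # records (args, kwargs) on the instance
    def __init__(self, hidden=16):
        super().__init__()
        self.net = nn.Linear(hidden, hidden)

model = TinyDenoiser(hidden=32)
package = serialize_model(model)       # {'class', 'args', 'kwargs', 'state'}
clone = deserialize_model(package)     # same architecture, same weights
```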
-
-@contextmanager
-def swap_state(model, state):
- """
- Context manager that swaps the state of a model, e.g:
-
- # model is in old state
- with swap_state(model, new_state):
- # model in new state
- # model back to old state
- """
- old_state = copy_state(model.state_dict())
- model.load_state_dict(state)
- try:
- yield
- finally:
- model.load_state_dict(old_state)
-
-
-def pull_metric(history, name):
- out = []
- for metrics in history:
- if name in metrics:
- out.append(metrics[name])
- return out
-
-
-class LogProgress:
- """
- Sort of like tqdm, but using log lines rather than real-time updates.
- Args:
- - logger: logger obtained from `logging.getLogger`,
- - iterable: iterable object to wrap
- - updates (int): number of lines that will be printed, e.g.
- if `updates=5`, log every 1/5th of the total length.
- - total (int): length of the iterable, in case it does not support
- `len`.
- - name (str): prefix to use in the log.
- - level: logging level (like `logging.INFO`).
- """
- def __init__(self,
- logger,
- iterable,
- updates=5,
- total=None,
- name="LogProgress",
- level=logging.INFO):
- self.iterable = iterable
- self.total = total or len(iterable)
- self.updates = updates
- self.name = name
- self.logger = logger
- self.level = level
-
- def update(self, **infos):
- self._infos = infos
-
- def __iter__(self):
- self._iterator = iter(self.iterable)
- self._index = -1
- self._infos = {}
- self._begin = time.time()
- return self
-
- def __next__(self):
- self._index += 1
- try:
- value = next(self._iterator)
- except StopIteration:
- raise
- else:
- return value
- finally:
- log_every = max(1, self.total // self.updates)
- # logging is delayed by 1 it, in order to have the metrics from update
- if self._index >= 1 and self._index % log_every == 0:
- self._log()
-
- def _log(self):
- self._speed = (1 + self._index) / (time.time() - self._begin)
- infos = " | ".join(f"{k.capitalize()} {v}" for k, v in self._infos.items())
- if self._speed < 1e-4:
- speed = "oo sec/it"
- elif self._speed < 0.1:
- speed = f"{1/self._speed:.1f} sec/it"
- else:
- speed = f"{self._speed:.1f} it/sec"
- out = f"{self.name} | {self._index}/{self.total} | {speed}"
- if infos:
- out += " | " + infos
- self.logger.log(self.level, out)
-
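A short usage sketch for `LogProgress`, assuming a plain `logging` logger and a dummy loop; the metric below is purely illustrative:

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")

progress = LogProgress(log, range(1000), updates=4, name="train")
for step in progress:
    loss = 1.0 / (step + 1)                 # placeholder metric
    progress.update(loss=f"{loss:.4f}")     # shown on the next logged line
```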
-
-def colorize(text, color):
- """
- Display text with some ANSI color in the terminal.
- """
- code = f"\033[{color}m"
- restore = "\033[0m"
- return "".join([code, text, restore])
-
-
-def bold(text):
- """
- Display text in bold in the terminal.
- """
- return colorize(text, "1")
-
-
-def cal_snr(lbl, est):
- import torch
- y = 10.0 * torch.log10(
- torch.sum(lbl**2, dim=-1) / (torch.sum((est-lbl)**2, dim=-1) + EPS) +
- EPS
- )
- return y
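For reference, `cal_snr` computes the signal-to-noise ratio in decibels of an estimate `est` against a reference `lbl`, summing over the last (time) dimension, with `EPS = 1e-8` guarding both the division and the logarithm:

```latex
\mathrm{SNR}(\mathrm{lbl},\mathrm{est}) =
  10\,\log_{10}\!\left(\frac{\sum_t \mathrm{lbl}_t^{2}}
  {\sum_t \bigl(\mathrm{est}_t-\mathrm{lbl}_t\bigr)^{2}+\varepsilon}+\varepsilon\right)
```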
diff --git a/spaces/Illumotion/Koboldcpp/examples/jeopardy/graph.py b/spaces/Illumotion/Koboldcpp/examples/jeopardy/graph.py
deleted file mode 100644
index 8bc0706b86d05617e4733a8ed46b774dc186e602..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/examples/jeopardy/graph.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python3
-import matplotlib.pyplot as plt
-import os
-import csv
-
-labels = []
-numbers = []
-numEntries = 1
-
-rows = []
-
-
-def bar_chart(numbers, labels, pos):
- plt.bar(pos, numbers, color='blue')
- plt.xticks(ticks=pos, labels=labels)
- plt.title("Jeopardy Results by Model")
- plt.xlabel("Model")
- plt.ylabel("Questions Correct")
- plt.show()
-
-
-def calculatecorrect():
- directory = os.fsencode("./examples/jeopardy/results/")
- csv_reader = csv.reader(open("./examples/jeopardy/qasheet.csv", 'rt'), delimiter=',')
- for row in csv_reader:
- global rows
- rows.append(row)
- for listing in os.listdir(directory):
- filename = os.fsdecode(listing)
- if filename.endswith(".txt"):
- file = open("./examples/jeopardy/results/" + filename, "rt")
- global labels
- global numEntries
- global numbers
- labels.append(filename[:-4])
- numEntries += 1
- i = 1
- totalcorrect = 0
- for line in file.readlines():
- if line.strip() != "------":
- print(line)
- else:
- print("Correct answer: " + rows[i][2] + "\n")
- i += 1
- print("Did the AI get the question right? (y/n)")
- if input() == "y":
- totalcorrect += 1
- numbers.append(totalcorrect)
-
-
-if __name__ == '__main__':
- calculatecorrect()
- pos = list(range(numEntries))
- labels.append("Human")
- numbers.append(48.11)
- bar_chart(numbers, labels, pos)
- print(labels)
- print(numbers)
diff --git a/spaces/IlyaGusev/saiga2_13b_gguf/app.py b/spaces/IlyaGusev/saiga2_13b_gguf/app.py
deleted file mode 100644
index 22ab9b13ba29a4d78918c78067c96d8fb671faf8..0000000000000000000000000000000000000000
--- a/spaces/IlyaGusev/saiga2_13b_gguf/app.py
+++ /dev/null
@@ -1,210 +0,0 @@
-import gradio as gr
-
-import copy
-import random
-import os
-import requests
-import time
-import sys
-
-from huggingface_hub import snapshot_download
-from llama_cpp import Llama
-
-
-SYSTEM_PROMPT = "Ты — Сайга, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им."
-SYSTEM_TOKEN = 1788
-USER_TOKEN = 1404
-BOT_TOKEN = 9225
-LINEBREAK_TOKEN = 13
-
-
-ROLE_TOKENS = {
- "user": USER_TOKEN,
- "bot": BOT_TOKEN,
- "system": SYSTEM_TOKEN
-}
-
-
-def get_message_tokens(model, role, content):
- message_tokens = model.tokenize(content.encode("utf-8"))
- message_tokens.insert(1, ROLE_TOKENS[role])
- message_tokens.insert(2, LINEBREAK_TOKEN)
- message_tokens.append(model.token_eos())
- return message_tokens
-
-
-def get_system_tokens(model):
- system_message = {"role": "system", "content": SYSTEM_PROMPT}
- return get_message_tokens(model, **system_message)
-
-
-repo_name = "IlyaGusev/saiga2_13b_gguf"
-model_name = "model-q4_K.gguf"
-
-snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
-
-model = Llama(
- model_path=model_name,
- n_ctx=2000,
- n_parts=1,
-)
-
-max_new_tokens = 1500
-
-def user(message, history):
- new_history = history + [[message, None]]
- return "", new_history
-
-
-def bot(
- history,
- system_prompt,
- top_p,
- top_k,
- temp
-):
- tokens = get_system_tokens(model)[:]
- tokens.append(LINEBREAK_TOKEN)
-
- for user_message, bot_message in history[:-1]:
- message_tokens = get_message_tokens(model=model, role="user", content=user_message)
- tokens.extend(message_tokens)
- if bot_message:
- message_tokens = get_message_tokens(model=model, role="bot", content=bot_message)
- tokens.extend(message_tokens)
-
- last_user_message = history[-1][0]
- message_tokens = get_message_tokens(model=model, role="user", content=last_user_message)
- tokens.extend(message_tokens)
-
- role_tokens = [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]
- tokens.extend(role_tokens)
- generator = model.generate(
- tokens,
- top_k=top_k,
- top_p=top_p,
- temp=temp
- )
-
- partial_text = ""
- for i, token in enumerate(generator):
- if token == model.token_eos() or (max_new_tokens is not None and i >= max_new_tokens):
- break
- partial_text += model.detokenize([token]).decode("utf-8", "ignore")
- history[-1][1] = partial_text
- yield history
-
-
-with gr.Blocks(
- theme=gr.themes.Soft()
-) as demo:
- favicon = ''
- gr.Markdown(
- f"""
- {favicon}Saiga2 13B GGUF Q4_K
-
- This is a demo of a **Russian**-speaking LLaMA2-based model. If you are interested in other languages, please check other models, such as [MPT-7B-Chat](https://huggingface.co/spaces/mosaicml/mpt-7b-chat).
-
- Это демонстрационная версия [квантованной Сайги-2 с 13 миллиардами параметров](https://huggingface.co/IlyaGusev/saiga2_13b_ggml), работающая на CPU.
-
- Сайга-2 — это разговорная языковая модель, которая основана на [LLaMA-2](https://ai.meta.com/llama/) и дообучена на корпусах, сгенерированных ChatGPT, таких как [ru_turbo_alpaca](https://huggingface.co/datasets/IlyaGusev/ru_turbo_alpaca), [ru_turbo_saiga](https://huggingface.co/datasets/IlyaGusev/ru_turbo_saiga) и [gpt_roleplay_realm](https://huggingface.co/datasets/IlyaGusev/gpt_roleplay_realm).
- """
- )
- with gr.Row():
- with gr.Column(scale=5):
- system_prompt = gr.Textbox(label="Системный промпт", placeholder="", value=SYSTEM_PROMPT, interactive=False)
- chatbot = gr.Chatbot(label="Диалог").style(height=400)
- with gr.Column(min_width=80, scale=1):
- with gr.Tab(label="Параметры генерации"):
- top_p = gr.Slider(
- minimum=0.0,
- maximum=1.0,
- value=0.9,
- step=0.05,
- interactive=True,
- label="Top-p",
- )
- top_k = gr.Slider(
- minimum=10,
- maximum=100,
- value=30,
- step=5,
- interactive=True,
- label="Top-k",
- )
- temp = gr.Slider(
- minimum=0.0,
- maximum=2.0,
- value=0.01,
- step=0.01,
- interactive=True,
- label="Температура"
- )
- with gr.Row():
- with gr.Column():
- msg = gr.Textbox(
- label="Отправить сообщение",
- placeholder="Отправить сообщение",
- show_label=False,
- ).style(container=False)
- with gr.Column():
- with gr.Row():
- submit = gr.Button("Отправить")
- stop = gr.Button("Остановить")
- clear = gr.Button("Очистить")
- with gr.Row():
- gr.Markdown(
- """ПРЕДУПРЕЖДЕНИЕ: Модель может генерировать фактически или этически некорректные тексты. Мы не несём за это ответственность."""
- )
-
- # Pressing Enter
- submit_event = msg.submit(
- fn=user,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=False,
- ).success(
- fn=bot,
- inputs=[
- chatbot,
- system_prompt,
- top_p,
- top_k,
- temp
- ],
- outputs=chatbot,
- queue=True,
- )
-
- # Pressing the button
- submit_click_event = submit.click(
- fn=user,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=False,
- ).success(
- fn=bot,
- inputs=[
- chatbot,
- system_prompt,
- top_p,
- top_k,
- temp
- ],
- outputs=chatbot,
- queue=True,
- )
-
- # Stop generation
- stop.click(
- fn=None,
- inputs=None,
- outputs=None,
- cancels=[submit_event, submit_click_event],
- queue=False,
- )
-
- # Clear history
- clear.click(lambda: None, None, chatbot, queue=False)
-
-demo.queue(max_size=128, concurrency_count=1)
-demo.launch()
diff --git a/spaces/Inderdev07/Attendance-FaceRecognition/app.py b/spaces/Inderdev07/Attendance-FaceRecognition/app.py
deleted file mode 100644
index 384b01a7be987b50ac5366d3f1da18a64310c01a..0000000000000000000000000000000000000000
--- a/spaces/Inderdev07/Attendance-FaceRecognition/app.py
+++ /dev/null
@@ -1,80 +0,0 @@
-from PIL import Image
-import numpy as np
-import cv2
-import requests
-import face_recognition
-import os
-from datetime import datetime
-import streamlit as st
-
-# Set page title and description
-st.title("Face Recognition App")
-st.markdown("This app recognizes faces in an image and updates attendance.")
-
-# Load images for face recognition
-Images = []
-classnames = []
-directory = "photos"
-
-myList = os.listdir(directory)
-
-for cls in myList:
- if os.path.splitext(cls)[1] in [".jpg", ".jpeg"]:
- img_path = os.path.join(directory, cls)
- curImg = cv2.imread(img_path)
- Images.append(curImg)
- classnames.append(os.path.splitext(cls)[0])
-
-def findEncodings(Images):
- encodeList = []
- for img in Images:
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- encode = face_recognition.face_encodings(img)[0]
- encodeList.append(encode)
- return encodeList
-
-encodeListknown = findEncodings(Images)
-
-# Take picture using the camera
-img_file_buffer = st.camera_input("Take a picture")
-if img_file_buffer is not None:
- test_image = Image.open(img_file_buffer)
- image = np.asarray(test_image)
-
- imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)
- imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
- facesCurFrame = face_recognition.face_locations(imgS)
- encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
-
- name = "Unknown" # Default name for unknown faces
-
- if len(encodesCurFrame) > 0:
- for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
- matches = face_recognition.compare_faces(encodeListknown, encodeFace)
- faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
- matchIndex = np.argmin(faceDis)
-
- if matches[matchIndex]:
- name = classnames[matchIndex].upper()
-
- y1, x2, y2, x1 = faceLoc
- y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
- cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
- cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
- cv2.putText(image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
-
- if name != "Unknown":
- url = "https://inderfaceproject.000webhostapp.com"
- url1 = "/update.php"
- data1 = {'name': name}
- response = requests.post(url + url1, data=data1)
-
- if response.status_code == 200:
- st.success("Data updated on: " + url)
- else:
- st.warning("Data not updated")
-
- st.image(image, caption="Detected Face", use_column_width=True)
-
- if name == "Unknown":
- st.info("Face not detected. Please try again.")
\ No newline at end of file
diff --git a/spaces/ItsJayQz/GTA5_Artwork_Diffusion/README.md b/spaces/ItsJayQz/GTA5_Artwork_Diffusion/README.md
deleted file mode 100644
index 09ca00f21f7304a3f13f29d33133378e23ebfaa5..0000000000000000000000000000000000000000
--- a/spaces/ItsJayQz/GTA5_Artwork_Diffusion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: GTA5 Artwork Diffusion
-emoji: 🐢
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.14.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Jack000/glid-3-xl-stable-classifier/README.md b/spaces/Jack000/glid-3-xl-stable-classifier/README.md
deleted file mode 100644
index 630019e8991c1f04be600ed676c781362126e892..0000000000000000000000000000000000000000
--- a/spaces/Jack000/glid-3-xl-stable-classifier/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Glid 3 Xl Stable Classifier
-emoji: 🦀
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.3
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/JennBiggs/HTML5-Dashboard/style.css b/spaces/JennBiggs/HTML5-Dashboard/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/JennBiggs/HTML5-Dashboard/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_resnet.py b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_resnet.py
deleted file mode 100644
index bccf9bf2efcec54888fd03d34ced016703bf2fe7..0000000000000000000000000000000000000000
--- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_resnet.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import cv2
-import numpy as np
-from PIL import Image
-import pickle
-import tensorflow as tf
-from tensorflow.keras.applications.resnet50 import decode_predictions
-import io
-
-
-class gpResNet:
- def __init__(self,url) -> None:
- self.image = url
-
- def predict_image(self):
- # Load the model
- loaded_model = tf.keras.models.load_model("././Model/Guineapig/resnet50/ResnetSavemodel.h5")
-
- picklefilepath = "././Model/Guineapig/resnet50/dataSaved.pkl"
-
- with open(picklefilepath, 'rb') as file:
- saved_data = pickle.load(file)
- self.animal_breed = saved_data['class_name']
-
-
- im = Image.open(self.image)
- img = im.convert("RGB")
- img= np.asarray(img)
- image_resized= cv2.resize(img, (256,256))
- image=np.expand_dims(image_resized,axis=0)
- print(image.shape)
-
- pred=loaded_model.predict(image)
-
- pred_proba = self.custom_decode_predictions(pred,top=1)
- rate = 0
- for _,confidence in pred_proba:
- rate = confidence
-
- output_class= self.animal_breed[np.argmax(pred)]
-
- return [output_class, rate]
-
- def custom_decode_predictions(self, prediction, top=3):
- # convert 2d Array of shape (1,10) to a 1D array of shape (10,)
- prediction = np.squeeze(prediction)
-
- #get the indices of the top "top" prediction
- top_indices = prediction.argsort()[-top:][::-1]
-
- class_labels = self.animal_breed
-
- top_prediction = [(class_labels[i], prediction[i]) for i in top_indices]
-
- return top_prediction
diff --git a/spaces/Junity/TokaiTeio-SVC/hubert/hubert_model_onnx.py b/spaces/Junity/TokaiTeio-SVC/hubert/hubert_model_onnx.py
deleted file mode 100644
index d18f3c2a0fc29592a573a9780308d38f059640b9..0000000000000000000000000000000000000000
--- a/spaces/Junity/TokaiTeio-SVC/hubert/hubert_model_onnx.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import copy
-import random
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as t_func
-from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
-
-class Hubert(nn.Module):
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
- super().__init__()
- self._mask = mask
- self.feature_extractor = FeatureExtractor()
- self.feature_projection = FeatureProjection()
- self.positional_embedding = PositionalConvEmbedding()
- self.norm = nn.LayerNorm(768)
- self.dropout = nn.Dropout(0.1)
- self.encoder = TransformerEncoder(
- nn.TransformerEncoderLayer(
- 768, 12, 3072, activation="gelu", batch_first=True
- ),
- 12,
- )
- self.proj = nn.Linear(768, 256)
-
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- mask = None
- if self.training and self._mask:
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
- x[mask] = self.masked_spec_embed.to(x.dtype)
- return x, mask
-
- def encode(
- self, x: torch.Tensor, layer: Optional[int] = None
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- x = self.feature_extractor(x)
- x = self.feature_projection(x.transpose(1, 2))
- x, mask = self.mask(x)
- x = x + self.positional_embedding(x)
- x = self.dropout(self.norm(x))
- x = self.encoder(x, output_layer=layer)
- return x, mask
-
- def logits(self, x: torch.Tensor) -> torch.Tensor:
- logits = torch.cosine_similarity(
- x.unsqueeze(2),
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
- dim=-1,
- )
- return logits / 0.1
-
-
-class HubertSoft(Hubert):
- def __init__(self):
- super().__init__()
-
- def units(self, wav: torch.Tensor) -> torch.Tensor:
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
- x, _ = self.encode(wav)
- return self.proj(x)
-
- def forward(self, x):
- return self.units(x)
-
-class FeatureExtractor(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
- self.norm0 = nn.GroupNorm(512, 512)
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = t_func.gelu(self.norm0(self.conv0(x)))
- x = t_func.gelu(self.conv1(x))
- x = t_func.gelu(self.conv2(x))
- x = t_func.gelu(self.conv3(x))
- x = t_func.gelu(self.conv4(x))
- x = t_func.gelu(self.conv5(x))
- x = t_func.gelu(self.conv6(x))
- return x
-
-
-class FeatureProjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.norm = nn.LayerNorm(512)
- self.projection = nn.Linear(512, 768)
- self.dropout = nn.Dropout(0.1)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.norm(x)
- x = self.projection(x)
- x = self.dropout(x)
- return x
-
-
-class PositionalConvEmbedding(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv = nn.Conv1d(
- 768,
- 768,
- kernel_size=128,
- padding=128 // 2,
- groups=16,
- )
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.conv(x.transpose(1, 2))
- x = t_func.gelu(x[:, :, :-1])
- return x.transpose(1, 2)
-
-
-class TransformerEncoder(nn.Module):
- def __init__(
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
- ) -> None:
- super(TransformerEncoder, self).__init__()
- self.layers = nn.ModuleList(
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
- )
- self.num_layers = num_layers
-
- def forward(
- self,
- src: torch.Tensor,
- mask: torch.Tensor = None,
- src_key_padding_mask: torch.Tensor = None,
- output_layer: Optional[int] = None,
- ) -> torch.Tensor:
- output = src
- for layer in self.layers[:output_layer]:
- output = layer(
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
- )
- return output
-
-
-def _compute_mask(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- device: torch.device,
- min_masks: int = 0,
-) -> torch.Tensor:
- batch_size, sequence_length = shape
-
- if mask_length < 1:
- raise ValueError("`mask_length` has to be bigger than 0.")
-
- if mask_length > sequence_length:
- raise ValueError(
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
- )
-
- # compute number of masked spans in batch
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
- num_masked_spans = max(num_masked_spans, min_masks)
-
- # make sure num masked indices <= sequence_length
- if num_masked_spans * mask_length > sequence_length:
- num_masked_spans = sequence_length // mask_length
-
- # SpecAugment mask to fill
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
- uniform_dist = torch.ones(
- (batch_size, sequence_length - (mask_length - 1)), device=device
- )
-
- # get random indices to mask
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
- # expand masked indices to masked spans
- mask_indices = (
- mask_indices.unsqueeze(dim=-1)
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- offsets = (
- torch.arange(mask_length, device=device)[None, None, :]
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- mask_idxs = mask_indices + offsets
-
- # scatter indices to mask
- mask = mask.scatter(1, mask_idxs, True)
-
- return mask
-
-
-def hubert_soft(
- path: str,
-) -> HubertSoft:
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
- Args:
- path (str): path of a pretrained model
- """
- hubert = HubertSoft()
- checkpoint = torch.load(path)
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
- hubert.load_state_dict(checkpoint)
- hubert.eval()
- return hubert
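A minimal inference sketch for the soft-unit extractor above, assuming a hypothetical checkpoint path and a 1-second waveform at 16 kHz:

```python
import torch

hubert = hubert_soft("hubert-soft.pt")      # hypothetical checkpoint path
wav = torch.zeros(1, 1, 16000)              # (batch, channels, samples)
with torch.inference_mode():
    units = hubert.units(wav)               # (batch, frames, 256) soft units
```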
diff --git a/spaces/Kaludi/VirtualBrainGPT/README.md b/spaces/Kaludi/VirtualBrainGPT/README.md
deleted file mode 100644
index 51f30281b6903d06b15864dfe2afc69bc492be3d..0000000000000000000000000000000000000000
--- a/spaces/Kaludi/VirtualBrainGPT/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
----
-title: VirtualBrainGPT
-emoji: 📝
-colorFrom: blue
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.19.0
-app_file: VirtualBrainGPT.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-# VirtualBrainGPT: Digital Journal 📝
-
-VirtualBrainGPT is a digital journal application that leverages the power of OpenAI's Embeddings and Langchain to create a seamless and efficient journaling experience. With the capability to search and extract information from journal entries in seconds, VirtualBrainGPT becomes an indispensable tool for users who want to recall specific details from their past entries.
-
-# Web App
-Click [here](https://huggingface.co/spaces/Kaludi/VirtualBrainGPT "Here") to view this application online!
-
-## Features
-
-- Create and manage journal entries using a clean interface.
-- Search and extract information from journal entries quickly and accurately.
-- Support for both TXT and PDF file formats.
-- Powered by OpenAI's Embeddings and Langchain for efficient information retrieval.
-
-## Usage
-
-### Brain Entry
-
-In the 'Brain Entry' section, users can create a new journal entry or edit an existing one by choosing a date with the date picker. Once you have completed your entry, click 'Submit' and it will be saved to (or updated in) the `brain_journal.txt` file in the `brain` folder.
-
-### Brain Search
-
-The 'Brain Search' section unlocks the full potential of your digital journal. Ask any question about your journal entries, and the combination of OpenAI's Embeddings and Langchain will return an accurate answer in seconds, no matter how long the entry or document is. This makes it easy to recall specific information from any part of your life, even when you can't remember it yourself. You can also upload your own files instead of the journal; TXT and PDF are currently supported and can be selected from the file-type dropdown. Along with the response, you will also see the number of tokens used and the total cost of the query.
-
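Under the hood, the search follows the usual embed, index, retrieve, and answer pattern. Here is a minimal sketch of that flow using the LangChain 0.0.x-style API implied by the requirements list; the file path, query, and parameter values are illustrative, not the app's exact code:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI

text = open("brain/brain_journal.txt", encoding="utf-8").read()
chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_text(text)

index = FAISS.from_texts(chunks, OpenAIEmbeddings())   # embed and index the journal
query = "What did I do on my birthday last year?"
docs = index.similarity_search(query, k=4)             # fetch the most relevant chunks

chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
print(chain.run(input_documents=docs, question=query))
```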
-## Examples of Use-Cases
-
-This application can be used in many ways. Imagine that five years from now you are trying to recall a specific event you documented in your journal, or that you still have a journal from your childhood. VirtualBrainGPT will locate the exact date and give you a detailed account of that situation in a matter of seconds, along with any related memories you may have forgotten about. An application like this could also offer invaluable support for individuals with Alzheimer's, helping them retrieve memories that may otherwise be lost and share them with their loved ones.
-
-## Tools & Libraries Used
-
-### Tools
-- OpenAI [Embeddings](https://platform.openai.com/docs/guides/embeddings)
-- [LangChain](https://python.langchain.com/en/latest/use_cases/question_answering.html)
-- [Streamlit](https://streamlit.io/)
-
-### Libraries
-- Streamlit
-- OpenAI
-- PyPDF2
-- LangChain
-- python-dotenv
-- tiktoken
-- faiss-cpu
-
-## Installation
-
-To install VirtualBrainGPT, you need to have Python 3.7+ installed. Follow these steps to install the necessary dependencies:
-
-1. Clone this repository:
-
-`git clone https://github.com/Kaludii/VirtualBrainGPT.git`
-
-2. Change directory to the cloned repository:
-
-`cd VirtualBrainGPT`
-
-3. Install the required packages:
-
-`pip install -r requirements.txt`
-
-4. Run the Streamlit application:
-
-`streamlit run VirtualBrainGPT.py`
-
-## About the Developer
-
-This application was developed by [Kaludii](https://github.com/Kaludii) using the tools and libraries linked above. Kaludii is an AI enthusiast who is passionate about developing and applying large language models to solve real-world problems quickly and without stress.
-
-## Contributions
-
-If you have any suggestions or improvements for this project, feel free to open an issue or submit a pull request. Your contributions are always welcome!
\ No newline at end of file
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/nets_123812KB.py b/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/nets_123812KB.py
deleted file mode 100644
index becbfae85683a13bbb19d3ea6c840da24e61e01e..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/nets_123812KB.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from . import layers_123821KB as layers
-
-
-class BaseASPPNet(nn.Module):
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
- super(BaseASPPNet, self).__init__()
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
- def __call__(self, x):
- h, e1 = self.enc1(x)
- h, e2 = self.enc2(h)
- h, e3 = self.enc3(h)
- h, e4 = self.enc4(h)
-
- h = self.aspp(h)
-
- h = self.dec4(h, e4)
- h = self.dec3(h, e3)
- h = self.dec2(h, e2)
- h = self.dec1(h, e1)
-
- return h
-
-
-class CascadedASPPNet(nn.Module):
- def __init__(self, n_fft):
- super(CascadedASPPNet, self).__init__()
- self.stg1_low_band_net = BaseASPPNet(2, 32)
- self.stg1_high_band_net = BaseASPPNet(2, 32)
-
- self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
- self.stg2_full_band_net = BaseASPPNet(16, 32)
-
- self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
- self.stg3_full_band_net = BaseASPPNet(32, 64)
-
- self.out = nn.Conv2d(64, 2, 1, bias=False)
- self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
- self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
-
- self.max_bin = n_fft // 2
- self.output_bin = n_fft // 2 + 1
-
- self.offset = 128
-
- def forward(self, x, aggressiveness=None):
- mix = x.detach()
- x = x.clone()
-
- x = x[:, :, : self.max_bin]
-
- bandw = x.size()[2] // 2
- aux1 = torch.cat(
- [
- self.stg1_low_band_net(x[:, :, :bandw]),
- self.stg1_high_band_net(x[:, :, bandw:]),
- ],
- dim=2,
- )
-
- h = torch.cat([x, aux1], dim=1)
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
- h = torch.cat([x, aux1, aux2], dim=1)
- h = self.stg3_full_band_net(self.stg3_bridge(h))
-
- mask = torch.sigmoid(self.out(h))
- mask = F.pad(
- input=mask,
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
- mode="replicate",
- )
-
- if self.training:
- aux1 = torch.sigmoid(self.aux1_out(aux1))
- aux1 = F.pad(
- input=aux1,
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
- mode="replicate",
- )
- aux2 = torch.sigmoid(self.aux2_out(aux2))
- aux2 = F.pad(
- input=aux2,
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
- mode="replicate",
- )
- return mask * mix, aux1 * mix, aux2 * mix
- else:
- if aggressiveness:
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
- mask[:, :, : aggressiveness["split_bin"]],
- 1 + aggressiveness["value"] / 3,
- )
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
- mask[:, :, aggressiveness["split_bin"] :],
- 1 + aggressiveness["value"],
- )
-
- return mask * mix
-
- def predict(self, x_mag, aggressiveness=None):
- h = self.forward(x_mag, aggressiveness)
-
- if self.offset > 0:
- h = h[:, :, :, self.offset : -self.offset]
- assert h.size()[3] > 0
-
- return h
diff --git a/spaces/Kevin676/AutoGPT/autogpt/agent/agent_manager.py b/spaces/Kevin676/AutoGPT/autogpt/agent/agent_manager.py
deleted file mode 100644
index 898767a485e50b5e62625a7883edf1b30d5fddf9..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/AutoGPT/autogpt/agent/agent_manager.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""Agent manager for managing GPT agents"""
-from __future__ import annotations
-
-from typing import Union
-
-from autogpt.config.config import Singleton
-from autogpt.llm_utils import create_chat_completion
-
-
-class AgentManager(metaclass=Singleton):
- """Agent manager for managing GPT agents"""
-
- def __init__(self):
- self.next_key = 0
- self.agents = {} # key, (task, full_message_history, model)
-
- # Create new GPT agent
- # TODO: Centralise use of create_chat_completion() to globally enforce token limit
-
- def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
- """Create a new agent and return its key
-
- Args:
- task: The task to perform
- prompt: The prompt to use
- model: The model to use
-
- Returns:
- The key of the new agent
- """
- messages = [
- {"role": "user", "content": prompt},
- ]
-
- # Start GPT instance
- agent_reply = create_chat_completion(
- model=model,
- messages=messages,
- )
-
- # Update full message history
- messages.append({"role": "assistant", "content": agent_reply})
-
- key = self.next_key
- # This is done instead of len(agents) to make keys unique even if agents
- # are deleted
- self.next_key += 1
-
- self.agents[key] = (task, messages, model)
-
- return key, agent_reply
-
- def message_agent(self, key: str | int, message: str) -> str:
- """Send a message to an agent and return its response
-
- Args:
- key: The key of the agent to message
- message: The message to send to the agent
-
- Returns:
- The agent's response
- """
- task, messages, model = self.agents[int(key)]
-
- # Add user message to message history before sending to agent
- messages.append({"role": "user", "content": message})
-
- # Start GPT instance
- agent_reply = create_chat_completion(
- model=model,
- messages=messages,
- )
-
- # Update full message history
- messages.append({"role": "assistant", "content": agent_reply})
-
- return agent_reply
-
- def list_agents(self) -> list[tuple[str | int, str]]:
- """Return a list of all agents
-
- Returns:
- A list of tuples of the form (key, task)
- """
-
- # Return a list of agent keys and their tasks
- return [(key, task) for key, (task, _, _) in self.agents.items()]
-
- def delete_agent(self, key: Union[str, int]) -> bool:
- """Delete an agent from the agent manager
-
- Args:
- key: The key of the agent to delete
-
- Returns:
- True if successful, False otherwise
- """
-
- try:
- del self.agents[int(key)]
- return True
- except KeyError:
- return False
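A brief usage sketch for `AgentManager`; the task, prompt, and model strings below are placeholders:

```python
from autogpt.agent.agent_manager import AgentManager

manager = AgentManager()                      # Singleton: always the same instance
key, first_reply = manager.create_agent(
    task="summarize a document",
    prompt="You are a helpful summarization agent.",
    model="gpt-3.5-turbo",
)
print(manager.list_agents())                  # e.g. [(0, 'summarize a document')]
reply = manager.message_agent(key, "Shorten the summary to one sentence.")
manager.delete_agent(key)
```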
diff --git a/spaces/Kevin676/AutoGPT/autogpt/memory/__init__.py b/spaces/Kevin676/AutoGPT/autogpt/memory/__init__.py
deleted file mode 100644
index 3d18704c70dfc287642b1923e6f2e1f72a5f2a62..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/AutoGPT/autogpt/memory/__init__.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from autogpt.memory.local import LocalCache
-from autogpt.memory.no_memory import NoMemory
-
-# List of supported memory backends
-# Add a backend to this list if the import attempt is successful
-supported_memory = ["local", "no_memory"]
-
-try:
- from autogpt.memory.redismem import RedisMemory
-
- supported_memory.append("redis")
-except ImportError:
- # print("Redis not installed. Skipping import.")
- RedisMemory = None
-
-try:
- from autogpt.memory.pinecone import PineconeMemory
-
- supported_memory.append("pinecone")
-except ImportError:
- # print("Pinecone not installed. Skipping import.")
- PineconeMemory = None
-
-try:
- from autogpt.memory.weaviate import WeaviateMemory
-
- supported_memory.append("weaviate")
-except ImportError:
- # print("Weaviate not installed. Skipping import.")
- WeaviateMemory = None
-
-try:
- from autogpt.memory.milvus import MilvusMemory
-
- supported_memory.append("milvus")
-except ImportError:
- # print("pymilvus not installed. Skipping import.")
- MilvusMemory = None
-
-
-def get_memory(cfg, init=False):
- memory = None
- if cfg.memory_backend == "pinecone":
- if not PineconeMemory:
- print(
- "Error: Pinecone is not installed. Please install pinecone"
- " to use Pinecone as a memory backend."
- )
- else:
- memory = PineconeMemory(cfg)
- if init:
- memory.clear()
- elif cfg.memory_backend == "redis":
- if not RedisMemory:
- print(
- "Error: Redis is not installed. Please install redis-py to"
- " use Redis as a memory backend."
- )
- else:
- memory = RedisMemory(cfg)
- elif cfg.memory_backend == "weaviate":
- if not WeaviateMemory:
- print(
- "Error: Weaviate is not installed. Please install weaviate-client to"
- " use Weaviate as a memory backend."
- )
- else:
- memory = WeaviateMemory(cfg)
- elif cfg.memory_backend == "milvus":
- if not MilvusMemory:
- print(
- "Error: Milvus sdk is not installed."
- "Please install pymilvus to use Milvus as memory backend."
- )
- else:
- memory = MilvusMemory(cfg)
- elif cfg.memory_backend == "no_memory":
- memory = NoMemory(cfg)
-
- if memory is None:
- memory = LocalCache(cfg)
- if init:
- memory.clear()
- return memory
-
-
-def get_supported_memory_backends():
- return supported_memory
-
-
-__all__ = [
- "get_memory",
- "LocalCache",
- "RedisMemory",
- "PineconeMemory",
- "NoMemory",
- "MilvusMemory",
- "WeaviateMemory",
-]
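A small sketch of how a caller picks a backend through this module, assuming the standard Auto-GPT `Config` object whose `memory_backend` field names one of the supported backends:

```python
from autogpt.config import Config
from autogpt.memory import get_memory, get_supported_memory_backends

cfg = Config()                          # e.g. cfg.memory_backend == "redis"
print(get_supported_memory_backends())  # backends whose imports succeeded
memory = get_memory(cfg, init=True)     # falls back to LocalCache if unavailable
```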
diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/web/config/__init__.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/web/config/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/KonradSzafer/HF-QA-Demo/api/__main__.py b/spaces/KonradSzafer/HF-QA-Demo/api/__main__.py
deleted file mode 100644
index 15dbd5ffccf7e52d97647b142f42ba54ec2d6bf8..0000000000000000000000000000000000000000
--- a/spaces/KonradSzafer/HF-QA-Demo/api/__main__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import uvicorn
-from fastapi import FastAPI
-
-from qa_engine import logger, Config, QAEngine
-
-
-config = Config()
-app = FastAPI()
-qa_engine = QAEngine(
- llm_model_id=config.question_answering_model_id,
- embedding_model_id=config.embedding_model_id,
- index_repo_id=config.index_repo_id,
- prompt_template=config.prompt_template,
- use_docs_for_context=config.use_docs_for_context,
- num_relevant_docs=config.num_relevant_docs,
- add_sources_to_response=config.add_sources_to_response,
- use_messages_for_context=config.use_messages_in_context,
- debug=config.debug
-)
-
-
-@app.get('/')
-def get_answer(question: str, messages_context: str = ''):
- logger.info(
- f'Received request with question: {question} ' \
- f'and context: {messages_context}'
- )
- response = qa_engine.get_response(
- question=question,
- messages_context=messages_context
- )
- return {
- 'answer': response.get_answer(),
- 'sources': response.get_sources_as_text()
- }
-
-
-if __name__ == '__main__':
- uvicorn.run(app, host='0.0.0.0', port=8000)
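With the server above running (for example via `python -m api`), the endpoint can be queried as follows; the host and port come from the `uvicorn.run` call, and the question text is just an example:

```python
import requests

resp = requests.get(
    "http://localhost:8000/",
    params={
        "question": "How do I load a dataset from the Hub?",
        "messages_context": "",
    },
)
payload = resp.json()
print(payload["answer"])
print(payload["sources"])
```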
diff --git a/spaces/LUCKky/QQsign/bin/unidbg-fetch-qsign.bat b/spaces/LUCKky/QQsign/bin/unidbg-fetch-qsign.bat
deleted file mode 100644
index e3c701d4a36fef5ecd4ebe0ac807d091c2722d27..0000000000000000000000000000000000000000
--- a/spaces/LUCKky/QQsign/bin/unidbg-fetch-qsign.bat
+++ /dev/null
@@ -1,89 +0,0 @@
-@rem
-@rem Copyright 2015 the original author or authors.
-@rem
-@rem Licensed under the Apache License, Version 2.0 (the "License");
-@rem you may not use this file except in compliance with the License.
-@rem You may obtain a copy of the License at
-@rem
-@rem https://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-@rem
-
-@if "%DEBUG%" == "" @echo off
-@rem ##########################################################################
-@rem
-@rem unidbg-fetch-qsign startup script for Windows
-@rem
-@rem ##########################################################################
-
-@rem Set local scope for the variables with windows NT shell
-if "%OS%"=="Windows_NT" setlocal
-
-set DIRNAME=%~dp0
-if "%DIRNAME%" == "" set DIRNAME=.
-set APP_BASE_NAME=%~n0
-set APP_HOME=%DIRNAME%..
-
-@rem Resolve any "." and ".." in APP_HOME to make it shorter.
-for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
-
-@rem Add default JVM options here. You can also use JAVA_OPTS and UNIDBG_FETCH_QSIGN_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS=
-
-@rem Find java.exe
-if defined JAVA_HOME goto findJavaFromJavaHome
-
-set JAVA_EXE=java.exe
-%JAVA_EXE% -version >NUL 2>&1
-if "%ERRORLEVEL%" == "0" goto execute
-
-echo.
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
-
-goto fail
-
-:findJavaFromJavaHome
-set JAVA_HOME=%JAVA_HOME:"=%
-set JAVA_EXE=%JAVA_HOME%/bin/java.exe
-
-if exist "%JAVA_EXE%" goto execute
-
-echo.
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
-
-goto fail
-
-:execute
-@rem Setup the command line
-
-set CLASSPATH=%APP_HOME%\lib\unidbg-fetch-qsign-1.1.7.jar;%APP_HOME%\lib\unidbg-1.0.2.jar;%APP_HOME%\lib\ktor-server-content-negotiation-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-json-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-status-pages-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-netty-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-host-common-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-core-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-events-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-websockets-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-cio-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-network-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-utils-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-io-jvm-2.3.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk8-1.8.22.jar;%APP_HOME%\lib\kotlinx-serialization-json-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-protobuf-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-core-jvm-1.5.1.jar;%APP_HOME%\lib\logback-classic-1.2.11.jar;%APP_HOME%\lib\kotlinx-coroutines-jdk8-1.7.1.jar;%APP_HOME%\lib\kotlinx-coroutines-core-jvm-1.7.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk7-1.8.22.jar;%APP_HOME%\lib\kotlin-reflect-1.8.10.jar;%APP_HOME%\lib\kotlin-stdlib-1.8.22.jar;%APP_HOME%\lib\slf4j-api-1.7.36.jar;%APP_HOME%\lib\kotlin-stdlib-common-1.8.22.jar;%APP_HOME%\lib\config-1.4.2.jar;%APP_HOME%\lib\jansi-2.4.0.jar;%APP_HOME%\lib\netty-codec-http2-4.1.92.Final.jar;%APP_HOME%\lib\alpn-api-1.1.3.v20160715.jar;%APP_HOME%\lib\netty-transport-native-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-epoll-4.1.92.Final.jar;%APP_HOME%\lib\logback-core-1.2.11.jar;%APP_HOME%\lib\annotations-23.0.0.jar;%APP_HOME%\lib\netty-codec-http-4.1.92.Final.jar;%APP_HOME%\lib\netty-handler-4.1.92.Final.jar;%APP_HOME%\lib\netty-codec-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-epoll-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-unix-common-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-4.1.92.Final.jar;%APP_HOME%\lib\netty-buffer-4.1.92.Final.jar;%APP_HOME%\lib\netty-resolver-4.1.92.Final.jar;%APP_HOME%\lib\netty-common-4.1.92.Final.jar
-
-
-@rem Execute unidbg-fetch-qsign
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %UNIDBG_FETCH_QSIGN_OPTS% -classpath "%CLASSPATH%" MainKt %*
-
-:end
-@rem End local scope for the variables with windows NT shell
-if "%ERRORLEVEL%"=="0" goto mainEnd
-
-:fail
-rem Set variable UNIDBG_FETCH_QSIGN_EXIT_CONSOLE if you need the _script_ return code instead of
-rem the _cmd.exe /c_ return code!
-if not "" == "%UNIDBG_FETCH_QSIGN_EXIT_CONSOLE%" exit 1
-exit /b 1
-
-:mainEnd
-if "%OS%"=="Windows_NT" endlocal
-
-:omega
diff --git a/spaces/Lasion/NCKH_2023/README.md b/spaces/Lasion/NCKH_2023/README.md
deleted file mode 100644
index f9cc0ee28990405ccadb737b6e2c1fe430f5b2ab..0000000000000000000000000000000000000000
--- a/spaces/Lasion/NCKH_2023/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: NCKH 2023
-emoji: 📚
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Lbin123/Lbingo/src/components/ui/dialog.tsx b/spaces/Lbin123/Lbingo/src/components/ui/dialog.tsx
deleted file mode 100644
index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000
--- a/spaces/Lbin123/Lbingo/src/components/ui/dialog.tsx
+++ /dev/null
@@ -1,128 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as DialogPrimitive from '@radix-ui/react-dialog'
-
-import { cn } from '@/lib/utils'
-import { IconClose } from '@/components/ui/icons'
-
-const Dialog = DialogPrimitive.Root
-
-const DialogTrigger = DialogPrimitive.Trigger
-
-const DialogPortal = ({
-  className,
-  children,
-  ...props
-}: DialogPrimitive.DialogPortalProps) => (
-  <DialogPrimitive.Portal className={cn(className)} {...props}>
-    <div className="fixed inset-0 z-50 flex items-start justify-center sm:items-center">
-      {children}
-    </div>
-  </DialogPrimitive.Portal>
-)
-DialogPortal.displayName = DialogPrimitive.Portal.displayName
-
-const DialogOverlay = React.forwardRef<
-  React.ElementRef<typeof DialogPrimitive.Overlay>,
-  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Overlay>
->(({ className, ...props }, ref) => (
-  <DialogPrimitive.Overlay ref={ref} className={cn(className)} {...props} />
-))
-DialogOverlay.displayName = DialogPrimitive.Overlay.displayName
-
-const DialogContent = React.forwardRef<
-  React.ElementRef<typeof DialogPrimitive.Content>,
-  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Content>
->(({ className, children, ...props }, ref) => (
-  <DialogPortal>
-    <DialogOverlay />
-    <DialogPrimitive.Content ref={ref} className={cn(className)} {...props}>
-      {children}
-      <DialogPrimitive.Close className="absolute right-4 top-4">
-        <IconClose />
-        <span className="sr-only">Close</span>
-      </DialogPrimitive.Close>
-    </DialogPrimitive.Content>
-  </DialogPortal>
-))
-DialogContent.displayName = DialogPrimitive.Content.displayName
-
-const DialogHeader = ({
-  className,
-  ...props
-}: React.HTMLAttributes<HTMLDivElement>) => (
-  <div className={cn(className)} {...props} />
-)
-DialogHeader.displayName = 'DialogHeader'
-
-const DialogFooter = ({
-  className,
-  ...props
-}: React.HTMLAttributes<HTMLDivElement>) => (
-  <div className={cn(className)} {...props} />
-)
-DialogFooter.displayName = 'DialogFooter'
-
-const DialogTitle = React.forwardRef<
-  React.ElementRef<typeof DialogPrimitive.Title>,
-  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Title>
->(({ className, ...props }, ref) => (
-  <DialogPrimitive.Title ref={ref} className={cn(className)} {...props} />
-))
-DialogTitle.displayName = DialogPrimitive.Title.displayName
-
-const DialogDescription = React.forwardRef<
-  React.ElementRef<typeof DialogPrimitive.Description>,
-  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Description>
->(({ className, ...props }, ref) => (
-  <DialogPrimitive.Description ref={ref} className={cn(className)} {...props} />
-))
-DialogDescription.displayName = DialogPrimitive.Description.displayName
-
-export {
- Dialog,
- DialogTrigger,
- DialogContent,
- DialogHeader,
- DialogFooter,
- DialogTitle,
- DialogDescription
-}
diff --git a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/periodstats.py b/spaces/Lianjd/stock_dashboard/backtrader/analyzers/periodstats.py
deleted file mode 100644
index 0661ad21fdfea7340b6aa640be4f91fcfe180d0b..0000000000000000000000000000000000000000
--- a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/periodstats.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8; py-indent-offset:4 -*-
-###############################################################################
-#
-# Copyright (C) 2015-2020 Daniel Rodriguez
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-#
-###############################################################################
-from __future__ import (absolute_import, division, print_function,
- unicode_literals)
-
-
-import backtrader as bt
-from backtrader.utils.py3 import itervalues
-from backtrader.mathsupport import average, standarddev
-from . import TimeReturn
-
-
-__all__ = ['PeriodStats']
-
-
-class PeriodStats(bt.Analyzer):
- '''Calculates basic statistics for a given timeframe
-
- Params:
-
- - ``timeframe`` (default: ``Years``)
- If ``None`` the ``timeframe`` of the 1st data in the system will be
- used
-
- Pass ``TimeFrame.NoTimeFrame`` to consider the entire dataset with no
- time constraints
-
- - ``compression`` (default: ``1``)
-
- Only used for sub-day timeframes, for example to work on an hourly
- timeframe by specifying ``TimeFrame.Minutes`` and ``60`` as compression
-
- If ``None`` then the compression of the 1st data of the system will be
- used
-
- - ``fund`` (default: ``None``)
-
- If ``None`` the actual mode of the broker (fundmode - True/False) will
- be autodetected to decide if the returns are based on the total net
- asset value or on the fund value. See ``set_fundmode`` in the broker
- documentation
-
- Set it to ``True`` or ``False`` for a specific behavior
-
-
- ``get_analysis`` returns a dictionary containing the keys:
-
- - ``average``
- - ``stddev``
- - ``positive``
- - ``negative``
- - ``nochange``
- - ``best``
- - ``worst``
-
- If the parameter ``zeroispos`` is set to ``True``, periods with no change
- will be counted as positive
- '''
-
- params = (
- ('timeframe', bt.TimeFrame.Years),
- ('compression', 1),
- ('zeroispos', False),
- ('fund', None),
- )
-
- def __init__(self):
- self._tr = TimeReturn(timeframe=self.p.timeframe,
- compression=self.p.compression, fund=self.p.fund)
-
- def stop(self):
- trets = self._tr.get_analysis() # dict key = date, value = ret
- pos = nul = neg = 0
- trets = list(itervalues(trets))
- for tret in trets:
- if tret > 0.0:
- pos += 1
- elif tret < 0.0:
- neg += 1
- else:
- if self.p.zeroispos:
- pos += tret == 0.0
- else:
- nul += tret == 0.0
-
- self.rets['average'] = avg = average(trets)
- self.rets['stddev'] = standarddev(trets, avg)
-
- self.rets['positive'] = pos
- self.rets['negative'] = neg
- self.rets['nochange'] = nul
-
- self.rets['best'] = max(trets)
- self.rets['worst'] = min(trets)
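A minimal sketch of wiring this analyzer into a backtest; the data feed and strategy are assumed to exist elsewhere:

```python
import backtrader as bt

cerebro = bt.Cerebro()
# cerebro.adddata(...) and cerebro.addstrategy(...) go here
cerebro.addanalyzer(bt.analyzers.PeriodStats,
                    timeframe=bt.TimeFrame.Months, _name="pstats")
results = cerebro.run()
stats = results[0].analyzers.pstats.get_analysis()
print(stats["average"], stats["stddev"], stats["best"], stats["worst"])
```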
diff --git a/spaces/Lianjd/stock_dashboard/backtrader/observers/drawdown.py b/spaces/Lianjd/stock_dashboard/backtrader/observers/drawdown.py
deleted file mode 100644
index 9d7a7f467934c001570065abf7708719fa6612f4..0000000000000000000000000000000000000000
--- a/spaces/Lianjd/stock_dashboard/backtrader/observers/drawdown.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8; py-indent-offset:4 -*-
-###############################################################################
-#
-# Copyright (C) 2015-2020 Daniel Rodriguez
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-#
-###############################################################################
-from __future__ import (absolute_import, division, print_function,
- unicode_literals)
-
-import backtrader as bt
-from .. import Observer
-
-
-class DrawDown(Observer):
- '''This observer keeps track of the current drawdown level (plotted) and
- the maxdrawdown (not plotted) levels
-
- Params:
-
- - ``fund`` (default: ``None``)
-
- If ``None`` the actual mode of the broker (fundmode - True/False) will
- be autodetected to decide if the returns are based on the total net
- asset value or on the fund value. See ``set_fundmode`` in the broker
- documentation
-
- Set it to ``True`` or ``False`` for a specific behavior
-
- '''
- _stclock = True
-
- params = (
- ('fund', None),
- )
-
- lines = ('drawdown', 'maxdrawdown',)
-
- plotinfo = dict(plot=True, subplot=True)
-
- plotlines = dict(maxdrawdown=dict(_plotskip=True,))
-
- def __init__(self):
- kwargs = self.p._getkwargs()
- self._dd = self._owner._addanalyzer_slave(bt.analyzers.DrawDown,
- **kwargs)
-
- def next(self):
- self.lines.drawdown[0] = self._dd.rets.drawdown # update drawdown
- self.lines.maxdrawdown[0] = self._dd.rets.max.drawdown # update max
-
-
-class DrawDownLength(Observer):
- '''This observer keeps track of the current drawdown length (plotted) and
- the drawdown max length (not plotted)
-
- Params: None
- '''
- _stclock = True
-
- lines = ('len', 'maxlen',)
-
- plotinfo = dict(plot=True, subplot=True)
-
- plotlines = dict(maxlen=dict(_plotskip=True,))  # matches the 'maxlen' line
-
- def __init__(self):
- self._dd = self._owner._addanalyzer_slave(bt.analyzers.DrawDown)
-
- def next(self):
- self.lines.len[0] = self._dd.rets.len # update drawdown length
- self.lines.maxlen[0] = self._dd.rets.max.len # update max length
-
-
-class DrawDown_Old(Observer):
- '''This observer keeps track of the current drawdown level (plotted) and
- the maxdrawdown (not plotted) levels
-
- Params: None
- '''
- _stclock = True
-
- lines = ('drawdown', 'maxdrawdown',)
-
- plotinfo = dict(plot=True, subplot=True)
-
-    plotlines = dict(maxdrawdown=dict(_plotskip=True,))
-
- def __init__(self):
- super(DrawDown_Old, self).__init__()
-
- self.maxdd = 0.0
- self.peak = float('-inf')
-
- def next(self):
- value = self._owner.broker.getvalue()
-
- # update the maximum seen peak
- if value > self.peak:
- self.peak = value
-
- # calculate the current drawdown
- self.lines.drawdown[0] = dd = 100.0 * (self.peak - value) / self.peak
-
- # update the maxdrawdown if needed
- self.lines.maxdrawdown[0] = self.maxdd = max(self.maxdd, dd)
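
For reference, the peak-tracking drawdown arithmetic in DrawDown_Old.next can be checked outside of backtrader. The sketch below is a minimal illustration; the drawdown_series helper and the sample equity curve are made up for the example.

def drawdown_series(values):
    # Track the running peak and express the gap to it as a percentage,
    # the same per-bar calculation done in DrawDown_Old.next above.
    peak = float('-inf')
    maxdd = 0.0
    out = []
    for value in values:
        peak = max(peak, value)
        dd = 100.0 * (peak - value) / peak
        maxdd = max(maxdd, dd)
        out.append((dd, maxdd))
    return out

if __name__ == '__main__':
    for dd, maxdd in drawdown_series([100.0, 110.0, 99.0, 120.0, 90.0]):
        print('drawdown %.2f%%, max %.2f%%' % (dd, maxdd))
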
diff --git a/spaces/Lianjd/stock_dashboard/backtrader/store.py b/spaces/Lianjd/stock_dashboard/backtrader/store.py
deleted file mode 100644
index e1f0f4f29d1ea5d71cb1546cd58c8a000f13494e..0000000000000000000000000000000000000000
--- a/spaces/Lianjd/stock_dashboard/backtrader/store.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8; py-indent-offset:4 -*-
-###############################################################################
-#
-# Copyright (C) 2015-2020 Daniel Rodriguez
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-###############################################################################
-from __future__ import (absolute_import, division, print_function,
- unicode_literals)
-
-import collections
-
-from backtrader.metabase import MetaParams
-from backtrader.utils.py3 import with_metaclass
-
-
-class MetaSingleton(MetaParams):
- '''Metaclass to make a metaclassed class a singleton'''
- def __init__(cls, name, bases, dct):
- super(MetaSingleton, cls).__init__(name, bases, dct)
- cls._singleton = None
-
- def __call__(cls, *args, **kwargs):
- if cls._singleton is None:
- cls._singleton = (
- super(MetaSingleton, cls).__call__(*args, **kwargs))
-
- return cls._singleton
-
-
-class Store(with_metaclass(MetaSingleton, object)):
- '''Base class for all Stores'''
-
- _started = False
-
- params = ()
-
- def getdata(self, *args, **kwargs):
- '''Returns ``DataCls`` with args, kwargs'''
- data = self.DataCls(*args, **kwargs)
- data._store = self
- return data
-
- @classmethod
- def getbroker(cls, *args, **kwargs):
- '''Returns broker with *args, **kwargs from registered ``BrokerCls``'''
- broker = cls.BrokerCls(*args, **kwargs)
- broker._store = cls
- return broker
-
- BrokerCls = None # broker class will autoregister
- DataCls = None # data class will auto register
-
- def start(self, data=None, broker=None):
- if not self._started:
- self._started = True
- self.notifs = collections.deque()
- self.datas = list()
- self.broker = None
-
- if data is not None:
- self._cerebro = self._env = data._env
- self.datas.append(data)
-
- if self.broker is not None:
- if hasattr(self.broker, 'data_started'):
- self.broker.data_started(data)
-
- elif broker is not None:
- self.broker = broker
-
- def stop(self):
- pass
-
- def put_notification(self, msg, *args, **kwargs):
- self.notifs.append((msg, args, kwargs))
-
- def get_notifications(self):
- '''Return the pending "store" notifications'''
- self.notifs.append(None) # put a mark / threads could still append
- return [x for x in iter(self.notifs.popleft, None)]
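
The one-line drain in get_notifications relies on iter's two-argument (callable, sentinel) form. A minimal stand-alone sketch of the idiom, with toy notification tuples and no backtrader involved:

import collections

notifs = collections.deque()
notifs.append(('order status', (), {}))
notifs.append(('trade update', (), {}))

# Append a None marker, then pop until the marker is reached; anything another
# thread appends after the marker stays queued for the next call.
notifs.append(None)
pending = [x for x in iter(notifs.popleft, None)]
print(pending)   # the two tuples; the deque is empty again until new appends
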
diff --git a/spaces/LinkSoul/Chinese-LLaVa/static/js/index.js b/spaces/LinkSoul/Chinese-LLaVa/static/js/index.js
deleted file mode 100644
index 22f41c71c4c529c882b3d51740478f92b3a428a5..0000000000000000000000000000000000000000
--- a/spaces/LinkSoul/Chinese-LLaVa/static/js/index.js
+++ /dev/null
@@ -1,78 +0,0 @@
-window.HELP_IMPROVE_VIDEOJS = false;
-
-// var INTERP_BASE = "./static/interpolation/stacked";
-var NUM_INTERP_FRAMES = 240;
-
-var interp_images = [];
-// function preloadInterpolationImages() {
-// for (var i = 0; i < NUM_INTERP_FRAMES; i++) {
-// var path = INTERP_BASE + '/' + String(i).padStart(6, '0') + '.jpg';
-// interp_images[i] = new Image();
-// interp_images[i].src = path;
-// }
-// }
-
-// function setInterpolationImage(i) {
-// var image = interp_images[i];
-// image.ondragstart = function() { return false; };
-// image.oncontextmenu = function() { return false; };
-// $('#interpolation-image-wrapper').empty().append(image);
-// }
-
-
-$(document).ready(function() {
- // Check for click events on the navbar burger icon
- $(".navbar-burger").click(function() {
- // Toggle the "is-active" class on both the "navbar-burger" and the "navbar-menu"
- $(".navbar-burger").toggleClass("is-active");
- $(".navbar-menu").toggleClass("is-active");
-
- });
-
- var options = {
- slidesToScroll: 1,
- slidesToShow: 3,
- loop: true,
- infinite: true,
- autoplay: false,
- autoplaySpeed: 3000,
- }
-
- // Initialize all div with carousel class
- var carousels = bulmaCarousel.attach('.carousel', options);
-
- // Loop on each carousel initialized
- for(var i = 0; i < carousels.length; i++) {
- // Add listener to event
- carousels[i].on('before:show', state => {
- console.log(state);
- });
- }
-
- // Access to bulmaCarousel instance of an element
- var element = document.querySelector('#my-element');
- if (element && element.bulmaCarousel) {
- // bulmaCarousel instance is available as element.bulmaCarousel
- element.bulmaCarousel.on('before-show', function(state) {
- console.log(state);
- });
- }
-
- /*var player = document.getElementById('interpolation-video');
- player.addEventListener('loadedmetadata', function() {
- $('#interpolation-slider').on('input', function(event) {
- console.log(this.value, player.duration);
- player.currentTime = player.duration / 100 * this.value;
- })
- }, false);*/
- // preloadInterpolationImages();
-
- // $('#interpolation-slider').on('input', function(event) {
- // setInterpolationImage(this.value);
- // });
- // setInterpolationImage(0);
- // $('#interpolation-slider').prop('max', NUM_INTERP_FRAMES - 1);
-
- bulmaSlider.attach();
-
-})
diff --git a/spaces/LittleLirow/fearflixai/story.py b/spaces/LittleLirow/fearflixai/story.py
deleted file mode 100644
index 8446bc25370f318d5975702afe7428c8312bd62c..0000000000000000000000000000000000000000
--- a/spaces/LittleLirow/fearflixai/story.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import os
-import openai
-
-def text2story(text, auth):
- openai.api_key = auth
-
- response = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a psychedelic horror storywriter."},
- {"role": "user", "content": f"Write a 250-word narrated story about {text} with a bad ending. It must have {text}."}
- ],
- max_tokens=1000,
- temperature=0.7)
-
- return response["choices"][0]["message"]["content"]
\ No newline at end of file
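
A possible way to call the helper above, assuming the file is importable as story and that the pre-1.0 openai SDK interface it uses is installed; the prompt and environment variable below are placeholders.

import os
from story import text2story  # assumes story.py above is on the path

if __name__ == '__main__':
    tale = text2story('an abandoned lighthouse', auth=os.environ['OPENAI_API_KEY'])
    print(tale)
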
diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/util/visualizer.py b/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/util/visualizer.py
deleted file mode 100644
index 2cc519b52e9e15f5891ac3f4dcab620793794322..0000000000000000000000000000000000000000
--- a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/util/visualizer.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import os
-import ntpath
-import time
-from . import util
-import scipy.misc
-
-try:
- from StringIO import StringIO # Python 2.7
-except ImportError:
- from io import BytesIO # Python 3.x
-import torchvision.utils as vutils
-from tensorboardX import SummaryWriter
-import torch
-import numpy as np
-
-
-class Visualizer:
- def __init__(self, opt):
- self.opt = opt
- self.tf_log = opt.isTrain and opt.tf_log
-
- self.tensorboard_log = opt.tensorboard_log
-
- self.win_size = opt.display_winsize
- self.name = opt.name
- if self.tensorboard_log:
-
- if self.opt.isTrain:
- self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, "logs")
- if not os.path.exists(self.log_dir):
- os.makedirs(self.log_dir)
- self.writer = SummaryWriter(log_dir=self.log_dir)
- else:
- print("hi :)")
- self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, opt.results_dir)
- if not os.path.exists(self.log_dir):
- os.makedirs(self.log_dir)
-
- if opt.isTrain:
- self.log_name = os.path.join(opt.checkpoints_dir, opt.name, "loss_log.txt")
- with open(self.log_name, "a") as log_file:
- now = time.strftime("%c")
- log_file.write("================ Training Loss (%s) ================\n" % now)
-
- # |visuals|: dictionary of images to display or save
- def display_current_results(self, visuals, epoch, step):
-
- all_tensor = []
- if self.tensorboard_log:
-
- for key, tensor in visuals.items():
- all_tensor.append((tensor.data.cpu() + 1) / 2)
-
- output = torch.cat(all_tensor, 0)
- img_grid = vutils.make_grid(output, nrow=self.opt.batchSize, padding=0, normalize=False)
-
- if self.opt.isTrain:
- self.writer.add_image("Face_SPADE/training_samples", img_grid, step)
- else:
- vutils.save_image(
- output,
- os.path.join(self.log_dir, str(step) + ".png"),
- nrow=self.opt.batchSize,
- padding=0,
- normalize=False,
- )
-
- # errors: dictionary of error labels and values
- def plot_current_errors(self, errors, step):
- if self.tf_log:
- for tag, value in errors.items():
- value = value.mean().float()
- summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)])
- self.writer.add_summary(summary, step)
-
- if self.tensorboard_log:
-
- self.writer.add_scalar("Loss/GAN_Feat", errors["GAN_Feat"].mean().float(), step)
- self.writer.add_scalar("Loss/VGG", errors["VGG"].mean().float(), step)
- self.writer.add_scalars(
- "Loss/GAN",
- {
- "G": errors["GAN"].mean().float(),
- "D": (errors["D_Fake"].mean().float() + errors["D_real"].mean().float()) / 2,
- },
- step,
- )
-
- # errors: same format as |errors| of plotCurrentErrors
- def print_current_errors(self, epoch, i, errors, t):
- message = "(epoch: %d, iters: %d, time: %.3f) " % (epoch, i, t)
- for k, v in errors.items():
- v = v.mean().float()
- message += "%s: %.3f " % (k, v)
-
- print(message)
- with open(self.log_name, "a") as log_file:
- log_file.write("%s\n" % message)
-
- def convert_visuals_to_numpy(self, visuals):
- for key, t in visuals.items():
- tile = self.opt.batchSize > 8
- if "input_label" == key:
- t = util.tensor2label(t, self.opt.label_nc + 2, tile=tile) ## B*H*W*C 0-255 numpy
- else:
- t = util.tensor2im(t, tile=tile)
- visuals[key] = t
- return visuals
-
- # save image to the disk
- def save_images(self, webpage, visuals, image_path):
- visuals = self.convert_visuals_to_numpy(visuals)
-
- image_dir = webpage.get_image_dir()
- short_path = ntpath.basename(image_path[0])
- name = os.path.splitext(short_path)[0]
-
- webpage.add_header(name)
- ims = []
- txts = []
- links = []
-
- for label, image_numpy in visuals.items():
- image_name = os.path.join(label, "%s.png" % (name))
- save_path = os.path.join(image_dir, image_name)
- util.save_image(image_numpy, save_path, create_dir=True)
-
- ims.append(image_name)
- txts.append(label)
- links.append(image_name)
- webpage.add_images(ims, txts, links, width=self.win_size)
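
The grid built in display_current_results maps model outputs from [-1, 1] to [0, 1] before tiling them into a single image. A small stand-alone sketch of that step, with random tensors standing in for network outputs:

import torch
import torchvision.utils as vutils

batch = torch.rand(4, 3, 64, 64) * 2 - 1                  # fake outputs in [-1, 1]
grid = vutils.make_grid((batch + 1) / 2, nrow=4, padding=0, normalize=False)
print(grid.shape)                                          # torch.Size([3, 64, 256])
vutils.save_image(grid, 'sample_grid.png')                 # or log via a SummaryWriter
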
diff --git a/spaces/Makiing/coolb-in-gtest/src/components/settings.tsx b/spaces/Makiing/coolb-in-gtest/src/components/settings.tsx
deleted file mode 100644
index e18aa5b484852bb5d047442a06e7143b6893cb0d..0000000000000000000000000000000000000000
--- a/spaces/Makiing/coolb-in-gtest/src/components/settings.tsx
+++ /dev/null
@@ -1,141 +0,0 @@
-import { useEffect, useState } from 'react'
-import { useAtom } from 'jotai'
-import { Switch } from '@headlessui/react'
-import { toast } from 'react-hot-toast'
-import { hashAtom, voiceAtom } from '@/state'
-import {
- Dialog,
- DialogContent,
- DialogDescription,
- DialogFooter,
- DialogHeader,
- DialogTitle
-} from '@/components/ui/dialog'
-import { Button } from './ui/button'
-import { Input } from './ui/input'
-import { ChunkKeys, parseCookies, extraCurlFromCookie, randomIP, encodeHeadersToCookie } from '@/lib/utils'
-import { ExternalLink } from './external-link'
-import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
-
-export function Settings() {
- const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })
- const [loc, setLoc] = useAtom(hashAtom)
- const [curlValue, setCurlValue] = useState(extraCurlFromCookie(parseCookies(document.cookie, ChunkKeys)))
- const [enableTTS, setEnableTTS] = useAtom(voiceAtom)
-
- useEffect(() => {
- if (isCopied) {
- toast.success('复制成功')
- }
- }, [isCopied])
-
- if (loc === 'settings') {
- return (
-
- )
- } else if (loc === 'voice') {
- return (
-
- )
- }
- return null
-}
diff --git a/spaces/Manmay/tortoise-tts/tortoise/models/classifier.py b/spaces/Manmay/tortoise-tts/tortoise/models/classifier.py
deleted file mode 100644
index f92d99e511d08f8b9e9807fb5ef34e6e871a998c..0000000000000000000000000000000000000000
--- a/spaces/Manmay/tortoise-tts/tortoise/models/classifier.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import torch
-import torch.nn as nn
-
-from tortoise.models.arch_util import Upsample, Downsample, normalization, zero_module, AttentionBlock
-
-
-class ResBlock(nn.Module):
- def __init__(
- self,
- channels,
- dropout,
- out_channels=None,
- use_conv=False,
- use_scale_shift_norm=False,
- dims=2,
- up=False,
- down=False,
- kernel_size=3,
- do_checkpoint=True,
- ):
- super().__init__()
- self.channels = channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_scale_shift_norm = use_scale_shift_norm
- self.do_checkpoint = do_checkpoint
- padding = 1 if kernel_size == 3 else 2
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding)
- ),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
-            # nn.Conv1d takes (in_channels, out_channels, kernel_size, ...); a
-            # leading `dims` argument would be misread as the input channel count.
-            self.skip_connection = nn.Conv1d(
-                channels, self.out_channels, kernel_size, padding=padding
-            )
-        else:
-            self.skip_connection = nn.Conv1d(channels, self.out_channels, 1)
-
- def forward(self, x):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- h = self.out_layers(h)
- return self.skip_connection(x) + h
-
-
-class AudioMiniEncoder(nn.Module):
- def __init__(self,
- spec_dim,
- embedding_dim,
- base_channels=128,
- depth=2,
- resnet_blocks=2,
- attn_blocks=4,
- num_attn_heads=4,
- dropout=0,
- downsample_factor=2,
- kernel_size=3):
- super().__init__()
- self.init = nn.Sequential(
- nn.Conv1d(spec_dim, base_channels, 3, padding=1)
- )
- ch = base_channels
- res = []
- self.layers = depth
- for l in range(depth):
- for r in range(resnet_blocks):
- res.append(ResBlock(ch, dropout, do_checkpoint=False, kernel_size=kernel_size))
- res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor))
- ch *= 2
- self.res = nn.Sequential(*res)
- self.final = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- nn.Conv1d(ch, embedding_dim, 1)
- )
- attn = []
- for a in range(attn_blocks):
- attn.append(AttentionBlock(embedding_dim, num_attn_heads, do_checkpoint=False))
- self.attn = nn.Sequential(*attn)
- self.dim = embedding_dim
-
- def forward(self, x):
- h = self.init(x)
- h = self.res(h)
- h = self.final(h)
- for blk in self.attn:
- h = blk(h)
- return h[:, :, 0]
-
-
-class AudioMiniEncoderWithClassifierHead(nn.Module):
- def __init__(self, classes, distribute_zero_label=True, **kwargs):
- super().__init__()
- self.enc = AudioMiniEncoder(**kwargs)
- self.head = nn.Linear(self.enc.dim, classes)
- self.num_classes = classes
- self.distribute_zero_label = distribute_zero_label
-
- def forward(self, x, labels=None):
- h = self.enc(x)
- logits = self.head(h)
- if labels is None:
- return logits
- else:
- if self.distribute_zero_label:
- oh_labels = nn.functional.one_hot(labels, num_classes=self.num_classes)
- zeros_indices = (labels == 0).unsqueeze(-1)
- # Distribute 20% of the probability mass on all classes when zero is specified, to compensate for dataset noise.
- zero_extra_mass = torch.full_like(oh_labels, dtype=torch.float, fill_value=.2/(self.num_classes-1))
- zero_extra_mass[:, 0] = -.2
- zero_extra_mass = zero_extra_mass * zeros_indices
- oh_labels = oh_labels + zero_extra_mass
- else:
- oh_labels = labels
- loss = nn.functional.cross_entropy(logits, oh_labels)
- return loss
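
The distribute_zero_label branch builds soft targets by moving 20% of the probability mass off class 0 onto the remaining classes, but only for rows labelled 0. The toy reproduction below is illustrative (shapes and values invented); cross_entropy with probability targets needs PyTorch >= 1.10.

import torch
import torch.nn.functional as F

num_classes = 5
labels = torch.tensor([0, 2])                              # class 0 marks noisy/unknown rows
oh = F.one_hot(labels, num_classes=num_classes).float()

extra = torch.full_like(oh, 0.2 / (num_classes - 1))
extra[:, 0] = -0.2                                         # remove 20% from class 0 ...
extra = extra * (labels == 0).unsqueeze(-1)                # ... only where the label is 0
soft_targets = oh + extra
print(soft_targets[0])                                     # tensor([0.80, 0.05, 0.05, 0.05, 0.05])

logits = torch.randn(2, num_classes)
print(F.cross_entropy(logits, soft_targets).item())        # CE against probability targets
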
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/dnl_head.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/dnl_head.py
deleted file mode 100644
index 333280c5947066fd3c7ebcfe302a0e7ad65480d5..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/dnl_head.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import torch
-from annotator.uniformer.mmcv.cnn import NonLocal2d
-from torch import nn
-
-from ..builder import HEADS
-from .fcn_head import FCNHead
-
-
-class DisentangledNonLocal2d(NonLocal2d):
- """Disentangled Non-Local Blocks.
-
- Args:
- temperature (float): Temperature to adjust attention. Default: 0.05
- """
-
- def __init__(self, *arg, temperature, **kwargs):
- super().__init__(*arg, **kwargs)
- self.temperature = temperature
- self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1)
-
- def embedded_gaussian(self, theta_x, phi_x):
- """Embedded gaussian with temperature."""
-
- # NonLocal2d pairwise_weight: [N, HxW, HxW]
- pairwise_weight = torch.matmul(theta_x, phi_x)
- if self.use_scale:
- # theta_x.shape[-1] is `self.inter_channels`
- pairwise_weight /= theta_x.shape[-1]**0.5
- pairwise_weight /= self.temperature
- pairwise_weight = pairwise_weight.softmax(dim=-1)
- return pairwise_weight
-
- def forward(self, x):
- # x: [N, C, H, W]
- n = x.size(0)
-
- # g_x: [N, HxW, C]
- g_x = self.g(x).view(n, self.inter_channels, -1)
- g_x = g_x.permute(0, 2, 1)
-
- # theta_x: [N, HxW, C], phi_x: [N, C, HxW]
- if self.mode == 'gaussian':
- theta_x = x.view(n, self.in_channels, -1)
- theta_x = theta_x.permute(0, 2, 1)
- if self.sub_sample:
- phi_x = self.phi(x).view(n, self.in_channels, -1)
- else:
- phi_x = x.view(n, self.in_channels, -1)
- elif self.mode == 'concatenation':
- theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
- phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
- else:
- theta_x = self.theta(x).view(n, self.inter_channels, -1)
- theta_x = theta_x.permute(0, 2, 1)
- phi_x = self.phi(x).view(n, self.inter_channels, -1)
-
- # subtract mean
- theta_x -= theta_x.mean(dim=-2, keepdim=True)
- phi_x -= phi_x.mean(dim=-1, keepdim=True)
-
- pairwise_func = getattr(self, self.mode)
- # pairwise_weight: [N, HxW, HxW]
- pairwise_weight = pairwise_func(theta_x, phi_x)
-
- # y: [N, HxW, C]
- y = torch.matmul(pairwise_weight, g_x)
- # y: [N, C, H, W]
- y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
- *x.size()[2:])
-
- # unary_mask: [N, 1, HxW]
- unary_mask = self.conv_mask(x)
- unary_mask = unary_mask.view(n, 1, -1)
- unary_mask = unary_mask.softmax(dim=-1)
- # unary_x: [N, 1, C]
- unary_x = torch.matmul(unary_mask, g_x)
- # unary_x: [N, C, 1, 1]
- unary_x = unary_x.permute(0, 2, 1).contiguous().reshape(
- n, self.inter_channels, 1, 1)
-
- output = x + self.conv_out(y + unary_x)
-
- return output
-
-
-@HEADS.register_module()
-class DNLHead(FCNHead):
- """Disentangled Non-Local Neural Networks.
-
- This head is the implementation of `DNLNet
-    <https://arxiv.org/abs/2006.06668>`_.
-
- Args:
- reduction (int): Reduction factor of projection transform. Default: 2.
- use_scale (bool): Whether to scale pairwise_weight by
- sqrt(1/inter_channels). Default: False.
- mode (str): The nonlocal mode. Options are 'embedded_gaussian',
-        'dot_product'. Default: 'embedded_gaussian'.
- temperature (float): Temperature to adjust attention. Default: 0.05
- """
-
- def __init__(self,
- reduction=2,
- use_scale=True,
- mode='embedded_gaussian',
- temperature=0.05,
- **kwargs):
- super(DNLHead, self).__init__(num_convs=2, **kwargs)
- self.reduction = reduction
- self.use_scale = use_scale
- self.mode = mode
- self.temperature = temperature
- self.dnl_block = DisentangledNonLocal2d(
- in_channels=self.channels,
- reduction=self.reduction,
- use_scale=self.use_scale,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- mode=self.mode,
- temperature=self.temperature)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- output = self.convs[0](x)
- output = self.dnl_block(output)
- output = self.convs[1](output)
- if self.concat_input:
- output = self.conv_cat(torch.cat([x, output], dim=1))
- output = self.cls_seg(output)
- return output
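
The "disentangled" pairwise term above differs from a plain non-local block in two ways: theta/phi are mean-centred before the dot product, and the logits are divided by sqrt(C) and by the temperature before the softmax. A toy reproduction with random tensors:

import torch

n, hw, c = 1, 6, 4
temperature = 0.05
theta = torch.randn(n, hw, c)                      # [N, HxW, C]
phi = torch.randn(n, c, hw)                        # [N, C, HxW]

theta = theta - theta.mean(dim=-2, keepdim=True)   # subtract mean over positions
phi = phi - phi.mean(dim=-1, keepdim=True)

pairwise = torch.matmul(theta, phi) / (c ** 0.5) / temperature
pairwise = pairwise.softmax(dim=-1)                # [N, HxW, HxW], each row sums to 1
print(pairwise.shape, pairwise.sum(dim=-1))
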
diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/data/BaseDataset.py b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/data/BaseDataset.py
deleted file mode 100644
index 2d3e842341ecd51514ac96ce51a13fcaa12d1733..0000000000000000000000000000000000000000
--- a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/data/BaseDataset.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from torch.utils.data import Dataset
-import random
-
-
-class BaseDataset(Dataset):
- '''
-    This is the base dataset class.
-    By itself it does nothing and is not runnable.
-    Check the get_item method to see what a subclass should return.
- '''
-
- @staticmethod
- def modify_commandline_options(parser, is_train):
- return parser
-
- def __init__(self, opt, phase='train'):
- self.opt = opt
-        self.phase = phase
-        self.is_train = self.phase == 'train'
- self.projection_mode = 'orthogonal' # Declare projection mode here
-
- def __len__(self):
- return 0
-
- def get_item(self, index):
- # In case of a missing file or IO error, switch to a random sample instead
- try:
- res = {
- 'name': None, # name of this subject
- 'b_min': None, # Bounding box (x_min, y_min, z_min) of target space
- 'b_max': None, # Bounding box (x_max, y_max, z_max) of target space
-
- 'samples': None, # [3, N] samples
- 'labels': None, # [1, N] labels
-
- 'img': None, # [num_views, C, H, W] input images
- 'calib': None, # [num_views, 4, 4] calibration matrix
- 'extrinsic': None, # [num_views, 4, 4] extrinsic matrix
- 'mask': None, # [num_views, 1, H, W] segmentation masks
- }
- return res
-        except Exception:
- print("Requested index %s has missing files. Using a random sample instead." % index)
- return self.get_item(index=random.randint(0, self.__len__() - 1))
-
- def __getitem__(self, index):
- return self.get_item(index)
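
A subclass is expected to override __len__ and get_item and fill the documented fields. The sketch below is hypothetical (ToyDataset and its random tensors are invented for illustration) and assumes BaseDataset from this file is importable.

import torch

class ToyDataset(BaseDataset):                     # BaseDataset as defined above
    def __len__(self):
        return 8

    def get_item(self, index):
        return {
            'name': 'subject_%d' % index,
            'b_min': torch.tensor([-1.0, -1.0, -1.0]),
            'b_max': torch.tensor([1.0, 1.0, 1.0]),
            'samples': torch.rand(3, 5000),        # [3, N] query points (toy values)
            'labels': torch.rand(1, 5000),         # [1, N] labels (toy values)
            'img': torch.rand(1, 3, 512, 512),     # [num_views, C, H, W]
            'calib': torch.eye(4).unsqueeze(0),    # [num_views, 4, 4]
            'extrinsic': torch.eye(4).unsqueeze(0),
            'mask': torch.ones(1, 1, 512, 512),
        }
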
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/module_losses/ce_module_loss.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/module_losses/ce_module_loss.py
deleted file mode 100644
index a351ea0c553bf1e1c7c9534630178904ba0f1a30..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/module_losses/ce_module_loss.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import warnings
-from typing import Dict, Sequence, Union
-
-import torch
-import torch.nn as nn
-
-from mmocr.models.common.dictionary import Dictionary
-from mmocr.registry import MODELS
-from mmocr.structures import TextRecogDataSample
-from .base import BaseTextRecogModuleLoss
-
-
-@MODELS.register_module()
-class CEModuleLoss(BaseTextRecogModuleLoss):
- """Implementation of loss module for encoder-decoder based text recognition
- method with CrossEntropy loss.
-
- Args:
- dictionary (dict or :obj:`Dictionary`): The config for `Dictionary` or
- the instance of `Dictionary`.
- max_seq_len (int): Maximum sequence length. The sequence is usually
- generated from decoder. Defaults to 40.
- letter_case (str): There are three options to alter the letter cases
- of gt texts:
- - unchanged: Do not change gt texts.
- - upper: Convert gt texts into uppercase characters.
- - lower: Convert gt texts into lowercase characters.
- Usually, it only works for English characters. Defaults to
- 'unchanged'.
- pad_with (str): The padding strategy for ``gt_text.padded_indexes``.
- Defaults to 'auto'. Options are:
- - 'auto': Use dictionary.padding_idx to pad gt texts, or
- dictionary.end_idx if dictionary.padding_idx
- is None.
- - 'padding': Always use dictionary.padding_idx to pad gt texts.
- - 'end': Always use dictionary.end_idx to pad gt texts.
- - 'none': Do not pad gt texts.
- ignore_char (int or str): Specifies a target value that is
- ignored and does not contribute to the input gradient.
- ignore_char can be int or str. If int, it is the index of
- the ignored char. If str, it is the character to ignore.
- Apart from single characters, each item can be one of the
- following reversed keywords: 'padding', 'start', 'end',
- and 'unknown', which refer to their corresponding special
- tokens in the dictionary. It will not ignore any special
- tokens when ignore_char == -1 or 'none'. Defaults to 'padding'.
- flatten (bool): Whether to flatten the output and target before
- computing CE loss. Defaults to False.
- reduction (str): Specifies the reduction to apply to the output,
- should be one of the following: ('none', 'mean', 'sum'). Defaults
- to 'none'.
- ignore_first_char (bool): Whether to ignore the first token in target (
- usually the start token). If ``True``, the last token of the output
- sequence will also be removed to be aligned with the target length.
- Defaults to ``False``.
- """
-
- def __init__(self,
- dictionary: Union[Dict, Dictionary],
- max_seq_len: int = 40,
- letter_case: str = 'unchanged',
- pad_with: str = 'auto',
- ignore_char: Union[int, str] = 'padding',
- flatten: bool = False,
- reduction: str = 'none',
- ignore_first_char: bool = False):
- super().__init__(
- dictionary=dictionary,
- max_seq_len=max_seq_len,
- letter_case=letter_case,
- pad_with=pad_with)
- assert isinstance(ignore_char, (int, str))
- assert isinstance(reduction, str)
- assert reduction in ['none', 'mean', 'sum']
- assert isinstance(ignore_first_char, bool)
- assert isinstance(flatten, bool)
- self.flatten = flatten
-
- self.ignore_first_char = ignore_first_char
-
- if isinstance(ignore_char, int):
- ignore_index = ignore_char
- else:
- mapping_table = {
- 'none': -1,
- 'start': self.dictionary.start_idx,
- 'padding': self.dictionary.padding_idx,
- 'end': self.dictionary.end_idx,
- 'unknown': self.dictionary.unknown_idx,
- }
-
- ignore_index = mapping_table.get(
- ignore_char,
- self.dictionary.char2idx(ignore_char, strict=False))
- if ignore_index is None or (ignore_index
- == self.dictionary.unknown_idx
- and ignore_char != 'unknown'):
- warnings.warn(
- f'{ignore_char} does not exist in the dictionary',
- UserWarning)
- ignore_index = -1
-
- self.ignore_char = ignore_char
- self.ignore_index = ignore_index
- self.loss_ce = nn.CrossEntropyLoss(
- ignore_index=ignore_index, reduction=reduction)
-
- def forward(self, outputs: torch.Tensor,
- data_samples: Sequence[TextRecogDataSample]) -> Dict:
- """
- Args:
- outputs (Tensor): A raw logit tensor of shape :math:`(N, T, C)`.
- data_samples (list[TextRecogDataSample]): List of
- ``TextRecogDataSample`` which are processed by ``get_target``.
-
- Returns:
- dict: A loss dict with the key ``loss_ce``.
- """
- targets = list()
- for data_sample in data_samples:
- targets.append(data_sample.gt_text.padded_indexes)
- targets = torch.stack(targets, dim=0).long()
- if self.ignore_first_char:
- targets = targets[:, 1:].contiguous()
- outputs = outputs[:, :-1, :].contiguous()
- if self.flatten:
- outputs = outputs.view(-1, outputs.size(-1))
- targets = targets.view(-1)
- else:
- outputs = outputs.permute(0, 2, 1).contiguous()
-
- loss_ce = self.loss_ce(outputs, targets.to(outputs.device))
- losses = dict(loss_ce=loss_ce)
-
- return losses
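
At the end of forward the loss is computed in one of two layouts: flattened to (N*T, C) logits against (N*T,) targets, or with the class dimension moved to position 1 so CrossEntropyLoss accepts per-step targets. A toy sketch of both paths with random values:

import torch
import torch.nn as nn

N, T, C, pad = 2, 5, 10, 0
logits = torch.randn(N, T, C)
targets = torch.randint(1, C, (N, T))
targets[:, -1] = pad                                       # padded steps are ignored

loss_fn = nn.CrossEntropyLoss(ignore_index=pad, reduction='none')
flat = loss_fn(logits.view(-1, C), targets.view(-1))       # flatten=True path
per_step = loss_fn(logits.permute(0, 2, 1), targets)       # flatten=False path
print(flat.shape, per_step.shape)                          # (10,) and (2, 5)
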
diff --git a/spaces/NagaSaiAbhinay/unclip_text_interpolation_demo/app.py b/spaces/NagaSaiAbhinay/unclip_text_interpolation_demo/app.py
deleted file mode 100644
index ff538d0d2bebfdf769581ee1c719851cdf3dd3e2..0000000000000000000000000000000000000000
--- a/spaces/NagaSaiAbhinay/unclip_text_interpolation_demo/app.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from diffusers import DiffusionPipeline
-import gradio as gr
-import torch
-import math
-
-orig_start_prompt = "a photograph of an adult lion"
-orig_end_prompt = "a photograph of a lion cub"
-
-if torch.cuda.is_available():
- device = "cuda"
- dtype = torch.float16
-else:
- device = "cpu"
- dtype = torch.bfloat16
-
-pipe = DiffusionPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=dtype, custom_pipeline='unclip_text_interpolation')
-pipe.to(device)
-
-def unclip_text_interpolation(
- start_prompt,
- end_prompt,
- steps,
- seed
-):
- generator = torch.Generator()
- generator.manual_seed(seed)
-
- output = pipe(start_prompt, end_prompt, steps, enable_sequential_cpu_offload=False, generator=generator)
- return output.images
-
-inputs = [
- gr.Textbox(lines=2, default=orig_start_prompt, label="Start Prompt"),
- gr.Textbox(lines=2, default=orig_end_prompt, label="End Prompt"),
- gr.Slider(minimum=2, maximum=12, default=5, step=1, label="Steps"),
- gr.Number(0, label="Seed", precision=0)
-]
-
-output = gr.Gallery(
- label="Generated images", show_label=False, elem_id="gallery"
- ).style(grid=[2], height="auto")
-
-examples = [
- [orig_start_prompt, orig_end_prompt, 5, 42],
- ["a photo of a landscape in winter","a photo of a landscape in fall", 5, 20],
- ["a photo of a victorian house", "a photo of a modern house", 5, 20]
-]
-
-title = "UnClip Text Interpolation Pipeline"
-
-demo_app = gr.Interface(
- fn=unclip_text_interpolation,
- inputs=inputs,
- outputs=output,
- title=title,
- theme='huggingface',
- examples=examples,
- cache_examples=False
-)
-demo_app.launch(debug=True, enable_queue=True)
\ No newline at end of file
diff --git a/spaces/Narsil/graph_spectrum/app.py b/spaces/Narsil/graph_spectrum/app.py
deleted file mode 100644
index 298706277f1dce3c515a2b5efbbc2c58cf9c6d21..0000000000000000000000000000000000000000
--- a/spaces/Narsil/graph_spectrum/app.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-import numpy as np
-import pandas as pd
-import re
-import torch
-import altair as alt
-
-
-alt.data_transformers.disable_max_rows()
-
-number_re = re.compile(r"\.[0-9]*\.")
-
-STATE_DICT = {}
-PIPE = None
-DATA = pd.DataFrame()
-
-
-def scatter_plot_fn(group_name):
- global DATA
- df = DATA[DATA.group_name == group_name]
- return gr.LinePlot.update(
- value=df,
- x="rank",
- y="val",
- color="layer",
- tooltip=["val", "rank", "layer"],
- caption="",
- )
-
-
-def find_choices(state_dict):
- if not state_dict:
- return [], []
- global DATA
- layered_tensors = [
- k for k, v in state_dict.items() if number_re.findall(k) and len(v.shape) == 2
- ]
- choices = set()
- data = []
- max_layer = 0
- for name in layered_tensors:
- group_name = number_re.sub(".{N}.", name)
- choices.add(group_name)
- layer = int(number_re.search(name).group()[1:-1])
- if layer > max_layer:
- max_layer = layer
-
- svdvals = torch.linalg.svdvals(state_dict[name])
- svdvals /= svdvals.sum()
- for rank, val in enumerate(svdvals.tolist()):
- data.append((name, layer, group_name, rank, val))
- data = np.array(data)
- DATA = pd.DataFrame(data, columns=["name", "layer", "group_name", "rank", "val"])
- DATA["val"] = DATA["val"].astype("float")
- DATA["layer"] = DATA["layer"].astype("category")
- DATA["rank"] = DATA["rank"].astype("int32")
- return choices, list(range(max_layer + 1))
-
-
-def weights_fn(model_id):
- global STATE_DICT, PIPE
- try:
- pipe = pipeline(model=model_id)
- PIPE = pipe
- STATE_DICT = pipe.model.state_dict()
- except Exception as e:
- print(e)
- STATE_DICT = {}
- choices, layers = find_choices(STATE_DICT)
- return [gr.Dropdown.update(choices=choices), gr.Dropdown.update(choices=layers)]
-
-
-def layer_fn(weights, layer):
- k = 5
- directions = 10
-
- embeddings = PIPE.model.get_input_embeddings().weight
- weight_name = weights.replace("{N}", str(layer))
-
- weight = STATE_DICT[weight_name]
-
- U, S, Vh = torch.linalg.svd(weight)
-
- D = U if U.shape[0] == embeddings.shape[0] else Vh
-
- # words = D[:directions].matmul(embeddings.T).topk(k=k)
- # words_t = D[:, :directions].T.matmul(embeddings.T).topk(k=k)
-
- # Cosine similarity
- words = (
- (D[:directions] / D[:directions].norm(dim=0))
- .matmul(embeddings.T / embeddings.T.norm(dim=0))
- .topk(k=k)
- )
- words_t = (
- (D[:, :directions].T / D[:, :directions].norm(dim=1))
- .matmul(embeddings.T / embeddings.T.norm(dim=0))
- .topk(k=k)
- )
-
- data = [[PIPE.tokenizer.decode(w) for w in indices] for indices in words.indices]
- data = np.array(data)
- data = pd.DataFrame(data)
-
- data_t = [
- [PIPE.tokenizer.decode(w) for w in indices] for indices in words_t.indices
- ]
- data_t = np.array(data_t)
- data_t = pd.DataFrame(data_t)
-
- return (
- gr.Dataframe.update(value=data, interactive=False),
- gr.Dataframe.update(value=data_t, interactive=False),
- )
-
-
-with gr.Blocks() as scatter_plot:
- with gr.Row():
- with gr.Column():
- model_id = gr.Textbox(label="model_id")
- weights = gr.Dropdown(label="weights")
- layer = gr.Dropdown(label="layer")
- with gr.Column():
- plot = gr.LinePlot(show_label=False).style(container=True)
- directions = gr.Dataframe(interactive=False)
- directions_t = gr.Dataframe(interactive=False)
- model_id.change(weights_fn, inputs=model_id, outputs=[weights, layer])
- weights.change(fn=scatter_plot_fn, inputs=weights, outputs=plot)
- layer.change(
- fn=layer_fn, inputs=[weights, layer], outputs=[directions, directions_t]
- )
-
-if __name__ == "__main__":
- scatter_plot.launch()
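
find_choices summarises every 2-D weight by its normalised singular-value spectrum. Computing one such spectrum in isolation, with a random matrix standing in for an entry of a model state dict:

import torch

weight = torch.randn(256, 64)                 # any 2-D weight tensor
svdvals = torch.linalg.svdvals(weight)
svdvals = svdvals / svdvals.sum()             # normalise so the spectrum sums to 1

for rank, val in enumerate(svdvals[:5].tolist()):
    print('rank %d: %.4f' % (rank, val))
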
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py
deleted file mode 100644
index 079db13e61c5ef46d1b1d288012145148eb0be04..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-import torch.nn.functional as F
-from fairseq import metrics, utils
-from fairseq.criterions import FairseqCriterion, register_criterion
-from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
-
-
-@register_criterion("label_smoothed_cross_entropy_r3f")
-class LabelSmoothedCrossEntropyR3FCriterion(FairseqCriterion):
- def __init__(
- self, task, sentence_avg, label_smoothing, eps, r3f_lambda, noise_type
- ):
- super().__init__(task)
- self.sentence_avg = sentence_avg
- self.label_smoothing = label_smoothing
- self.eps = eps
- self.r3f_lambda = r3f_lambda
- self.noise_type = noise_type
- if self.noise_type in {"normal"}:
- self.noise_sampler = torch.distributions.normal.Normal(
- loc=0.0, scale=self.eps
- )
- elif self.noise_type == "uniform":
- self.noise_sampler = torch.distributions.uniform.Uniform(
- low=-self.eps, high=self.eps
- )
- else:
- raise Exception(f"unrecognized noise type {self.noise_type}")
-
- @staticmethod
- def add_args(parser):
- """Add criterion-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
- help='epsilon for label smoothing, 0 means no label smoothing')
- parser.add_argument('--eps', type=float, default=1e-5,
- help='noise eps')
- parser.add_argument('--r3f-lambda', type=float, default=1.0,
- help='lambda for combining logistic loss and noisy KL loss')
- parser.add_argument('--noise-type', type=str, default='normal',
- choices=['normal', 'uniform'],
- help='type of noises')
- # fmt: on
-
- def _get_symm_kl(self, noised_logits, input_logits):
- return (
- F.kl_div(
- F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
- F.softmax(input_logits, dim=-1, dtype=torch.float32),
- None,
- None,
- "sum",
- )
- + F.kl_div(
- F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
- F.softmax(noised_logits, dim=-1, dtype=torch.float32),
- None,
- None,
- "sum",
- )
- ) / noised_logits.size(0)
-
- def forward(self, model, sample, reduce=True):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- token_embeddings = model.encoder.embed_tokens(sample["net_input"]["src_tokens"])
- input_logits, extra = model(**sample["net_input"])
- loss, nll_loss = self.compute_loss(
- model, (input_logits, extra), sample, reduce=reduce
- )
- sample_size = (
- sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
- )
-
- if model.training:
- noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
- token_embeddings
- )
- noised_embeddings = token_embeddings.clone() + noise
-
- noised_logits, _ = model(
- **sample["net_input"], token_embeddings=noised_embeddings
- )
- symm_kl = self._get_symm_kl(noised_logits, input_logits)
-
- if model.training:
- symm_kl = symm_kl * sample_size
- loss = loss + self.r3f_lambda * symm_kl
-
- logging_output = {
- "loss": loss.data,
- "nll_loss": nll_loss.data,
- "ntokens": sample["ntokens"],
- "nsentences": sample["target"].size(0),
- "sample_size": sample_size,
- }
-
- if model.training:
- logging_output.update(
- symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
- )
-
- return loss, sample_size, logging_output
-
- def compute_loss(self, model, net_output, sample, reduce=True):
- lprobs = model.get_normalized_probs(net_output, log_probs=True)
- lprobs = lprobs.view(-1, lprobs.size(-1))
- target = model.get_targets(sample, net_output).view(-1, 1)
- loss, nll_loss = label_smoothed_nll_loss(
- lprobs,
- target,
- self.label_smoothing,
- ignore_index=self.padding_idx,
- reduce=reduce,
- )
- return loss, nll_loss
-
- @staticmethod
- def reduce_metrics(logging_outputs) -> None:
- """Aggregate logging outputs from data parallel training."""
- loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
- nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
- ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
- sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
- symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
-
- metrics.log_scalar("symm_kl", symm_kl_sum / sample_size, sample_size, round=3)
- metrics.log_scalar(
- "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
- )
- metrics.log_scalar(
- "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
- )
- metrics.log_derived(
- "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
- )
-
- @staticmethod
- def logging_outputs_can_be_summed() -> bool:
- """
- Whether the logging outputs returned by `forward` can be summed
- across workers prior to calling `reduce_metrics`. Setting this
-        to True will improve distributed training speed.
- """
- return True
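
The regulariser added during training is the symmetric KL term from _get_symm_kl: the two directional KL divergences between the clean and noised prediction distributions, summed and divided by the batch size. A toy reproduction with random logits:

import torch
import torch.nn.functional as F

clean = torch.randn(4, 7, 32)                 # (batch, time, vocab) logits, toy shapes
noised = clean + 1e-3 * torch.randn_like(clean)

symm_kl = (
    F.kl_div(F.log_softmax(noised, dim=-1), F.softmax(clean, dim=-1), reduction='sum')
    + F.kl_div(F.log_softmax(clean, dim=-1), F.softmax(noised, dim=-1), reduction='sum')
) / noised.size(0)
print(symm_kl.item())                         # small and non-negative for tiny noise
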
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py
deleted file mode 100644
index 6a6585e8b6901a059445ff54ca20ea87751bbb11..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# import sys
-# sys.path.append('tacotron2')
-import torch
-from .layers import STFT
-
-
-class Denoiser(torch.nn.Module):
- """ Removes model bias from audio produced with waveglow """
-
- def __init__(self, waveglow, filter_length=1024, n_overlap=4,
- win_length=1024, mode='zeros'):
- super(Denoiser, self).__init__()
- self.stft = STFT(filter_length=filter_length,
- hop_length=int(filter_length/n_overlap),
- win_length=win_length).cuda()
- if mode == 'zeros':
- mel_input = torch.zeros(
- (1, 80, 88),
- dtype=waveglow.upsample.weight.dtype,
- device=waveglow.upsample.weight.device)
- elif mode == 'normal':
- mel_input = torch.randn(
- (1, 80, 88),
- dtype=waveglow.upsample.weight.dtype,
- device=waveglow.upsample.weight.device)
- else:
- raise Exception("Mode {} if not supported".format(mode))
-
- with torch.no_grad():
- bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
- bias_spec, _ = self.stft.transform(bias_audio)
-
- self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
-
- def forward(self, audio, strength=0.1):
- audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
- audio_spec_denoised = audio_spec - self.bias_spec * strength
- audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
- audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
- return audio_denoised
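
Denoiser.forward is a plain spectral subtraction: a scaled bias magnitude spectrum is subtracted and the result clamped at zero before the inverse STFT. The generic idea, with random tensors standing in for STFT magnitudes:

import torch

audio_spec = torch.rand(1, 513, 100)          # magnitude spectrogram of the audio (toy)
bias_spec = torch.rand(1, 513, 1)             # bias estimated from silence (toy)
strength = 0.1

denoised = torch.clamp(audio_spec - bias_spec * strength, min=0.0)
print(denoised.shape)                         # same shape, bias floor removed
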
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/unit2speech/utils.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/unit2speech/utils.py
deleted file mode 100644
index 7aced08d38301b98b19e2df7d19f1c61150107bc..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/unit2speech/utils.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-import torch
-from examples.textless_nlp.gslm.unit2speech.tacotron2.model import Tacotron2
-from examples.textless_nlp.gslm.unit2speech.tacotron2.waveglow_denoiser import (
- Denoiser,
-)
-
-
-def load_quantized_audio_from_file(file_path):
- base_fname_batch, quantized_units_batch = [], []
- with open(file_path) as f:
- for line in f:
- base_fname, quantized_units_str = line.rstrip().split("|")
- quantized_units = [int(q) for q in quantized_units_str.split(" ")]
- base_fname_batch.append(base_fname)
- quantized_units_batch.append(quantized_units)
- return base_fname_batch, quantized_units_batch
-
-
-def synthesize_audio(model, waveglow, denoiser, inp, lab=None, strength=0.0):
- assert inp.size(0) == 1
- inp = inp.cuda()
- if lab is not None:
- lab = torch.LongTensor(1).cuda().fill_(lab)
-
- with torch.no_grad():
- _, mel, _, ali, has_eos = model.inference(inp, lab, ret_has_eos=True)
- aud = waveglow.infer(mel, sigma=0.666)
- aud_dn = denoiser(aud, strength=strength).squeeze(1)
- return mel, aud, aud_dn, has_eos
-
-
-def load_tacotron(tacotron_model_path, max_decoder_steps):
- ckpt_dict = torch.load(tacotron_model_path)
- hparams = ckpt_dict["hparams"]
- hparams.max_decoder_steps = max_decoder_steps
- sr = hparams.sampling_rate
- model = Tacotron2(hparams)
- model.load_state_dict(ckpt_dict["model_dict"])
- model = model.cuda().eval().half()
- return model, sr, hparams
-
-
-def load_waveglow(waveglow_path):
- waveglow = torch.load(waveglow_path)["model"]
- waveglow = waveglow.cuda().eval().half()
- for k in waveglow.convinv:
- k.float()
- denoiser = Denoiser(waveglow)
- return waveglow, denoiser
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/prepend_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/prepend_dataset.py
deleted file mode 100644
index ad74784d2d7920e4a6225282d95543ce16ea50d9..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/prepend_dataset.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-
-from . import BaseWrapperDataset
-
-
-class PrependDataset(BaseWrapperDataset):
- def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
- super().__init__(dataset)
- self.prepend_getter = prepend_getter
- self.ensure_first_token = ensure_first_token_is
-
- def __getitem__(self, idx):
- item = self.dataset[idx]
- is_tuple = isinstance(item, tuple)
- src = item[0] if is_tuple else item
-
- assert self.ensure_first_token is None or src[0] == self.ensure_first_token
- prepend_idx = self.prepend_getter(self.dataset, idx)
- assert isinstance(prepend_idx, int)
- src[0] = prepend_idx
- item = tuple((src,) + item[1:]) if is_tuple else src
- return item
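
A possible use of PrependDataset: the getter returns the index to write into position 0 of each sample (a language-ID token, for instance). The wrapped dataset and getter below are invented for illustration and assume fairseq is installed so PrependDataset is importable.

import torch
from torch.utils.data import Dataset
from fairseq.data import PrependDataset        # assumes fairseq is installed

class ToyDataset(Dataset):
    def __init__(self):
        self.items = [torch.tensor([0, 11, 12]), torch.tensor([0, 21, 22])]
    def __len__(self):
        return len(self.items)
    def __getitem__(self, idx):
        return self.items[idx]

def lang_id_getter(dataset, idx):
    return 99                                  # e.g. dictionary index of a language tag

wrapped = PrependDataset(ToyDataset(), lang_id_getter, ensure_first_token_is=0)
print(wrapped[0])                              # tensor([99, 11, 12])
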
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/fp16_optimizer.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/fp16_optimizer.py
deleted file mode 100644
index c59b21cf6b36650a4dd899e62b83a01715d2e2a1..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/fp16_optimizer.py
+++ /dev/null
@@ -1,548 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections import defaultdict
-from itertools import chain
-
-import torch
-from fairseq import optim
-from omegaconf import DictConfig
-
-from .dynamic_loss_scaler import DynamicLossScaler
-
-
-class _FP16OptimizerMixin(object):
- def __init__(self, *args, **kwargs):
- # forward __init__ call to the next class in mro(method resolution order)
- super().__init__(*args, **kwargs)
- self._multiply_factor = 1.0
-
- @property
- def has_flat_params(self):
- return torch.is_tensor(self.fp32_params) or (
- isinstance(self.fp32_params, dict)
- and all(torch.is_tensor(t) for t in self.fp32_params.values())
- )
-
- @classmethod
- def build_fp32_params(cls, args, params, flatten=True):
- # create FP32 copy of parameters and grads
- if flatten:
- is_pipeline_parallel = getattr(
- args, "pipeline_model_parallel", False
- ) and getattr(args, "distributed_no_spawn", False)
- total_param_size = sum(p.data.numel() for p in params)
- devices = [torch.cuda.current_device()]
- if is_pipeline_parallel:
- devices = list(set(args.pipeline_devices))
- fp32_params = {}
- for device in devices:
- if is_pipeline_parallel:
- device_param_size = sum(
- p.data.numel() for p in params if p.device.index == device
- )
- device_params = [p for p in params if p.device.index == device]
- else:
- device_param_size = total_param_size
- device_params = params
- fp32_params[device] = (
- device_params[0].new(0).float().new(device_param_size)
- )
- offset = 0
- for p in device_params:
- numel = p.data.numel()
- fp32_params[device][offset : offset + numel].copy_(p.data.view(-1))
- offset += numel
- fp32_params[device] = torch.nn.Parameter(fp32_params[device])
- fp32_params[device].grad = fp32_params[device].data.new(
- device_param_size
- )
- return fp32_params
- else:
- fp32_params = []
- for p in params:
- p32 = torch.nn.Parameter(p.data.float())
- if hasattr(p, 'expert'):
- p32.expert = True
- elif hasattr(p, 'base_expert'):
- p32.base_expert = True
- p32.grad = torch.zeros_like(p32.data)
- if hasattr(p, "param_group"):
- p32.param_group = p.param_group
- fp32_params.append(p32)
- return fp32_params
-
- def state_dict(self):
- """Return the optimizer's state dict."""
- state_dict = self.fp32_optimizer.state_dict()
- if self.scaler is not None:
- state_dict["loss_scale"] = self.scaler.loss_scale
- return state_dict
-
- def load_state_dict(self, state_dict, optimizer_overrides=None):
- """Load an optimizer state dict.
-
- In general we should prefer the configuration of the existing optimizer
- instance (e.g., learning rate) over that found in the state_dict. This
- allows us to resume training from a checkpoint using a new set of
- optimizer args.
- """
- if "loss_scale" in state_dict and self.scaler is not None:
- self.scaler.loss_scale = state_dict["loss_scale"]
- self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
-
- def backward(self, loss):
- """Computes the sum of gradients of the given tensor w.r.t. graph leaves.
-
- Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
- function additionally dynamically scales the loss to avoid gradient
- underflow.
- """
- if self.scaler is not None:
- loss = self.scaler.scale(loss)
- loss.backward()
- self._needs_sync = True
-
- def _sync_fp16_grads_to_fp32(self):
- if self._needs_sync:
- # copy FP16 grads to FP32
- if self.has_flat_params:
- devices = list(self.fp32_params.keys())
- device_params_dict = defaultdict(list)
- for p in self.fp16_params:
- if p.requires_grad:
- device_params_dict[p.device.index].append(p)
- for device in devices:
- device_params = device_params_dict[device]
- offset = 0
- for p in device_params:
- grad_data = (
- p.grad.data
- if p.grad is not None
- else p.data.new_zeros(p.data.shape)
- )
- numel = grad_data.numel()
- self.fp32_params[device].grad.data[
- offset : offset + numel
- ].copy_(grad_data.view(-1))
- offset += numel
- else:
- for p, p32 in zip(self.fp16_params, self.fp32_params):
- if not p.requires_grad:
- continue
- if p.grad is not None:
- if p32.grad is None:
- p32.grad = p.grad.data.float()
- else:
- p32.grad.data.copy_(p.grad.data)
- else:
- p32.grad = torch.zeros_like(p.data, dtype=torch.float)
-
- self._needs_sync = False
-
- def _sync_fp32_params_to_fp16(self):
- # copy FP32 params back into FP16 model
- if self.has_flat_params:
- devices = list(self.fp32_params.keys())
- device_params_dict = defaultdict(list)
- for p in self.fp16_params:
- device_params_dict[p.device.index].append(p)
- for device in devices:
- device_params = device_params_dict[device]
- offset = 0
- for p in device_params:
- numel = p.data.numel()
- p.data.copy_(
- self.fp32_params[device]
- .data[offset : offset + numel]
- .view_as(p.data)
- )
- offset += numel
- else:
- for p, p32 in zip(self.fp16_params, self.fp32_params):
- if not p.requires_grad:
- continue
- p.data.copy_(p32.data)
-
- def _unscale_grads(self):
- self._sync_fp16_grads_to_fp32()
- if (
- # Skip the multiplication if it's a no-op (i.e., if _multiply_factor
- # is 1.0). At the same time, we want to avoid the device-to-host
- # transfer by comparing it to 1.0. Since _multiply_factor starts as
- # a Python float, we roughly assume that if it's a tensor then it's
- # probably not =1.0 anymore and we do the multiplication. Otherwise
- # we can safely check the value without a D2H transfer.
- torch.is_tensor(self._multiply_factor)
- or self._multiply_factor != 1.0
- ):
- self.fp32_optimizer.multiply_grads(self._multiply_factor)
- self._multiply_factor = 1.0
-
- def multiply_grads(self, c):
- """Multiplies grads by a constant ``c``."""
- self._multiply_factor *= c
-
- def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
- """Clips gradient norm and updates dynamic loss scaler."""
- self._sync_fp16_grads_to_fp32()
-
- grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm(
- 0, aggregate_norm_fn
- )
-
- if self.scaler is not None:
- if grad_norm > max_norm > 0.0:
- self._multiply_factor *= max_norm / grad_norm
-
- self.scaler.check_overflow(grad_norm)
- elif max_norm > 0.0:
- clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
- self._multiply_factor *= clip_coef
-
- return grad_norm
-
- def step(self, closure=None, groups=None):
- """Performs a single optimization step."""
- self._sync_fp16_grads_to_fp32()
-
- if getattr(self, "supports_step_with_scale", False):
- self.fp32_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
- else:
- self._unscale_grads()
- self.fp32_optimizer.step(closure, groups=groups)
-
- if self.scaler is not None:
- self.scaler.update()
-
- self._sync_fp32_params_to_fp16()
-
- def zero_grad(self):
- """Clears the gradients of all optimized parameters."""
- for p in self.fp16_params:
- p.grad = None
- if self.has_flat_params:
- if torch.is_tensor(self.fp32_params):
- self.fp32_params.grad.zero_()
- elif isinstance(self.fp32_params, dict):
- for fp32_params in self.fp32_params.values():
- fp32_params.grad.zero_()
- else:
- raise RuntimeError("self.fp32_params must be a tensor or dict")
- else:
- for p32 in self.fp32_params:
- if p32.grad is not None:
- p32.grad.zero_()
- self._needs_sync = False
-
- if self.scaler is not None:
- self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
-
-
-class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
- """
- Wrap an *optimizer* to support FP16 (mixed precision) training.
- """
-
- def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs):
- super().__init__(cfg.optimizer)
- self.fp16_params = params
- self.fp32_optimizer = fp32_optimizer
- self.fp32_params = fp32_params
-
- if getattr(cfg.common, "fp16_scale_window", None) is None:
- if len(cfg.optimization.update_freq) > 1:
- raise ValueError(
- "--fp16-scale-window must be given explicitly when using a "
- "custom --update-freq schedule"
- )
- data_parallel_size = int(
- cfg.distributed_training.distributed_world_size
- / cfg.common.model_parallel_size
- )
- scale_window = int(
- 2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0]
- )
- else:
- scale_window = cfg.common.fp16_scale_window
-
- if not getattr(cfg.common, "bf16", False):
- self.scaler = DynamicLossScaler(
- init_scale=cfg.common.fp16_init_scale,
- scale_window=scale_window,
- tolerance=cfg.common.fp16_scale_tolerance,
- threshold=cfg.common.threshold_loss_scale,
- min_loss_scale=cfg.common.min_loss_scale,
- )
- else:
- # disable loss scaling for bfloat16
- self.scaler = None
-
- @classmethod
- def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
- """
- Args:
- cfg (omegaconf.DictConfig): fairseq args
- params (iterable): iterable of parameters to optimize
- """
- flatten = not getattr(cfg.common, "fp16_no_flatten_grads", False)
- if getattr(cfg.common, "bf16", False):
- flatten = False # mixed precision is faster on TPUs without flat grads
- fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten)
- if flatten:
- fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params])
- else:
- fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params)
- if flatten and not fp32_optimizer.supports_flat_params:
- raise RuntimeError(
- f"chosen optimizer {fp32_optimizer.__class__.__name__} does not support flat params, please set --fp16-no-flatten-grads"
- )
- return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs)
-
- @property
- def optimizer(self):
- return self.fp32_optimizer.optimizer
-
- @optimizer.setter
- def optimizer(self, optimizer):
- self.fp32_optimizer.optimizer = optimizer
-
- @property
- def lr_scheduler(self):
- return getattr(self.fp32_optimizer, "lr_scheduler", None)
-
- @property
- def optimizer_config(self):
- return self.fp32_optimizer.optimizer_config
-
- def get_lr(self):
- return self.fp32_optimizer.get_lr()
-
- def set_lr(self, lr):
- self.fp32_optimizer.set_lr(lr)
-
- def all_reduce_grads(self, module):
- self.fp32_optimizer.all_reduce_grads(module)
-
- @property
- def supports_flat_params(self):
- return self.fp32_optimizer.supports_flat_params
-
-
-class _MemoryEfficientFP16OptimizerMixin(object):
- def __init__(self, *args, **kwargs):
- # forward __init__ call to the next class in MRO (method resolution order)
- super().__init__(*args, **kwargs)
- self._multiply_factor = 1.0
-
- @property
- def has_flat_params(self):
- return False
-
- def state_dict(self):
- """Return the optimizer's state dict."""
- state_dict = self.wrapped_optimizer.state_dict()
- if self.scaler is not None:
- state_dict["loss_scale"] = self.scaler.loss_scale
- return state_dict
-
- def load_state_dict(self, state_dict, optimizer_overrides=None):
- """Load an optimizer state dict.
-
- In general we should prefer the configuration of the existing optimizer
- instance (e.g., learning rate) over that found in the state_dict. This
- allows us to resume training from a checkpoint using a new set of
- optimizer args.
- """
- if "loss_scale" in state_dict and self.scaler is not None:
- self.scaler.loss_scale = state_dict["loss_scale"]
-
- self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
-
- # Hack: PyTorch automatically casts the optimizer state to match the
- # type of the current parameters. But with --memory-efficient-fp16 the
- # params are FP16 while the optimizer state is FP32 and we don't want
- # to cast. A workaround is to manually copy back the original state
- # after the optimizer has been loaded.
- if not getattr(self.optimizer, "disable_mem_eff_fp16_loading_hack", False):
- groups = self.optimizer.param_groups
- saved_groups = state_dict["param_groups"]
- id_map = {
- old_id: p
- for old_id, p in zip(
- chain(*(g["params"] for g in saved_groups)),
- chain(*(g["params"] for g in groups)),
- )
- }
- for k, v in state_dict["state"].items():
- if k in id_map:
- param = id_map[k]
- self.optimizer.state[param] = v
-
- def backward(self, loss):
- """Computes the sum of gradients of the given tensor w.r.t. graph leaves.
-
- Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
- function additionally dynamically scales the loss to avoid gradient
- underflow.
- """
- if self.scaler is not None:
- loss = self.scaler.scale(loss)
- loss.backward()
-
- def _unscale_grads(self):
- if (
- # Skip the multiplication if it's a no-op (i.e., if _multiply_factor
- # is 1.0). At the same time, we want to avoid the device-to-host
- # transfer by comparing it to 1.0. Since _multiply_factor starts as
- # a Python float, we roughly assume that if it's a tensor then it's
- # probably not =1.0 anymore and we do the multiplication. Otherwise
- # we can safely check the value without a D2H transfer.
- torch.is_tensor(self._multiply_factor)
- or self._multiply_factor != 1.0
- ):
- self.wrapped_optimizer.multiply_grads(self._multiply_factor)
- self._multiply_factor = 1.0
-
- def multiply_grads(self, c):
- """Multiplies grads by a constant *c*."""
- self._multiply_factor *= c
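-        # The multiplication is deferred: the accumulated factor is only folded into
-        # the gradients later, in _unscale_grads() or via the `scale` argument passed
-        # to the wrapped optimizer's step().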
-
- def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
- """Clips gradient norm and updates dynamic loss scaler."""
- max_norm = float(max_norm)
- grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm(
- 0, aggregate_norm_fn
- )
-
- if self.scaler is not None:
- grad_norm_cpu = float(grad_norm)
- if grad_norm_cpu > max_norm > 0.0:
- self._multiply_factor *= max_norm / grad_norm_cpu
-
- # detect overflow and adjust loss scale
- self.scaler.check_overflow(grad_norm_cpu)
- elif max_norm > 0.0:
- clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
- self._multiply_factor *= clip_coef
-
- return grad_norm
-
- def step(self, closure=None, groups=None):
- """Performs a single optimization step."""
- if getattr(self, "supports_step_with_scale", False):
- # NOTE(msb) optimizer divides by scale factor
- self.wrapped_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
- else:
- self._unscale_grads()
- self.wrapped_optimizer.step(closure, groups=groups)
-
- if self.scaler is not None:
- self.scaler.update()
-
- def zero_grad(self):
- """Clears the gradients of all optimized parameters."""
- self.wrapped_optimizer.zero_grad()
- if self.scaler is not None:
- self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
- else:
- self._multiply_factor = 1.0
-
- @property
- def supports_flat_params(self):
- return self.wrapped_optimizer.supports_flat_params
-
-
-class MemoryEfficientFP16Optimizer(
- _MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer
-):
- """
- Wrap an *optimizer* to support FP16 (mixed precision) training.
-
- Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
- maintain an FP32 copy of the model. We instead expect the optimizer to
- convert the gradients to FP32 internally and sync the results back to the
- FP16 model params. This significantly reduces memory usage but slightly
- increases the time spent in the optimizer.
-
- Since this wrapper depends on specific functionality in the wrapped
- optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
- optimizers can be wrapped. This is determined by the
- *supports_memory_efficient_fp16* property.
- """
-
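-    # Illustrative usage sketch (assuming an fp16-enabled DictConfig `cfg` and a
-    # wrapped optimizer that sets supports_memory_efficient_fp16, such as fairseq Adam):
-    #
-    #   optimizer = MemoryEfficientFP16Optimizer.build_optimizer(cfg, params)
-    #   optimizer.backward(loss)       # loss is multiplied by the dynamic scale
-    #   optimizer.clip_grad_norm(0.0)  # measures the grad norm, updates the scaler
-    #   optimizer.step()               # grads are unscaled, then the wrapped step runs
-    #   optimizer.zero_grad()
-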
- def __init__(
- self, cfg: DictConfig, params, optimizer, allow_unsupported=False, **kwargs
- ):
- if not allow_unsupported and not optimizer.supports_memory_efficient_fp16:
- raise ValueError(
- "Unsupported optimizer: {}".format(optimizer.__class__.__name__)
- )
-
- super().__init__(getattr(cfg, "optimizer", None))
- self.wrapped_optimizer = optimizer
-
- if getattr(cfg.common, "fp16_scale_window", None) is None:
- if len(cfg.optimization.update_freq) > 1:
- raise ValueError(
- "--fp16-scale-window must be given explicitly when using a "
- "custom --update-freq schedule"
- )
- data_parallel_size = int(
- cfg.distributed_training.distributed_world_size
- / cfg.common.model_parallel_size
- )
- scale_window = int(
- 2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0]
- )
- else:
- scale_window = cfg.common.fp16_scale_window
-
- if not getattr(cfg.common, "bf16", False):
- self.scaler = DynamicLossScaler(
- init_scale=cfg.common.fp16_init_scale,
- scale_window=scale_window,
- tolerance=cfg.common.fp16_scale_tolerance,
- threshold=cfg.common.threshold_loss_scale,
- min_loss_scale=cfg.common.min_loss_scale,
- )
- else:
- # disable loss scaling for bfloat16
- self.scaler = None
-
- @classmethod
- def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
- """
- Args:
-            cfg (omegaconf.DictConfig): fairseq args
- params (iterable): iterable of parameters to optimize
- """
- fp16_optimizer = optim.build_optimizer(cfg.optimizer, params)
- return cls(cfg, params, fp16_optimizer, **kwargs)
-
- @property
- def optimizer(self):
- return self.wrapped_optimizer.optimizer
-
- @optimizer.setter
- def optimizer(self, optimizer):
- self.wrapped_optimizer.optimizer = optimizer
-
- @property
- def optimizer_config(self):
- return self.wrapped_optimizer.optimizer_config
-
- @property
- def lr_scheduler(self):
- return getattr(self.wrapped_optimizer, "lr_scheduler", None)
-
- def get_lr(self):
- return self.wrapped_optimizer.get_lr()
-
- def set_lr(self, lr):
- self.wrapped_optimizer.set_lr(lr)
-
- def all_reduce_grads(self, module):
- self.wrapped_optimizer.all_reduce_grads(module)
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/positional_embedding.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/positional_embedding.py
deleted file mode 100644
index 8e94e35edb46bf9dea911fe74577d8ecbe9b5ff1..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/positional_embedding.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch.nn as nn
-
-from .learned_positional_embedding import LearnedPositionalEmbedding
-from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
-
-
-def PositionalEmbedding(
- num_embeddings: int,
- embedding_dim: int,
- padding_idx: int,
- learned: bool = False,
-):
- if learned:
- # if padding_idx is specified then offset the embedding ids by
- # this index and adjust num_embeddings appropriately
- # TODO: The right place for this offset would be inside
- # LearnedPositionalEmbedding. Move this there for a cleaner implementation.
- if padding_idx is not None:
- num_embeddings = num_embeddings + padding_idx + 1
- m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
- nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
- if padding_idx is not None:
- nn.init.constant_(m.weight[padding_idx], 0)
- else:
- m = SinusoidalPositionalEmbedding(
- embedding_dim,
- padding_idx,
- init_size=num_embeddings + padding_idx + 1,
- )
- return m
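-
-
-# For example (sizes illustrative): PositionalEmbedding(1024, 512, padding_idx=1, learned=True)
-# allocates 1024 + 1 + 1 = 1026 learned positions so that ids can be offset past the padding
-# index; the sinusoidal branch sizes its table the same way via init_size.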
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/fairseq_optimizer.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/fairseq_optimizer.py
deleted file mode 100644
index 7e5411753a2ba94f3a7a68316131530b8b17d22a..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/fairseq_optimizer.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-from fairseq import utils
-from fairseq.dataclass.utils import gen_parser_from_dataclass
-
-
-class FairseqOptimizer(object):
- def __init__(self, cfg):
- super().__init__()
- self.cfg = cfg
-
- @classmethod
- def add_args(cls, parser):
- """Add optimizer-specific arguments to the parser."""
- dc = getattr(cls, "__dataclass", None)
- if dc is not None:
- gen_parser_from_dataclass(parser, dc())
-
- @property
- def optimizer(self):
- """Return a torch.optim.optimizer.Optimizer instance."""
- if not hasattr(self, "_optimizer"):
- raise NotImplementedError
- if not isinstance(self._optimizer, torch.optim.Optimizer):
- raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
- return self._optimizer
-
- @optimizer.setter
- def optimizer(self, optimizer):
- """Reset optimizer instance."""
- if not hasattr(self, "_optimizer"):
- raise NotImplementedError
- if not isinstance(self._optimizer, torch.optim.Optimizer):
- raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
- self._optimizer = optimizer
-
- @property
- def optimizer_config(self):
- """
- Return a kwarg dictionary that will be used to override optimizer
- args stored in checkpoints. This allows us to load a checkpoint and
- resume training using a different set of optimizer args, e.g., with a
- different learning rate.
- """
- raise NotImplementedError
-
- @property
- def params(self):
- """Return an iterable of the parameters held by the optimizer."""
- for param_group in self.param_groups:
- for p in param_group["params"]:
- yield p
-
- @property
- def param_groups(self):
- return self.optimizer.param_groups
-
- def __getstate__(self):
- return self._optimizer.__getstate__()
-
- def get_lr(self):
- """Return the current learning rate."""
- return self.param_groups[0]["lr"]
-
- def set_lr(self, lr):
- """Set the learning rate."""
- for param_group in self.param_groups:
- param_group["lr"] = lr
-
- def state_dict(self):
- """Return the optimizer's state dict."""
- return self.optimizer.state_dict()
-
- def load_state_dict(self, state_dict, optimizer_overrides=None):
- """Load an optimizer state dict.
-
- In general we should prefer the configuration of the existing optimizer
- instance (e.g., learning rate) over that found in the state_dict. This
- allows us to resume training from a checkpoint using a new set of
- optimizer args.
- """
- self.optimizer.load_state_dict(state_dict)
-
- if optimizer_overrides is not None and len(optimizer_overrides) > 0:
- # override learning rate, momentum, etc. with latest values
- for group in self.param_groups:
- group.update(optimizer_overrides)
-
- def backward(self, loss):
- """Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
- loss.backward()
-
- def all_reduce_grads(self, module):
- """Manually all-reduce gradients (if required)."""
- if hasattr(module, "all_reduce_grads"):
- module.all_reduce_grads()
-
- def multiply_grads(self, c):
- """Multiplies grads by a constant *c*."""
- for p in self.params:
- if p.grad is not None:
- if torch.is_tensor(c):
- c = c.to(p.grad.device)
- p.grad.data.mul_(c)
-
- def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
- """Clips gradient norm."""
- return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)
-
- def step(self, closure=None, scale=1.0, groups=None):
- """Performs a single optimization step."""
- if self.supports_step_with_scale:
- if self.supports_groups:
- self.optimizer.step(closure, scale=scale, groups=groups)
- else:
- self.optimizer.step(closure, scale=scale)
- else:
- if scale != 1.0:
- self.multiply_grads(1.0 / scale)
- if self.supports_groups:
- self.optimizer.step(closure, groups=groups)
- else:
- self.optimizer.step(closure)
-
- def zero_grad(self):
- """Clears the gradients of all optimized parameters."""
- for p in self.params:
- p.grad = None
- self.optimizer.zero_grad()
-
- @property
- def supports_memory_efficient_fp16(self):
- if hasattr(self.optimizer, "supports_memory_efficient_fp16"):
- return self.optimizer.supports_memory_efficient_fp16
- return False
-
- @property
- def supports_step_with_scale(self):
- if hasattr(self.optimizer, "supports_step_with_scale"):
- return self.optimizer.supports_step_with_scale
- return False
-
- @property
- def supports_groups(self):
- if hasattr(self.optimizer, "supports_groups"):
- return self.optimizer.supports_groups
- return False
-
- @property
- def supports_flat_params(self):
- """
- Whether the optimizer supports collapsing of the model
- parameters/gradients into a single contiguous Tensor.
- """
- if hasattr(self.optimizer, "supports_flat_params"):
- return self.optimizer.supports_flat_params
- return False
-
- def average_params(self):
- pass
-
- def broadcast_global_state_dict(self, state_dict):
- """
- Broadcasts a global state dict to all ranks.
- Useful for optimizers that shard state between ranks.
- """
- if hasattr(self.optimizer, "broadcast_global_state_dict"):
- return self.optimizer.broadcast_global_state_dict(state_dict)
- else:
- return state_dict
-
-
-class LegacyFairseqOptimizer(FairseqOptimizer):
- def __init__(self, args):
- self.args = args
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_text_joint_to_text/scripts/g2p_encode.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_text_joint_to_text/scripts/g2p_encode.py
deleted file mode 100644
index 9db779396f492e3f71b08d7b895beb81d8e46bc9..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_text_joint_to_text/scripts/g2p_encode.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import itertools
-import logging
-import re
-import time
-
-from g2p_en import G2p
-
-logger = logging.getLogger(__name__)
-
-FAIL_SENT = "FAILED_SENTENCE"
-
-
-def parse():
- parser = argparse.ArgumentParser()
- parser.add_argument("--data-path", type=str, required=True)
- parser.add_argument("--out-path", type=str, required=True)
- parser.add_argument("--lower-case", action="store_true")
- parser.add_argument("--do-filter", action="store_true")
- parser.add_argument("--use-word-start", action="store_true")
- parser.add_argument("--dup-vowel", default=1, type=int)
- parser.add_argument("--dup-consonant", default=1, type=int)
- parser.add_argument("--no-punc", action="store_true")
- parser.add_argument("--reserve-word", type=str, default="")
- parser.add_argument(
- "--reserve-first-column",
- action="store_true",
- help="first column is sentence id",
- )
- ###
- parser.add_argument("--parallel-process-num", default=1, type=int)
- parser.add_argument("--logdir", default="")
- args = parser.parse_args()
- return args
-
-
-def process_sent(sent, g2p, res_wrds, args):
- sents = pre_process_sent(sent, args.do_filter, args.lower_case, res_wrds)
- pho_seqs = [do_g2p(g2p, s, res_wrds, i == 0) for i, s in enumerate(sents)]
- pho_seq = (
- [FAIL_SENT]
- if [FAIL_SENT] in pho_seqs
- else list(itertools.chain.from_iterable(pho_seqs))
- )
- if args.no_punc:
- pho_seq = remove_punc(pho_seq)
- if args.dup_vowel > 1 or args.dup_consonant > 1:
- pho_seq = dup_pho(pho_seq, args.dup_vowel, args.dup_consonant)
- if args.use_word_start:
- pho_seq = add_word_start(pho_seq)
- return " ".join(pho_seq)
-
-
-def remove_punc(sent):
- ns = []
- regex = re.compile("[^a-zA-Z0-9 ]")
- for p in sent:
- if (not regex.search(p)) or p == FAIL_SENT:
- if p == " " and (len(ns) == 0 or ns[-1] == " "):
- continue
- ns.append(p)
- return ns
-
-
-def do_g2p(g2p, sent, res_wrds, is_first_sent):
- if sent in res_wrds:
- pho_seq = [res_wrds[sent]]
- else:
- pho_seq = g2p(sent)
- if not is_first_sent:
- pho_seq = [" "] + pho_seq # add space to separate
- return pho_seq
-
-
-def pre_process_sent(sent, do_filter, lower_case, res_wrds):
- if do_filter:
- sent = re.sub("-", " ", sent)
- sent = re.sub("—", " ", sent)
- if len(res_wrds) > 0:
- wrds = sent.split()
- wrds = ["SPLIT_ME " + w + " SPLIT_ME" if w in res_wrds else w for w in wrds]
- sents = [x.strip() for x in " ".join(wrds).split("SPLIT_ME") if x.strip() != ""]
- else:
- sents = [sent]
- if lower_case:
- sents = [s.lower() if s not in res_wrds else s for s in sents]
- return sents
-
-
-def dup_pho(sent, dup_v_num, dup_c_num):
- """
-    Duplicate phonemes as defined by cmudict
- http://www.speech.cs.cmu.edu/cgi-bin/cmudict
- """
- if dup_v_num == 1 and dup_c_num == 1:
- return sent
- ns = []
- for p in sent:
- ns.append(p)
- if re.search(r"\d$", p):
- for i in range(1, dup_v_num):
- ns.append(f"{p}-{i}P")
- elif re.search(r"\w", p):
- for i in range(1, dup_c_num):
- ns.append(f"{p}-{i}P")
- return ns
-
-
-def add_word_start(sent):
- ns = []
- do_add = True
- ws = "▁"
- for p in sent:
- if do_add:
- p = ws + p
- do_add = False
- if p == " ":
- do_add = True
- else:
- ns.append(p)
- return ns
-
-
-def load_reserve_word(reserve_word):
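-    # The reserve-word file has two whitespace-separated columns per line: the word
-    # to protect and the single token it maps to (a hypothetical line: "nasa NASA").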
- if reserve_word == "":
- return []
- with open(reserve_word, "r") as fp:
- res_wrds = [x.strip().split() for x in fp.readlines() if x.strip() != ""]
- assert sum([0 if len(x) == 2 else 1 for x in res_wrds]) == 0
- res_wrds = dict(res_wrds)
- return res_wrds
-
-
-def process_sents(sents, args):
- g2p = G2p()
- out_sents = []
- res_wrds = load_reserve_word(args.reserve_word)
- for sent in sents:
- col1 = ""
- if args.reserve_first_column:
- col1, sent = sent.split(None, 1)
- sent = process_sent(sent, g2p, res_wrds, args)
- if args.reserve_first_column and col1 != "":
- sent = f"{col1} {sent}"
- out_sents.append(sent)
- return out_sents
-
-
-def main():
- args = parse()
- out_sents = []
- with open(args.data_path, "r") as fp:
- sent_list = [x.strip() for x in fp.readlines()]
- if args.parallel_process_num > 1:
- try:
- import submitit
- except ImportError:
-            logger.warning(
-                "submitit is not found, so only one job will be used to process the data"
-            )
- submitit = None
-
- if args.parallel_process_num == 1 or submitit is None:
- out_sents = process_sents(sent_list, args)
- else:
- # process sentences with parallel computation
- lsize = len(sent_list) // args.parallel_process_num + 1
- executor = submitit.AutoExecutor(folder=args.logdir)
- executor.update_parameters(timeout_min=1000, cpus_per_task=4)
- jobs = []
- for i in range(args.parallel_process_num):
- job = executor.submit(
- process_sents, sent_list[lsize * i : lsize * (i + 1)], args
- )
- jobs.append(job)
- is_running = True
- while is_running:
- time.sleep(5)
- is_running = sum([job.done() for job in jobs]) < len(jobs)
- out_sents = list(itertools.chain.from_iterable([job.result() for job in jobs]))
- with open(args.out_path, "w") as fp:
- fp.write("\n".join(out_sents) + "\n")
-
-
-if __name__ == "__main__":
- main()
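-
-# Example invocation (file names are illustrative):
-#   python g2p_encode.py --data-path sents.txt --out-path sents.pho \
-#       --lower-case --no-punc --use-word-start --parallel-process-num 4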
diff --git a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/download.py b/spaces/Olivier-Truong/faster-whisper-webui-v2/src/download.py
deleted file mode 100644
index 20565153f9e582be73246a1e2a3b7be3f368b322..0000000000000000000000000000000000000000
--- a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/download.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from tempfile import mkdtemp
-from typing import List
-from yt_dlp import YoutubeDL
-
-import yt_dlp
-from yt_dlp.postprocessor import PostProcessor
-
-class FilenameCollectorPP(PostProcessor):
- def __init__(self):
- super(FilenameCollectorPP, self).__init__(None)
- self.filenames = []
-
- def run(self, information):
- self.filenames.append(information["filepath"])
- return [], information
-
-def download_url(url: str, maxDuration: int = None, destinationDirectory: str = None, playlistItems: str = "1") -> List[str]:
- try:
- return _perform_download(url, maxDuration=maxDuration, outputTemplate=None, destinationDirectory=destinationDirectory, playlistItems=playlistItems)
- except yt_dlp.utils.DownloadError as e:
- # In case of an OS error, try again with a different output template
- if e.msg and e.msg.find("[Errno 36] File name too long") >= 0:
- return _perform_download(url, maxDuration=maxDuration, outputTemplate="%(title).10s %(id)s.%(ext)s")
-        # Re-raise any other download error instead of silently returning None
-        raise
-
-def _perform_download(url: str, maxDuration: int = None, outputTemplate: str = None, destinationDirectory: str = None, playlistItems: str = "1"):
- # Create a temporary directory to store the downloaded files
- if destinationDirectory is None:
- destinationDirectory = mkdtemp()
-
- ydl_opts = {
- "format": "bestaudio/best",
- 'paths': {
- 'home': destinationDirectory
- }
- }
-    if playlistItems:
- ydl_opts['playlist_items'] = playlistItems
-
- # Add output template if specified
- if outputTemplate:
- ydl_opts['outtmpl'] = outputTemplate
-
- filename_collector = FilenameCollectorPP()
-
- with YoutubeDL(ydl_opts) as ydl:
- if maxDuration and maxDuration > 0:
- info = ydl.extract_info(url, download=False)
- entries = "entries" in info and info["entries"] or [info]
-
- total_duration = 0
-
- # Compute total duration
- for entry in entries:
- total_duration += float(entry["duration"])
-
- if total_duration >= maxDuration:
- raise ExceededMaximumDuration(videoDuration=total_duration, maxDuration=maxDuration, message="Video is too long")
-
- ydl.add_post_processor(filename_collector)
- ydl.download([url])
-
- if len(filename_collector.filenames) <= 0:
- raise Exception("Cannot download " + url)
-
- result = []
-
- for filename in filename_collector.filenames:
- result.append(filename)
- print("Downloaded " + filename)
-
- return result
-
-class ExceededMaximumDuration(Exception):
- def __init__(self, videoDuration, maxDuration, message):
- self.videoDuration = videoDuration
- self.maxDuration = maxDuration
- super().__init__(message)
\ No newline at end of file
diff --git a/spaces/Omnibus/MusicGen/audiocraft/data/audio.py b/spaces/Omnibus/MusicGen/audiocraft/data/audio.py
deleted file mode 100644
index 2048df6f175d7303bcf5c7b931922fd297908ead..0000000000000000000000000000000000000000
--- a/spaces/Omnibus/MusicGen/audiocraft/data/audio.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Audio IO methods (info, read, write) are defined in this module.
-We rely on the av library for faster reads when possible, otherwise on torchaudio.
-"""
-
-from dataclasses import dataclass
-from pathlib import Path
-import logging
-import typing as tp
-
-import numpy as np
-import soundfile
-import torch
-from torch.nn import functional as F
-import torchaudio as ta
-
-import av
-
-from .audio_utils import f32_pcm, i16_pcm, normalize_audio
-
-
-_av_initialized = False
-
-
-def _init_av():
- global _av_initialized
- if _av_initialized:
- return
- logger = logging.getLogger('libav.mp3')
- logger.setLevel(logging.ERROR)
- _av_initialized = True
-
-
-@dataclass(frozen=True)
-class AudioFileInfo:
- sample_rate: int
- duration: float
- channels: int
-
-
-def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
- _init_av()
- with av.open(str(filepath)) as af:
- stream = af.streams.audio[0]
- sample_rate = stream.codec_context.sample_rate
- duration = float(stream.duration * stream.time_base)
- channels = stream.channels
- return AudioFileInfo(sample_rate, duration, channels)
-
-
-def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
- info = soundfile.info(filepath)
- return AudioFileInfo(info.samplerate, info.duration, info.channels)
-
-
-def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
-    # torchaudio no longer returns useful duration information for some formats like mp3.
- filepath = Path(filepath)
- if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info
- # ffmpeg has some weird issue with flac.
- return _soundfile_info(filepath)
- else:
- return _av_info(filepath)
-
-
-def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]:
- """FFMPEG-based audio file reading using PyAV bindings.
- Soundfile cannot read mp3 and av_read is more efficient than torchaudio.
-
- Args:
- filepath (str or Path): Path to audio file to read.
- seek_time (float): Time at which to start reading in the file.
- duration (float): Duration to read from the file. If set to -1, the whole file is read.
- Returns:
- Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate
- """
- _init_av()
- with av.open(str(filepath)) as af:
- stream = af.streams.audio[0]
- sr = stream.codec_context.sample_rate
- num_frames = int(sr * duration) if duration >= 0 else -1
- frame_offset = int(sr * seek_time)
- # we need a small negative offset otherwise we get some edge artifact
- # from the mp3 decoder.
- af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream)
- frames = []
- length = 0
- for frame in af.decode(streams=stream.index):
- current_offset = int(frame.rate * frame.pts * frame.time_base)
- strip = max(0, frame_offset - current_offset)
- buf = torch.from_numpy(frame.to_ndarray())
- if buf.shape[0] != stream.channels:
- buf = buf.view(-1, stream.channels).t()
- buf = buf[:, strip:]
- frames.append(buf)
- length += buf.shape[1]
- if num_frames > 0 and length >= num_frames:
- break
- assert frames
-        # If the above assert fails, it is likely because we seeked past the end of the file,
- # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp.
- # This will need proper debugging, in due time.
- wav = torch.cat(frames, dim=1)
- assert wav.shape[0] == stream.channels
- if num_frames > 0:
- wav = wav[:, :num_frames]
- return f32_pcm(wav), sr
-
-
-def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,
- duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:
- """Read audio by picking the most appropriate backend tool based on the audio format.
-
- Args:
- filepath (str or Path): Path to audio file to read.
- seek_time (float): Time at which to start reading in the file.
- duration (float): Duration to read from the file. If set to -1, the whole file is read.
- pad (bool): Pad output audio if not reaching expected duration.
- Returns:
- Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate.
- """
- fp = Path(filepath)
- if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg
- # There is some bug with ffmpeg and reading flac
- info = _soundfile_info(filepath)
- frames = -1 if duration <= 0 else int(duration * info.sample_rate)
- frame_offset = int(seek_time * info.sample_rate)
- wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)
- assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}"
- wav = torch.from_numpy(wav).t().contiguous()
- if len(wav.shape) == 1:
- wav = torch.unsqueeze(wav, 0)
- elif (
- fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats()
- and duration <= 0 and seek_time == 0
- ):
- # Torchaudio is faster if we load an entire file at once.
- wav, sr = ta.load(fp)
- else:
- wav, sr = _av_read(filepath, seek_time, duration)
- if pad and duration > 0:
- expected_frames = int(duration * sr)
- wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))
- return wav, sr
-
-
-def audio_write(stem_name: tp.Union[str, Path],
- wav: torch.Tensor, sample_rate: int,
- format: str = 'wav', mp3_rate: int = 320, normalize: bool = True,
- strategy: str = 'peak', peak_clip_headroom_db: float = 1,
- rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
- loudness_compressor: bool = False,
- log_clipping: bool = True, make_parent_dir: bool = True,
- add_suffix: bool = True) -> Path:
- """Convenience function for saving audio to disk. Returns the filename the audio was written to.
-
- Args:
- stem_name (str or Path): Filename without extension which will be added automatically.
- format (str): Either "wav" or "mp3".
- mp3_rate (int): kbps when using mp3s.
- normalize (bool): if `True` (default), normalizes according to the prescribed
- strategy (see after). If `False`, the strategy is only used in case clipping
- would happen.
- strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
- i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
- with extra headroom to avoid clipping. 'clip' just clips.
- peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
- rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
- than the `peak_clip` one to avoid further clipping.
- loudness_headroom_db (float): Target loudness for loudness normalization.
-        loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.
-        log_clipping (bool): If True, basic logging on stderr when clipping still
-            occurs despite strategy (only for 'rms').
- make_parent_dir (bool): Make parent directory if it doesn't exist.
- Returns:
- Path: Path of the saved audio.
- """
- assert wav.dtype.is_floating_point, "wav is not floating point"
- if wav.dim() == 1:
- wav = wav[None]
- elif wav.dim() > 2:
- raise ValueError("Input wav should be at most 2 dimension.")
- assert wav.isfinite().all()
- wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,
- rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping,
- sample_rate=sample_rate, stem_name=str(stem_name))
- kwargs: dict = {}
- if format == 'mp3':
- suffix = '.mp3'
- kwargs.update({"compression": mp3_rate})
- elif format == 'wav':
- wav = i16_pcm(wav)
- suffix = '.wav'
- kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16})
- else:
- raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.")
- if not add_suffix:
- suffix = ''
- path = Path(str(stem_name) + suffix)
- if make_parent_dir:
- path.parent.mkdir(exist_ok=True, parents=True)
- try:
- ta.save(path, wav, sample_rate, **kwargs)
- except Exception:
- if path.exists():
- # we do not want to leave half written files around.
- path.unlink()
- raise
- return path
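-
-
-# Illustrative call (paths assumed): audio_write('out/clip', wav, 32000, format='mp3')
-# peak-normalizes by default, encodes at the default 320 kbps and returns Path('out/clip.mp3').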
diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/transformer_layers.py b/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/transformer_layers.py
deleted file mode 100644
index 7b53429a5168de69cb581a2016c00f3560da0e1f..0000000000000000000000000000000000000000
--- a/spaces/OpenMotionLab/MotionGPT/mGPT/archs/tools/transformer_layers.py
+++ /dev/null
@@ -1,285 +0,0 @@
-# -*- coding: utf-8 -*-
-import math
-import torch
-import torch.nn as nn
-from torch import Tensor
-
-# Took from https://github.com/joeynmt/joeynmt/blob/fb66afcbe1beef9acd59283bcc084c4d4c1e6343/joeynmt/transformer_layers.py
-
-
-# pylint: disable=arguments-differ
-class MultiHeadedAttention(nn.Module):
- """
- Multi-Head Attention module from "Attention is All You Need"
-
- Implementation modified from OpenNMT-py.
- https://github.com/OpenNMT/OpenNMT-py
- """
-
- def __init__(self, num_heads: int, size: int, dropout: float = 0.1):
- """
- Create a multi-headed attention layer.
- :param num_heads: the number of heads
- :param size: model size (must be divisible by num_heads)
- :param dropout: probability of dropping a unit
- """
- super().__init__()
-
- assert size % num_heads == 0
-
- self.head_size = head_size = size // num_heads
- self.model_size = size
- self.num_heads = num_heads
-
- self.k_layer = nn.Linear(size, num_heads * head_size)
- self.v_layer = nn.Linear(size, num_heads * head_size)
- self.q_layer = nn.Linear(size, num_heads * head_size)
-
- self.output_layer = nn.Linear(size, size)
- self.softmax = nn.Softmax(dim=-1)
- self.dropout = nn.Dropout(dropout)
-
- def forward(self, k: Tensor, v: Tensor, q: Tensor, mask: Tensor = None):
- """
- Computes multi-headed attention.
-
- :param k: keys [B, M, D] with M being the sentence length.
- :param v: values [B, M, D]
- :param q: query [B, M, D]
- :param mask: optional mask [B, 1, M] or [B, M, M]
- :return:
- """
- batch_size = k.size(0)
- num_heads = self.num_heads
-
- # project the queries (q), keys (k), and values (v)
- k = self.k_layer(k)
- v = self.v_layer(v)
- q = self.q_layer(q)
-
- # reshape q, k, v for our computation to [batch_size, num_heads, ..]
- k = k.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
- v = v.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
- q = q.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
-
- # compute scores
- q = q / math.sqrt(self.head_size)
-
- # batch x num_heads x query_len x key_len
- scores = torch.matmul(q, k.transpose(2, 3))
- # torch.Size([48, 8, 183, 183])
-
- # apply the mask (if we have one)
- # we add a dimension for the heads to it below: [B, 1, 1, M]
- if mask is not None:
- scores = scores.masked_fill(~mask.unsqueeze(1), float('-inf'))
-
- # apply attention dropout and compute context vectors.
- attention = self.softmax(scores)
- attention = self.dropout(attention)
- # torch.Size([48, 8, 183, 183]) [bs, nheads, time, time] (for decoding)
-
- # v: torch.Size([48, 8, 183, 32]) (32 is 256/8)
- # get context vector (select values with attention) and reshape
- # back to [B, M, D]
- context = torch.matmul(attention, v) # torch.Size([48, 8, 183, 32])
- context = context.transpose(1, 2).contiguous().view(
- batch_size, -1, num_heads * self.head_size)
- # torch.Size([48, 183, 256]) put back to 256 (combine the heads)
-
- output = self.output_layer(context)
- # torch.Size([48, 183, 256]): 1 output per time step
-
- return output
-
-
-# pylint: disable=arguments-differ
-class PositionwiseFeedForward(nn.Module):
- """
- Position-wise Feed-forward layer
- Projects to ff_size and then back down to input_size.
- """
-
- def __init__(self, input_size, ff_size, dropout=0.1):
- """
- Initializes position-wise feed-forward layer.
- :param input_size: dimensionality of the input.
- :param ff_size: dimensionality of intermediate representation
-        :param dropout: dropout probability
- """
- super().__init__()
- self.layer_norm = nn.LayerNorm(input_size, eps=1e-6)
- self.pwff_layer = nn.Sequential(
- nn.Linear(input_size, ff_size),
- nn.ReLU(),
- nn.Dropout(dropout),
- nn.Linear(ff_size, input_size),
- nn.Dropout(dropout),
- )
-
- def forward(self, x):
- x_norm = self.layer_norm(x)
- return self.pwff_layer(x_norm) + x
-
-
-# pylint: disable=arguments-differ
-class PositionalEncoding(nn.Module):
- """
- Pre-compute position encodings (PE).
- In forward pass, this adds the position-encodings to the
- input for as many time steps as necessary.
-
- Implementation based on OpenNMT-py.
- https://github.com/OpenNMT/OpenNMT-py
- """
-
- def __init__(self, size: int = 0, max_len: int = 5000):
- """
- Positional Encoding with maximum length max_len
-        :param size: model dimensionality (embedding size)
-        :param max_len: maximum supported sequence length
- """
- if size % 2 != 0:
- raise ValueError("Cannot use sin/cos positional encoding with "
- "odd dim (got dim={:d})".format(size))
- pe = torch.zeros(max_len, size)
- position = torch.arange(0, max_len).unsqueeze(1)
- div_term = torch.exp((torch.arange(0, size, 2, dtype=torch.float) *
- -(math.log(10000.0) / size)))
- pe[:, 0::2] = torch.sin(position.float() * div_term)
- pe[:, 1::2] = torch.cos(position.float() * div_term)
-        pe = pe.unsqueeze(0)  # shape: [1, max_len, size]
- super().__init__()
- self.register_buffer('pe', pe)
- self.dim = size
-
- def forward(self, emb):
- """Embed inputs.
- Args:
- emb (FloatTensor): Sequence of word vectors
- ``(seq_len, batch_size, self.dim)``
- """
- # Add position encodings
- return emb + self.pe[:, :emb.size(1)]
-
-
-class TransformerEncoderLayer(nn.Module):
- """
- One Transformer encoder layer has a Multi-head attention layer plus
- a position-wise feed-forward layer.
- """
-
- def __init__(self,
- size: int = 0,
- ff_size: int = 0,
- num_heads: int = 0,
- dropout: float = 0.1):
- """
- A single Transformer layer.
-        :param size: model dimensionality
-        :param ff_size: size of the position-wise feed-forward intermediate layer
-        :param num_heads: number of attention heads
-        :param dropout: dropout probability
- """
- super().__init__()
-
- self.layer_norm = nn.LayerNorm(size, eps=1e-6)
- self.src_src_att = MultiHeadedAttention(num_heads,
- size,
- dropout=dropout)
- self.feed_forward = PositionwiseFeedForward(size,
- ff_size=ff_size,
- dropout=dropout)
- self.dropout = nn.Dropout(dropout)
- self.size = size
-
- # pylint: disable=arguments-differ
- def forward(self, x: Tensor, mask: Tensor) -> Tensor:
- """
- Forward pass for a single transformer encoder layer.
- First applies layer norm, then self attention,
- then dropout with residual connection (adding the input to the result),
- and then a position-wise feed-forward layer.
-
- :param x: layer input
- :param mask: input mask
- :return: output tensor
- """
- x_norm = self.layer_norm(x)
- h = self.src_src_att(x_norm, x_norm, x_norm, mask)
- h = self.dropout(h) + x
- o = self.feed_forward(h)
- return o
-
-
-class TransformerDecoderLayer(nn.Module):
- """
- Transformer decoder layer.
-
- Consists of self-attention, source-attention, and feed-forward.
- """
-
- def __init__(self,
- size: int = 0,
- ff_size: int = 0,
- num_heads: int = 0,
- dropout: float = 0.1):
- """
- Represents a single Transformer decoder layer.
-
- It attends to the source representation and the previous decoder states.
-
- :param size: model dimensionality
- :param ff_size: size of the feed-forward intermediate layer
- :param num_heads: number of heads
- :param dropout: dropout to apply to input
- """
- super().__init__()
- self.size = size
-
- self.trg_trg_att = MultiHeadedAttention(num_heads,
- size,
- dropout=dropout)
- self.src_trg_att = MultiHeadedAttention(num_heads,
- size,
- dropout=dropout)
-
- self.feed_forward = PositionwiseFeedForward(size,
- ff_size=ff_size,
- dropout=dropout)
-
- self.x_layer_norm = nn.LayerNorm(size, eps=1e-6)
- self.dec_layer_norm = nn.LayerNorm(size, eps=1e-6)
-
- self.dropout = nn.Dropout(dropout)
-
- # pylint: disable=arguments-differ
- def forward(self,
- x: Tensor = None,
- memory: Tensor = None,
- src_mask: Tensor = None,
- trg_mask: Tensor = None) -> Tensor:
- """
- Forward pass of a single Transformer decoder layer.
-
- :param x: inputs
- :param memory: source representations
- :param src_mask: source mask
- :param trg_mask: target mask (so as to not condition on future steps)
- :return: output tensor
- """
- # decoder/target self-attention
- x_norm = self.x_layer_norm(x) # torch.Size([48, 183, 256])
- h1 = self.trg_trg_att(x_norm, x_norm, x_norm, mask=trg_mask)
- h1 = self.dropout(h1) + x
-
- # source-target attention
- h1_norm = self.dec_layer_norm(
- h1) # torch.Size([48, 183, 256]) (same for memory)
- h2 = self.src_trg_att(memory, memory, h1_norm, mask=src_mask)
-
- # final position-wise feed-forward layer
- o = self.feed_forward(self.dropout(h2) + h1)
-
- return o
diff --git a/spaces/ParityError/LimeFace/theme_dropdown.py b/spaces/ParityError/LimeFace/theme_dropdown.py
deleted file mode 100644
index 6235388fd00549553df44028f3ccf03e946994ea..0000000000000000000000000000000000000000
--- a/spaces/ParityError/LimeFace/theme_dropdown.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-import pathlib
-
-from gradio.themes.utils import ThemeAsset
-
-
-def create_theme_dropdown():
- import gradio as gr
-
- asset_path = pathlib.Path(__file__).parent / "themes"
- themes = []
- for theme_asset in os.listdir(str(asset_path)):
- themes.append(
- (ThemeAsset(theme_asset), gr.Theme.load(str(asset_path / theme_asset)))
- )
-
- def make_else_if(theme_asset):
- return f"""
- else if (theme == '{str(theme_asset[0].version)}') {{
- var theme_css = `{theme_asset[1]._get_theme_css()}`
- }}"""
-
- head, tail = themes[0], themes[1:]
- if_statement = f"""
- if (theme == "{str(head[0].version)}") {{
- var theme_css = `{head[1]._get_theme_css()}`
- }} {" ".join(make_else_if(t) for t in tail)}
- """
-
- latest_to_oldest = sorted([t[0] for t in themes], key=lambda asset: asset.version)[
- ::-1
- ]
- latest_to_oldest = [str(t.version) for t in latest_to_oldest]
-
- component = gr.Dropdown(
- choices=latest_to_oldest,
- value=latest_to_oldest[0],
- render=False,
- label="Select Version",
- ).style(container=False)
-
- return (
- component,
- f"""
- (theme) => {{
- if (!document.querySelector('.theme-css')) {{
- var theme_elem = document.createElement('style');
- theme_elem.classList.add('theme-css');
- document.head.appendChild(theme_elem);
- }} else {{
- var theme_elem = document.querySelector('.theme-css');
- }}
- {if_statement}
- theme_elem.innerHTML = theme_css;
- }}
- """,
- )
diff --git a/spaces/PeepDaSlan9/AutoGPT/autogpt/commands/google_search.py b/spaces/PeepDaSlan9/AutoGPT/autogpt/commands/google_search.py
deleted file mode 100644
index 7d38ce7568d2de207d521b077cfebd72527c9795..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/AutoGPT/autogpt/commands/google_search.py
+++ /dev/null
@@ -1,87 +0,0 @@
-"""Google search command for Autogpt."""
-from __future__ import annotations
-
-import json
-
-from duckduckgo_search import ddg
-
-from autogpt.config import Config
-
-CFG = Config()
-
-
-def google_search(query: str, num_results: int = 8) -> str:
- """Return the results of a Google search
-
- Args:
- query (str): The search query.
- num_results (int): The number of results to return.
-
- Returns:
- str: The results of the search.
- """
- search_results = []
- if not query:
- return json.dumps(search_results)
-
- results = ddg(query, max_results=num_results)
- if not results:
- return json.dumps(search_results)
-
-    search_results.extend(results)
-
- return json.dumps(search_results, ensure_ascii=False, indent=4)
-
-
-def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
- """Return the results of a Google search using the official Google API
-
- Args:
- query (str): The search query.
- num_results (int): The number of results to return.
-
- Returns:
- str: The results of the search.
- """
-
- from googleapiclient.discovery import build
- from googleapiclient.errors import HttpError
-
- try:
- # Get the Google API key and Custom Search Engine ID from the config file
- api_key = CFG.google_api_key
- custom_search_engine_id = CFG.custom_search_engine_id
-
- # Initialize the Custom Search API service
- service = build("customsearch", "v1", developerKey=api_key)
-
- # Send the search query and retrieve the results
- result = (
- service.cse()
- .list(q=query, cx=custom_search_engine_id, num=num_results)
- .execute()
- )
-
- # Extract the search result items from the response
- search_results = result.get("items", [])
-
- # Create a list of only the URLs from the search results
- search_results_links = [item["link"] for item in search_results]
-
- except HttpError as e:
- # Handle errors in the API call
- error_details = json.loads(e.content.decode())
-
- # Check if the error is related to an invalid or missing API key
- if error_details.get("error", {}).get(
- "code"
- ) == 403 and "invalid API key" in error_details.get("error", {}).get(
- "message", ""
- ):
- return "Error: The provided Google API key is invalid or missing."
- else:
- return f"Error: {e}"
-
- # Return the list of search result URLs
- return search_results_links
diff --git a/spaces/Pie31415/control-animation/annotator/midas/midas/base_model.py b/spaces/Pie31415/control-animation/annotator/midas/midas/base_model.py
deleted file mode 100644
index 5cf430239b47ec5ec07531263f26f5c24a2311cd..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/midas/midas/base_model.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import torch
-
-
-class BaseModel(torch.nn.Module):
- def load(self, path):
- """Load model from file.
-
- Args:
- path (str): file path
- """
- parameters = torch.load(path, map_location=torch.device('cpu'))
-
- if "optimizer" in parameters:
- parameters = parameters["model"]
-
- self.load_state_dict(parameters)
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/dataset_wrappers.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/dataset_wrappers.py
deleted file mode 100644
index d6a5e957ec3b44465432617cf6e8f0b86a8a5efa..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/datasets/dataset_wrappers.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
-
-from .builder import DATASETS
-
-
-@DATASETS.register_module()
-class ConcatDataset(_ConcatDataset):
- """A wrapper of concatenated dataset.
-
-    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but also
-    concatenates the group flag for image aspect ratio.
-
- Args:
- datasets (list[:obj:`Dataset`]): A list of datasets.
- """
-
- def __init__(self, datasets):
- super(ConcatDataset, self).__init__(datasets)
- self.CLASSES = datasets[0].CLASSES
- self.PALETTE = datasets[0].PALETTE
-
-
-@DATASETS.register_module()
-class RepeatDataset(object):
- """A wrapper of repeated dataset.
-
-    The length of the repeated dataset will be `times` times that of the original
-    dataset. This is useful when the data loading time is long but the dataset
- is small. Using RepeatDataset can reduce the data loading time between
- epochs.
-
- Args:
- dataset (:obj:`Dataset`): The dataset to be repeated.
- times (int): Repeat times.
- """
-
- def __init__(self, dataset, times):
- self.dataset = dataset
- self.times = times
- self.CLASSES = dataset.CLASSES
- self.PALETTE = dataset.PALETTE
- self._ori_len = len(self.dataset)
-
- def __getitem__(self, idx):
- """Get item from original dataset."""
- return self.dataset[idx % self._ori_len]
-
- def __len__(self):
- """The length is multiplied by ``times``"""
- return self.times * self._ori_len
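-
-
-# Illustrative use: RepeatDataset(dataset, times=5) reports 5 * len(dataset) samples and
-# maps index i to dataset[i % len(dataset)], so one pass covers the data five times
-# without reloading between epochs.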
diff --git a/spaces/Pie31415/control-animation/webui/app_control_animation.py b/spaces/Pie31415/control-animation/webui/app_control_animation.py
deleted file mode 100644
index 047b27b01500ec8bfbfd294fb1038431f87cbb02..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/webui/app_control_animation.py
+++ /dev/null
@@ -1,241 +0,0 @@
-import gradio as gr
-from text_to_animation.model import ControlAnimationModel
-import os
-from utils.hf_utils import get_model_list
-
-huggingspace_name = os.environ.get("SPACE_AUTHOR_NAME")
-on_huggingspace = huggingspace_name if huggingspace_name is not None else False
-
-examples = [["A surfer in miami walking by the beach",
- None,
- "Motion 3",
- None,
- 3,
- 0,
- None,
- None,
- None,
- None,
- None,
- None,
- 0],
- ]
-
-def on_video_path_update(evt: gr.EventData):
- return f"Selection: **{evt._data}**"
-
-def pose_gallery_callback(evt: gr.SelectData):
- return f"Motion {evt.index+1}"
-
-
-def get_frame_index(evt: gr.SelectData):
- return evt.index
-
-
-def create_demo(model: ControlAnimationModel):
- with gr.Blocks() as demo:
- with gr.Column():
- with gr.Row():
- with gr.Column():
- # TODO: update so that model_link is customizable
- # model_link = gr.Dropdown(
- # label="Model Link",
- # choices=["runwayml/stable-diffusion-v1-5"],
- # value="runwayml/stable-diffusion-v1-5",
- # )
- prompt = gr.Textbox(
- placeholder="Prompt",
- show_label=False,
- lines=2,
- info="Give a prompt for an animation you would like to generate. The prompt will be used to create the first initial frame and then the animation.",
- )
- negative_prompt = gr.Textbox(
- placeholder="Negative Prompt (optional)",
- show_label=False,
- lines=2,
- )
-
- gen_frames_button = gr.Button(
- value="Generate Initial Frames", variant="primary"
- )
-
- with gr.Accordion("Advanced options", open=False):
- if on_huggingspace:
- video_length = gr.Slider(
- label="Video length", minimum=8, maximum=16, step=1
- )
- else:
- video_length = gr.Number(
- label="Video length", value=8, precision=0
- )
-
- seed = gr.Slider(
- label="Seed",
- info="-1 for random seed on each run. Otherwise, the seed will be fixed.",
- minimum=-1,
- maximum=65536,
- value=0,
- step=1,
- )
-
- motion_field_strength_x = gr.Slider(
- label="Global Translation $\\delta_{x}$",
- minimum=-20,
- maximum=20,
- value=12,
- step=1,
- )
-
- motion_field_strength_y = gr.Slider(
- label="Global Translation $\\delta_{y}$",
- minimum=-20,
- maximum=20,
- value=12,
- step=1,
- )
-
- t0 = gr.Slider(
- label="Timestep t0",
- minimum=0,
- maximum=47,
- value=44,
- step=1,
- info="Perform DDPM steps from t0 to t1. The larger the gap between t0 and t1, the more variance between the frames. Ensure t0 < t1 ",
- )
-
- t1 = gr.Slider(
- label="Timestep t1",
- minimum=1,
- info="Perform DDPM steps from t0 to t1. The larger the gap between t0 and t1, the more variance between the frames. Ensure t0 < t1",
- maximum=48,
- value=47,
- step=1,
- )
-
- chunk_size = gr.Slider(
- label="Chunk size",
- minimum=2,
- maximum=16,
- value=8,
- step=1,
- visible=not on_huggingspace,
- info="Number of frames processed at once. Reduce for lower memory usage.",
- )
- merging_ratio = gr.Slider(
- label="Merging ratio",
- minimum=0.0,
- maximum=0.9,
- step=0.1,
- value=0.0,
- visible=not on_huggingspace,
- info="Ratio of how many tokens are merged. The higher the more compression (less memory and faster inference).",
- )
-
- with gr.Column():
- gallery_pose_sequence = gr.Gallery(
- label="Pose Sequence",
- value=[
- ("__assets__/walk_01.gif", "Motion 1"),
- ("__assets__/walk_02.gif", "Motion 2"),
- ("__assets__/walk_03.gif", "Motion 3"),
- ("__assets__/run.gif", "Motion 4"),
- ("__assets__/dance1.gif", "Motion 5"),
- ("__assets__/dance2.gif", "Motion 6"),
- ("__assets__/dance3.gif", "Motion 7"),
- ("__assets__/dance4.gif", "Motion 8"),
- ("__assets__/dance5.gif", "Motion 9"),
- ],
- ).style(grid=3, columns=3)
- input_video_path = gr.Textbox(
- label="Pose Sequence", visible=False, value="Motion 1"
- )
- pose_sequence_selector = gr.Markdown("Pose Sequence: **Motion 1**")
-
- with gr.Row():
- with gr.Column(visible=True) as frame_selection_view:
- initial_frames = gr.Gallery(
- label="Initial Frames", show_label=False
- ).style(grid=4, columns=4, rows=1, object_fit="contain", preview=True)
-
- gr.Markdown("Select an initial frame to start your animation with.")
-
- gen_animation_button = gr.Button(
- value="Select Initial Frame & Generate Animation",
- variant="secondary",
- )
-
- with gr.Column(visible=True) as animation_view:
- result = gr.Image(label="Generated Video")
-
- with gr.Box(visible=False):
- controlnet_video = gr.Video(label="ControlNet Video")
- initial_frame_index = gr.Number(
- label="Selected Initial Frame Index", value=-1, precision=0
- )
-
- input_video_path.change(on_video_path_update, None, pose_sequence_selector)
- gallery_pose_sequence.select(pose_gallery_callback, None, input_video_path)
- initial_frames.select(fn=get_frame_index, outputs=initial_frame_index)
-
- frame_inputs = [
- prompt,
- input_video_path,
- negative_prompt,
- seed,
- ]
-
- animation_inputs = [
- controlnet_video,
- prompt,
- # initial_frame_index,
- # input_video_path,
- # model_link,
- # motion_field_strength_x,
- # motion_field_strength_y,
- # t0,
- # t1,
- # negative_prompt,
- # chunk_size,
- # video_length,
- # merging_ratio,
- negative_prompt,
- seed
- ]
-
- def submit_select(initial_frame_index: int):
-        if initial_frame_index != -1:  # Move to the next step
- return {
- frame_selection_view: gr.update(visible=False),
- animation_view: gr.update(visible=True),
- }
-
- return {
- frame_selection_view: gr.update(visible=True),
- animation_view: gr.update(visible=False),
- }
-
- gen_frames_button.click(
- fn=model.generate_initial_frames,
- inputs=frame_inputs,
- outputs=[controlnet_video, initial_frames],
- )
-
- gen_animation_button.click(
- fn=submit_select,
- inputs=initial_frame_index,
- outputs=[frame_selection_view, animation_view],
- ).then(
- fn=model.generate_video_from_frame,
- inputs=animation_inputs,
- outputs=result,
- )
-
- # gr.Examples(examples=examples,
- # inputs=animation_inputs,
- # outputs=result,
- # fn=model.generate_animation,
- # cache_examples=on_huggingspace,
- # run_on_click=True,
- # )
-
- return demo
diff --git a/spaces/PrismaticAI/MangaMaker/app.py b/spaces/PrismaticAI/MangaMaker/app.py
deleted file mode 100644
index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000
--- a/spaces/PrismaticAI/MangaMaker/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch()
\ No newline at end of file
diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/scripts/__init__.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/scripts/__init__.py
deleted file mode 100644
index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000
--- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/scripts/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/packaging/markers.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/packaging/markers.py
deleted file mode 100644
index 540e7a4dc79d02a820e291b57c43335d5aa25a41..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/packaging/markers.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import operator
-import os
-import platform
-import sys
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
-from pip._vendor.pyparsing import ( # noqa: N817
- Forward,
- Group,
- Literal as L,
- ParseException,
- ParseResults,
- QuotedString,
- ZeroOrMore,
- stringEnd,
- stringStart,
-)
-
-from .specifiers import InvalidSpecifier, Specifier
-
-__all__ = [
- "InvalidMarker",
- "UndefinedComparison",
- "UndefinedEnvironmentName",
- "Marker",
- "default_environment",
-]
-
-Operator = Callable[[str, str], bool]
-
-
-class InvalidMarker(ValueError):
- """
- An invalid marker was found, users should refer to PEP 508.
- """
-
-
-class UndefinedComparison(ValueError):
- """
- An invalid operation was attempted on a value that doesn't support it.
- """
-
-
-class UndefinedEnvironmentName(ValueError):
- """
-    A name was used that does not exist inside of the
-    environment.
- """
-
-
-class Node:
- def __init__(self, value: Any) -> None:
- self.value = value
-
- def __str__(self) -> str:
- return str(self.value)
-
- def __repr__(self) -> str:
- return f"<{self.__class__.__name__}('{self}')>"
-
- def serialize(self) -> str:
- raise NotImplementedError
-
-
-class Variable(Node):
- def serialize(self) -> str:
- return str(self)
-
-
-class Value(Node):
- def serialize(self) -> str:
- return f'"{self}"'
-
-
-class Op(Node):
- def serialize(self) -> str:
- return str(self)
-
-
-VARIABLE = (
- L("implementation_version")
- | L("platform_python_implementation")
- | L("implementation_name")
- | L("python_full_version")
- | L("platform_release")
- | L("platform_version")
- | L("platform_machine")
- | L("platform_system")
- | L("python_version")
- | L("sys_platform")
- | L("os_name")
- | L("os.name") # PEP-345
- | L("sys.platform") # PEP-345
- | L("platform.version") # PEP-345
- | L("platform.machine") # PEP-345
- | L("platform.python_implementation") # PEP-345
- | L("python_implementation") # undocumented setuptools legacy
- | L("extra") # PEP-508
-)
-ALIASES = {
- "os.name": "os_name",
- "sys.platform": "sys_platform",
- "platform.version": "platform_version",
- "platform.machine": "platform_machine",
- "platform.python_implementation": "platform_python_implementation",
- "python_implementation": "platform_python_implementation",
-}
-VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
-
-VERSION_CMP = (
- L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
-)
-
-MARKER_OP = VERSION_CMP | L("not in") | L("in")
-MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
-
-MARKER_VALUE = QuotedString("'") | QuotedString('"')
-MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
-
-BOOLOP = L("and") | L("or")
-
-MARKER_VAR = VARIABLE | MARKER_VALUE
-
-MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
-MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
-
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-
-MARKER_EXPR = Forward()
-MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
-MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
-
-MARKER = stringStart + MARKER_EXPR + stringEnd
-
-
-def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
- if isinstance(results, ParseResults):
- return [_coerce_parse_result(i) for i in results]
- else:
- return results
-
-
-def _format_marker(
- marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
-) -> str:
-
- assert isinstance(marker, (list, tuple, str))
-
- # Sometimes we have a structure like [[...]] which is a single item list
- # where the single item is itself its own list. In that case we want to skip
- # the rest of this function so that we don't get extraneous () on the
- # outside.
- if (
- isinstance(marker, list)
- and len(marker) == 1
- and isinstance(marker[0], (list, tuple))
- ):
- return _format_marker(marker[0])
-
- if isinstance(marker, list):
- inner = (_format_marker(m, first=False) for m in marker)
- if first:
- return " ".join(inner)
- else:
- return "(" + " ".join(inner) + ")"
- elif isinstance(marker, tuple):
- return " ".join([m.serialize() for m in marker])
- else:
- return marker
-
-
-_operators: Dict[str, Operator] = {
- "in": lambda lhs, rhs: lhs in rhs,
- "not in": lambda lhs, rhs: lhs not in rhs,
- "<": operator.lt,
- "<=": operator.le,
- "==": operator.eq,
- "!=": operator.ne,
- ">=": operator.ge,
- ">": operator.gt,
-}
-
-
-def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
- try:
- spec = Specifier("".join([op.serialize(), rhs]))
- except InvalidSpecifier:
- pass
- else:
- return spec.contains(lhs)
-
- oper: Optional[Operator] = _operators.get(op.serialize())
- if oper is None:
- raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
-
- return oper(lhs, rhs)
-
-
-class Undefined:
- pass
-
-
-_undefined = Undefined()
-
-
-def _get_env(environment: Dict[str, str], name: str) -> str:
- value: Union[str, Undefined] = environment.get(name, _undefined)
-
- if isinstance(value, Undefined):
- raise UndefinedEnvironmentName(
- f"{name!r} does not exist in evaluation environment."
- )
-
- return value
-
-
-def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
- groups: List[List[bool]] = [[]]
-
- for marker in markers:
- assert isinstance(marker, (list, tuple, str))
-
- if isinstance(marker, list):
- groups[-1].append(_evaluate_markers(marker, environment))
- elif isinstance(marker, tuple):
- lhs, op, rhs = marker
-
- if isinstance(lhs, Variable):
- lhs_value = _get_env(environment, lhs.value)
- rhs_value = rhs.value
- else:
- lhs_value = lhs.value
- rhs_value = _get_env(environment, rhs.value)
-
- groups[-1].append(_eval_op(lhs_value, op, rhs_value))
- else:
- assert marker in ["and", "or"]
- if marker == "or":
- groups.append([])
-
- return any(all(item) for item in groups)
-
-
-def format_full_version(info: "sys._version_info") -> str:
- version = "{0.major}.{0.minor}.{0.micro}".format(info)
- kind = info.releaselevel
- if kind != "final":
- version += kind[0] + str(info.serial)
- return version
-
-
-def default_environment() -> Dict[str, str]:
- iver = format_full_version(sys.implementation.version)
- implementation_name = sys.implementation.name
- return {
- "implementation_name": implementation_name,
- "implementation_version": iver,
- "os_name": os.name,
- "platform_machine": platform.machine(),
- "platform_release": platform.release(),
- "platform_system": platform.system(),
- "platform_version": platform.version(),
- "python_full_version": platform.python_version(),
- "platform_python_implementation": platform.python_implementation(),
- "python_version": ".".join(platform.python_version_tuple()[:2]),
- "sys_platform": sys.platform,
- }
-
-
-class Marker:
- def __init__(self, marker: str) -> None:
- try:
- self._markers = _coerce_parse_result(MARKER.parseString(marker))
- except ParseException as e:
- raise InvalidMarker(
- f"Invalid marker: {marker!r}, parse error at "
- f"{marker[e.loc : e.loc + 8]!r}"
- )
-
- def __str__(self) -> str:
- return _format_marker(self._markers)
-
- def __repr__(self) -> str:
- return f""
-
- def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
- """Evaluate a marker.
-
- Return the boolean from evaluating the given marker against the
- environment. environment is an optional argument to override all or
- part of the determined environment.
-
- The environment is determined from the current Python process.
- """
- current_environment = default_environment()
- if environment is not None:
- current_environment.update(environment)
-
- return _evaluate_markers(self._markers, current_environment)
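
For reference, a minimal usage sketch of the marker API removed above, written against the public `packaging` distribution (which exposes the same `Marker`/`default_environment` interface as this vendored copy):

```python
# Illustrative only; uses the public `packaging` package, not pip's vendored copy.
from packaging.markers import Marker, default_environment

marker = Marker('python_version >= "3.8" and sys_platform != "win32"')

# Evaluate against the running interpreter's environment...
print(marker.evaluate())

# ...or override part of it, e.g. when resolving for a different target platform.
env = default_environment()
env["sys_platform"] = "win32"
print(marker.evaluate(env))  # False, since the second clause fails
```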
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/tenacity/wait.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/tenacity/wait.py
deleted file mode 100644
index 8fdfc8f9d4e8e28087d1eeb1ec190abecf7341ce..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/tenacity/wait.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# Copyright 2016–2021 Julien Danjou
-# Copyright 2016 Joshua Harlow
-# Copyright 2013-2014 Ray Holder
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import abc
-import random
-import typing
-from datetime import timedelta
-
-from pip._vendor.tenacity import _utils
-
-if typing.TYPE_CHECKING:
- from pip._vendor.tenacity import RetryCallState
-
-wait_unit_type = typing.Union[int, float, timedelta]
-
-
-def to_seconds(wait_unit: wait_unit_type) -> float:
- return float(wait_unit.total_seconds() if isinstance(wait_unit, timedelta) else wait_unit)
-
-
-class wait_base(abc.ABC):
- """Abstract base class for wait strategies."""
-
- @abc.abstractmethod
- def __call__(self, retry_state: "RetryCallState") -> float:
- pass
-
- def __add__(self, other: "wait_base") -> "wait_combine":
- return wait_combine(self, other)
-
- def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_base"]:
- # make it possible to use multiple waits with the built-in sum function
- if other == 0:
- return self
- return self.__add__(other)
-
-
-class wait_fixed(wait_base):
- """Wait strategy that waits a fixed amount of time between each retry."""
-
- def __init__(self, wait: wait_unit_type) -> None:
- self.wait_fixed = to_seconds(wait)
-
- def __call__(self, retry_state: "RetryCallState") -> float:
- return self.wait_fixed
-
-
-class wait_none(wait_fixed):
- """Wait strategy that doesn't wait at all before retrying."""
-
- def __init__(self) -> None:
- super().__init__(0)
-
-
-class wait_random(wait_base):
- """Wait strategy that waits a random amount of time between min/max."""
-
- def __init__(self, min: wait_unit_type = 0, max: wait_unit_type = 1) -> None: # noqa
- self.wait_random_min = to_seconds(min)
- self.wait_random_max = to_seconds(max)
-
- def __call__(self, retry_state: "RetryCallState") -> float:
- return self.wait_random_min + (random.random() * (self.wait_random_max - self.wait_random_min))
-
-
-class wait_combine(wait_base):
- """Combine several waiting strategies."""
-
- def __init__(self, *strategies: wait_base) -> None:
- self.wait_funcs = strategies
-
- def __call__(self, retry_state: "RetryCallState") -> float:
- return sum(x(retry_state=retry_state) for x in self.wait_funcs)
-
-
-class wait_chain(wait_base):
- """Chain two or more waiting strategies.
-
- If all strategies are exhausted, the very last strategy is used
- thereafter.
-
- For example::
-
- @retry(wait=wait_chain(*[wait_fixed(1) for i in range(3)] +
- [wait_fixed(2) for j in range(5)] +
- [wait_fixed(5) for k in range(4)]))
- def wait_chained():
- print("Wait 1s for 3 attempts, 2s for 5 attempts and 5s
- thereafter.")
- """
-
- def __init__(self, *strategies: wait_base) -> None:
- self.strategies = strategies
-
- def __call__(self, retry_state: "RetryCallState") -> float:
- wait_func_no = min(max(retry_state.attempt_number, 1), len(self.strategies))
- wait_func = self.strategies[wait_func_no - 1]
- return wait_func(retry_state=retry_state)
-
-
-class wait_incrementing(wait_base):
- """Wait an incremental amount of time after each attempt.
-
- Starting at a starting value and incrementing by a value for each attempt
- (and restricting the upper limit to some maximum value).
- """
-
- def __init__(
- self,
- start: wait_unit_type = 0,
- increment: wait_unit_type = 100,
- max: wait_unit_type = _utils.MAX_WAIT, # noqa
- ) -> None:
- self.start = to_seconds(start)
- self.increment = to_seconds(increment)
- self.max = to_seconds(max)
-
- def __call__(self, retry_state: "RetryCallState") -> float:
- result = self.start + (self.increment * (retry_state.attempt_number - 1))
- return max(0, min(result, self.max))
-
-
-class wait_exponential(wait_base):
- """Wait strategy that applies exponential backoff.
-
- It allows for a customized multiplier and an ability to restrict the
- upper and lower limits to some maximum and minimum value.
-
- The intervals are fixed (i.e. there is no jitter), so this strategy is
- suitable for balancing retries against latency when a required resource is
- unavailable for an unknown duration, but *not* suitable for resolving
- contention between multiple processes for a shared resource. Use
- wait_random_exponential for the latter case.
- """
-
- def __init__(
- self,
- multiplier: typing.Union[int, float] = 1,
- max: wait_unit_type = _utils.MAX_WAIT, # noqa
- exp_base: typing.Union[int, float] = 2,
- min: wait_unit_type = 0, # noqa
- ) -> None:
- self.multiplier = multiplier
- self.min = to_seconds(min)
- self.max = to_seconds(max)
- self.exp_base = exp_base
-
- def __call__(self, retry_state: "RetryCallState") -> float:
- try:
- exp = self.exp_base ** (retry_state.attempt_number - 1)
- result = self.multiplier * exp
- except OverflowError:
- return self.max
- return max(max(0, self.min), min(result, self.max))
-
-
-class wait_random_exponential(wait_exponential):
- """Random wait with exponentially widening window.
-
- An exponential backoff strategy used to mediate contention between multiple
- uncoordinated processes for a shared resource in distributed systems. This
- is the sense in which "exponential backoff" is meant in e.g. Ethernet
- networking, and corresponds to the "Full Jitter" algorithm described in
- this blog post:
-
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
-
- Each retry occurs at a random time in a geometrically expanding interval.
- It allows for a custom multiplier and an ability to restrict the upper
- limit of the random interval to some maximum value.
-
- Example::
-
- wait_random_exponential(multiplier=0.5, # initial window 0.5s
- max=60) # max 60s timeout
-
- When waiting for an unavailable resource to become available again, as
- opposed to trying to resolve contention for a shared resource, the
- wait_exponential strategy (which uses a fixed interval) may be preferable.
-
- """
-
- def __call__(self, retry_state: "RetryCallState") -> float:
- high = super().__call__(retry_state=retry_state)
- return random.uniform(0, high)
-
-
-class wait_exponential_jitter(wait_base):
- """Wait strategy that applies exponential backoff and jitter.
-
- It allows for a customized initial wait, maximum wait and jitter.
-
- This implements the strategy described here:
- https://cloud.google.com/storage/docs/retry-strategy
-
- The wait time is min(initial * 2**n + random.uniform(0, jitter), maximum)
- where n is the retry count.
- """
-
- def __init__(
- self,
- initial: float = 1,
- max: float = _utils.MAX_WAIT, # noqa
- exp_base: float = 2,
- jitter: float = 1,
- ) -> None:
- self.initial = initial
- self.max = max
- self.exp_base = exp_base
- self.jitter = jitter
-
- def __call__(self, retry_state: "RetryCallState") -> float:
- jitter = random.uniform(0, self.jitter)
- try:
- exp = self.exp_base ** (retry_state.attempt_number - 1)
- result = self.initial * exp + jitter
- except OverflowError:
- result = self.max
- return max(0, min(result, self.max))
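
The wait strategies deleted above compose through `wait_base.__add__`; here is a minimal sketch using the standalone `tenacity` package (the copy above is vendored inside pip and not meant to be imported directly):

```python
# Illustrative sketch with the standalone `tenacity` package.
import random

from tenacity import retry, stop_after_attempt, wait_fixed, wait_random


@retry(
    stop=stop_after_attempt(5),
    # wait_base.__add__ combines strategies: 1s fixed plus 0-2s of random jitter.
    wait=wait_fixed(1) + wait_random(0, 2),
)
def flaky_call() -> str:
    # Simulated transient failure; may raise RetryError if all 5 attempts fail.
    if random.random() < 0.7:
        raise ConnectionError("transient failure")
    return "ok"


print(flaky_call())
```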
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py
deleted file mode 100644
index 19a169fc30183db91f931ad6ad04fbc0e16559b3..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .more import * # noqa
-from .recipes import * # noqa
-
-__version__ = '8.8.0'
diff --git a/spaces/Realcat/image-matching-webui/hloc/extractors/alike.py b/spaces/Realcat/image-matching-webui/hloc/extractors/alike.py
deleted file mode 100644
index dcfe4542301eaf0b0092d5e166e59915d033db57..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/hloc/extractors/alike.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import sys
-from pathlib import Path
-import torch
-
-from ..utils.base_model import BaseModel
-
-alike_path = Path(__file__).parent / "../../third_party/ALIKE"
-sys.path.append(str(alike_path))
-from alike import ALike as Alike_
-from alike import configs
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-
-class Alike(BaseModel):
- default_conf = {
- "model_name": "alike-t", # 'alike-t', 'alike-s', 'alike-n', 'alike-l'
- "use_relu": True,
- "multiscale": False,
- "max_keypoints": 1000,
- "detection_threshold": 0.5,
- "top_k": -1,
- "sub_pixel": False,
- }
-
- required_inputs = ["image"]
-
- def _init(self, conf):
- self.net = Alike_(
- **configs[conf["model_name"]],
- device=device,
- top_k=conf["top_k"],
- scores_th=conf["detection_threshold"],
- n_limit=conf["max_keypoints"],
- )
-
- def _forward(self, data):
- image = data["image"]
- image = image.permute(0, 2, 3, 1).squeeze()
- image = image.cpu().numpy() * 255.0
- pred = self.net(image, sub_pixel=self.conf["sub_pixel"])
-
- keypoints = pred["keypoints"]
- descriptors = pred["descriptors"]
- scores = pred["scores"]
-
- return {
- "keypoints": torch.from_numpy(keypoints)[None],
- "scores": torch.from_numpy(scores)[None],
- "descriptors": torch.from_numpy(descriptors.T)[None],
- }
diff --git a/spaces/RedYan/nitrosocke-Ghibli-Diffusion/README.md b/spaces/RedYan/nitrosocke-Ghibli-Diffusion/README.md
deleted file mode 100644
index 94c0690c65cfc71f63500f39aa3af20aed849d1a..0000000000000000000000000000000000000000
--- a/spaces/RedYan/nitrosocke-Ghibli-Diffusion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Nitrosocke Ghibli Diffusion
-emoji: 🌖
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Redgon/bingo/src/lib/isomorphic/browser.ts b/spaces/Redgon/bingo/src/lib/isomorphic/browser.ts
deleted file mode 100644
index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000
--- a/spaces/Redgon/bingo/src/lib/isomorphic/browser.ts
+++ /dev/null
@@ -1,11 +0,0 @@
-'use client'
-
-const debug = console.info.bind(console)
-
-class WebSocketAlias extends WebSocket {
- constructor(address: string | URL, ...args: any) {
- super(address)
- }
-}
-
-export default { fetch, WebSocket: WebSocketAlias, debug }
diff --git a/spaces/Reself/StableVideo/stablevideo/implicit_neural_networks.py b/spaces/Reself/StableVideo/stablevideo/implicit_neural_networks.py
deleted file mode 100644
index d52c4c28fdd71f3510df0cc25ca3e450668140a8..0000000000000000000000000000000000000000
--- a/spaces/Reself/StableVideo/stablevideo/implicit_neural_networks.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import numpy as np
-
-# code taken from https://github.com/ykasten/layered-neural-atlases
-
-
-def count_parameters(model):
- return sum(p.numel() for p in model.parameters() if p.requires_grad)
-
-
-def positionalEncoding_vec(in_tensor, b):
- proj = torch.einsum("ij, k -> ijk", in_tensor, b) # shape (batch, in_tensor.size(1), freqNum)
- mapped_coords = torch.cat((torch.sin(proj), torch.cos(proj)), dim=1) # shape (batch, 2*in_tensor.size(1), freqNum)
- output = mapped_coords.transpose(2, 1).contiguous().view(mapped_coords.size(0), -1)
- return output
-
-
-class IMLP(nn.Module):
- def __init__(
- self,
- input_dim,
- output_dim,
- hidden_dim=256,
- use_positional=True,
- positional_dim=10,
- skip_layers=[4, 6],
- num_layers=8, # includes the output layer
- verbose=True,
- use_tanh=True,
- apply_softmax=False,
- ):
- super(IMLP, self).__init__()
- self.verbose = verbose
- self.use_tanh = use_tanh
- self.apply_softmax = apply_softmax
- if apply_softmax:
- self.softmax = nn.Softmax()
- if use_positional:
- encoding_dimensions = 2 * input_dim * positional_dim
- self.b = torch.tensor([(2 ** j) * np.pi for j in range(positional_dim)], requires_grad=False)
- else:
- encoding_dimensions = input_dim
-
- self.hidden = nn.ModuleList()
- for i in range(num_layers):
- if i == 0:
- input_dims = encoding_dimensions
- elif i in skip_layers:
- input_dims = hidden_dim + encoding_dimensions
- else:
- input_dims = hidden_dim
-
- if i == num_layers - 1:
- # last layer
- self.hidden.append(nn.Linear(input_dims, output_dim, bias=True))
- else:
- self.hidden.append(nn.Linear(input_dims, hidden_dim, bias=True))
-
- self.skip_layers = skip_layers
- self.num_layers = num_layers
-
- self.positional_dim = positional_dim
- self.use_positional = use_positional
-
- if self.verbose:
- print(f"Model has {count_parameters(self)} params")
-
- def forward(self, x):
- if self.use_positional:
- if self.b.device != x.device:
- self.b = self.b.to(x.device)
- pos = positionalEncoding_vec(x, self.b)
- x = pos
-
- input = x.detach().clone()
- for i, layer in enumerate(self.hidden):
- if i > 0:
- x = F.relu(x)
- if i in self.skip_layers:
- x = torch.cat((x, input), 1)
- x = layer(x)
- if self.use_tanh:
- x = torch.tanh(x)
-
- if self.apply_softmax:
- x = self.softmax(x)
- return x
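
A rough usage sketch of the deleted `IMLP` module, assuming the layered-neural-atlases convention of mapping normalized `(x, y, t)` coordinates to 2D atlas coordinates; the import path follows this repository's layout:

```python
# Usage sketch; import path and coordinate convention are assumptions based on
# the layered-neural-atlases code this module was taken from.
import torch

from stablevideo.implicit_neural_networks import IMLP

model = IMLP(input_dim=3, output_dim=2, hidden_dim=256,
             use_positional=True, positional_dim=10, num_layers=8)

coords = torch.rand(4096, 3) * 2 - 1   # normalized (x, y, t) in [-1, 1]
uv = model(coords)                     # (4096, 2), squashed to [-1, 1] by tanh
print(uv.shape)
```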
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/dense_heads/fcos_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/dense_heads/fcos_head.py
deleted file mode 100644
index 905a703507f279ac8d34cff23c99af33c0d5f973..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/dense_heads/fcos_head.py
+++ /dev/null
@@ -1,629 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import Scale, normal_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import distance2bbox, multi_apply, multiclass_nms, reduce_mean
-from ..builder import HEADS, build_loss
-from .anchor_free_head import AnchorFreeHead
-
-INF = 1e8
-
-
-@HEADS.register_module()
-class FCOSHead(AnchorFreeHead):
- """Anchor-free head used in `FCOS `_.
-
- The FCOS head does not use anchor boxes. Instead bounding boxes are
- predicted at each pixel and a centerness measure is used to suppress
- low-quality predictions.
- Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training
- tricks used in official repo, which will bring remarkable mAP gains
- of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for
- more detail.
-
- Args:
- num_classes (int): Number of categories excluding the background
- category.
- in_channels (int): Number of channels in the input feature map.
- strides (list[int] | list[tuple[int, int]]): Strides of points
- in multiple feature levels. Default: (4, 8, 16, 32, 64).
- regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
- level points.
- center_sampling (bool): If true, use center sampling. Default: False.
- center_sample_radius (float): Radius of center sampling. Default: 1.5.
- norm_on_bbox (bool): If true, normalize the regression targets
- with FPN strides. Default: False.
- centerness_on_reg (bool): If true, position centerness on the
- regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
- Default: False.
- conv_bias (bool | str): If specified as `auto`, it will be decided by the
- norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise
- False. Default: "auto".
- loss_cls (dict): Config of classification loss.
- loss_bbox (dict): Config of localization loss.
- loss_centerness (dict): Config of centerness loss.
- norm_cfg (dict): dictionary to construct and config norm layer.
- Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
-
- Example:
- >>> self = FCOSHead(11, 7)
- >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
- >>> cls_score, bbox_pred, centerness = self.forward(feats)
- >>> assert len(cls_score) == len(self.scales)
- """ # noqa: E501
-
- def __init__(self,
- num_classes,
- in_channels,
- regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
- (512, INF)),
- center_sampling=False,
- center_sample_radius=1.5,
- norm_on_bbox=False,
- centerness_on_reg=False,
- loss_cls=dict(
- type='FocalLoss',
- use_sigmoid=True,
- gamma=2.0,
- alpha=0.25,
- loss_weight=1.0),
- loss_bbox=dict(type='IoULoss', loss_weight=1.0),
- loss_centerness=dict(
- type='CrossEntropyLoss',
- use_sigmoid=True,
- loss_weight=1.0),
- norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
- **kwargs):
- self.regress_ranges = regress_ranges
- self.center_sampling = center_sampling
- self.center_sample_radius = center_sample_radius
- self.norm_on_bbox = norm_on_bbox
- self.centerness_on_reg = centerness_on_reg
- super().__init__(
- num_classes,
- in_channels,
- loss_cls=loss_cls,
- loss_bbox=loss_bbox,
- norm_cfg=norm_cfg,
- **kwargs)
- self.loss_centerness = build_loss(loss_centerness)
-
- def _init_layers(self):
- """Initialize layers of the head."""
- super()._init_layers()
- self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
- self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
-
- def init_weights(self):
- """Initialize weights of the head."""
- super().init_weights()
- normal_init(self.conv_centerness, std=0.01)
-
- def forward(self, feats):
- """Forward features from the upstream network.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple:
- cls_scores (list[Tensor]): Box scores for each scale level, \
- each is a 4D-tensor, the channel number is \
- num_points * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for each \
- scale level, each is a 4D-tensor, the channel number is \
- num_points * 4.
- centernesses (list[Tensor]): centerness for each scale level, \
- each is a 4D-tensor, the channel number is num_points * 1.
- """
- return multi_apply(self.forward_single, feats, self.scales,
- self.strides)
-
- def forward_single(self, x, scale, stride):
- """Forward features of a single scale level.
-
- Args:
- x (Tensor): FPN feature maps of the specified stride.
- scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
- the bbox prediction.
- stride (int): The corresponding stride for feature maps, only
- used to normalize the bbox prediction when self.norm_on_bbox
- is True.
-
- Returns:
- tuple: scores for each class, bbox predictions and centerness \
- predictions of input feature maps.
- """
- cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)
- if self.centerness_on_reg:
- centerness = self.conv_centerness(reg_feat)
- else:
- centerness = self.conv_centerness(cls_feat)
- # scale the bbox_pred of different level
- # float to avoid overflow when enabling FP16
- bbox_pred = scale(bbox_pred).float()
- if self.norm_on_bbox:
- bbox_pred = F.relu(bbox_pred)
- if not self.training:
- bbox_pred *= stride
- else:
- bbox_pred = bbox_pred.exp()
- return cls_score, bbox_pred, centerness
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
- def loss(self,
- cls_scores,
- bbox_preds,
- centernesses,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute loss of the head.
-
- Args:
- cls_scores (list[Tensor]): Box scores for each scale level,
- each is a 4D-tensor, the channel number is
- num_points * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
- level, each is a 4D-tensor, the channel number is
- num_points * 4.
- centernesses (list[Tensor]): centerness for each scale level, each
- is a 4D-tensor, the channel number is num_points * 1.
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- assert len(cls_scores) == len(bbox_preds) == len(centernesses)
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
- bbox_preds[0].device)
- labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes,
- gt_labels)
-
- num_imgs = cls_scores[0].size(0)
- # flatten cls_scores, bbox_preds and centerness
- flatten_cls_scores = [
- cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
- for cls_score in cls_scores
- ]
- flatten_bbox_preds = [
- bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
- for bbox_pred in bbox_preds
- ]
- flatten_centerness = [
- centerness.permute(0, 2, 3, 1).reshape(-1)
- for centerness in centernesses
- ]
- flatten_cls_scores = torch.cat(flatten_cls_scores)
- flatten_bbox_preds = torch.cat(flatten_bbox_preds)
- flatten_centerness = torch.cat(flatten_centerness)
- flatten_labels = torch.cat(labels)
- flatten_bbox_targets = torch.cat(bbox_targets)
- # repeat points to align with bbox_preds
- flatten_points = torch.cat(
- [points.repeat(num_imgs, 1) for points in all_level_points])
-
- # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
- bg_class_ind = self.num_classes
- pos_inds = ((flatten_labels >= 0)
- & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)
- num_pos = torch.tensor(
- len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)
- num_pos = max(reduce_mean(num_pos), 1.0)
- loss_cls = self.loss_cls(
- flatten_cls_scores, flatten_labels, avg_factor=num_pos)
-
- pos_bbox_preds = flatten_bbox_preds[pos_inds]
- pos_centerness = flatten_centerness[pos_inds]
-
- if len(pos_inds) > 0:
- pos_bbox_targets = flatten_bbox_targets[pos_inds]
- pos_centerness_targets = self.centerness_target(pos_bbox_targets)
- pos_points = flatten_points[pos_inds]
- pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
- pos_decoded_target_preds = distance2bbox(pos_points,
- pos_bbox_targets)
- # centerness weighted iou loss
- centerness_denorm = max(
- reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)
- loss_bbox = self.loss_bbox(
- pos_decoded_bbox_preds,
- pos_decoded_target_preds,
- weight=pos_centerness_targets,
- avg_factor=centerness_denorm)
- loss_centerness = self.loss_centerness(
- pos_centerness, pos_centerness_targets, avg_factor=num_pos)
- else:
- loss_bbox = pos_bbox_preds.sum()
- loss_centerness = pos_centerness.sum()
-
- return dict(
- loss_cls=loss_cls,
- loss_bbox=loss_bbox,
- loss_centerness=loss_centerness)
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
- def get_bboxes(self,
- cls_scores,
- bbox_preds,
- centernesses,
- img_metas,
- cfg=None,
- rescale=False,
- with_nms=True):
- """Transform network output for a batch into bbox predictions.
-
- Args:
- cls_scores (list[Tensor]): Box scores for each scale level
- with shape (N, num_points * num_classes, H, W).
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
- level with shape (N, num_points * 4, H, W).
- centernesses (list[Tensor]): Centerness for each scale level with
- shape (N, num_points * 1, H, W).
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- cfg (mmcv.Config | None): Test / postprocessing configuration,
- if None, test_cfg would be used. Default: None.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- with_nms (bool): If True, do nms before return boxes.
- Default: True.
-
- Returns:
- list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
- The first item is an (n, 5) tensor, where 5 represent
- (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
- The shape of the second tensor in the tuple is (n,), and
- each element represents the class label of the corresponding
- box.
- """
- assert len(cls_scores) == len(bbox_preds)
- num_levels = len(cls_scores)
-
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
- bbox_preds[0].device)
-
- cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
- bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
- centerness_pred_list = [
- centernesses[i].detach() for i in range(num_levels)
- ]
- if torch.onnx.is_in_onnx_export():
- assert len(
- img_metas
- ) == 1, 'Only support one input image while in exporting to ONNX'
- img_shapes = img_metas[0]['img_shape_for_onnx']
- else:
- img_shapes = [
- img_metas[i]['img_shape']
- for i in range(cls_scores[0].shape[0])
- ]
- scale_factors = [
- img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
- ]
- result_list = self._get_bboxes(cls_score_list, bbox_pred_list,
- centerness_pred_list, mlvl_points,
- img_shapes, scale_factors, cfg, rescale,
- with_nms)
- return result_list
-
- def _get_bboxes(self,
- cls_scores,
- bbox_preds,
- centernesses,
- mlvl_points,
- img_shapes,
- scale_factors,
- cfg,
- rescale=False,
- with_nms=True):
- """Transform outputs for a single batch item into bbox predictions.
-
- Args:
- cls_scores (list[Tensor]): Box scores for a single scale level
- with shape (N, num_points * num_classes, H, W).
- bbox_preds (list[Tensor]): Box energies / deltas for a single scale
- level with shape (N, num_points * 4, H, W).
- centernesses (list[Tensor]): Centerness for a single scale level
- with shape (N, num_points * 4, H, W).
- mlvl_points (list[Tensor]): Box reference for a single scale level
- with shape (num_total_points, 4).
- img_shapes (list[tuple[int]]): Shape of the input image,
- list[(height, width, 3)].
- scale_factors (list[ndarray]): Scale factor of the image arrange as
- (w_scale, h_scale, w_scale, h_scale).
- cfg (mmcv.Config | None): Test / postprocessing configuration,
- if None, test_cfg would be used.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- with_nms (bool): If True, do nms before return boxes.
- Default: True.
-
- Returns:
- tuple(Tensor):
- det_bboxes (Tensor): BBox predictions in shape (n, 5), where
- the first 4 columns are bounding box positions
- (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
- between 0 and 1.
- det_labels (Tensor): A (n,) tensor where each item is the
- predicted class label of the corresponding box.
- """
- cfg = self.test_cfg if cfg is None else cfg
- assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
- device = cls_scores[0].device
- batch_size = cls_scores[0].shape[0]
- # convert to tensor to keep tracing
- nms_pre_tensor = torch.tensor(
- cfg.get('nms_pre', -1), device=device, dtype=torch.long)
- mlvl_bboxes = []
- mlvl_scores = []
- mlvl_centerness = []
- for cls_score, bbox_pred, centerness, points in zip(
- cls_scores, bbox_preds, centernesses, mlvl_points):
- assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
- scores = cls_score.permute(0, 2, 3, 1).reshape(
- batch_size, -1, self.cls_out_channels).sigmoid()
- centerness = centerness.permute(0, 2, 3,
- 1).reshape(batch_size,
- -1).sigmoid()
-
- bbox_pred = bbox_pred.permute(0, 2, 3,
- 1).reshape(batch_size, -1, 4)
- # Always keep topk op for dynamic input in onnx
- if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
- or scores.shape[-2] > nms_pre_tensor):
- from torch import _shape_as_tensor
- # keep shape as tensor and get k
- num_anchor = _shape_as_tensor(scores)[-2].to(device)
- nms_pre = torch.where(nms_pre_tensor < num_anchor,
- nms_pre_tensor, num_anchor)
-
- max_scores, _ = (scores * centerness[..., None]).max(-1)
- _, topk_inds = max_scores.topk(nms_pre)
- points = points[topk_inds, :]
- batch_inds = torch.arange(batch_size).view(
- -1, 1).expand_as(topk_inds).long()
- bbox_pred = bbox_pred[batch_inds, topk_inds, :]
- scores = scores[batch_inds, topk_inds, :]
- centerness = centerness[batch_inds, topk_inds]
-
- bboxes = distance2bbox(points, bbox_pred, max_shape=img_shapes)
- mlvl_bboxes.append(bboxes)
- mlvl_scores.append(scores)
- mlvl_centerness.append(centerness)
-
- batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
- if rescale:
- batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
- scale_factors).unsqueeze(1)
- batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
- batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1)
-
- # Set max number of box to be feed into nms in deployment
- deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
- if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
- batch_mlvl_scores, _ = (
- batch_mlvl_scores *
- batch_mlvl_centerness.unsqueeze(2).expand_as(batch_mlvl_scores)
- ).max(-1)
- _, topk_inds = batch_mlvl_scores.topk(deploy_nms_pre)
- batch_inds = torch.arange(batch_mlvl_scores.shape[0]).view(
- -1, 1).expand_as(topk_inds)
- batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
- batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
- batch_mlvl_centerness = batch_mlvl_centerness[batch_inds,
- topk_inds]
-
- # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
- # BG cat_id: num_class
- padding = batch_mlvl_scores.new_zeros(batch_size,
- batch_mlvl_scores.shape[1], 1)
- batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
-
- if with_nms:
- det_results = []
- for (mlvl_bboxes, mlvl_scores,
- mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
- batch_mlvl_centerness):
- det_bbox, det_label = multiclass_nms(
- mlvl_bboxes,
- mlvl_scores,
- cfg.score_thr,
- cfg.nms,
- cfg.max_per_img,
- score_factors=mlvl_centerness)
- det_results.append(tuple([det_bbox, det_label]))
- else:
- det_results = [
- tuple(mlvl_bs)
- for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
- batch_mlvl_centerness)
- ]
- return det_results
-
- def _get_points_single(self,
- featmap_size,
- stride,
- dtype,
- device,
- flatten=False):
- """Get points according to feature map sizes."""
- y, x = super()._get_points_single(featmap_size, stride, dtype, device)
- points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),
- dim=-1) + stride // 2
- return points
-
- def get_targets(self, points, gt_bboxes_list, gt_labels_list):
- """Compute regression, classification and centerness targets for points
- in multiple images.
-
- Args:
- points (list[Tensor]): Points of each fpn level, each has shape
- (num_points, 2).
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
- each has shape (num_gt, 4).
- gt_labels_list (list[Tensor]): Ground truth labels of each box,
- each has shape (num_gt,).
-
- Returns:
- tuple:
- concat_lvl_labels (list[Tensor]): Labels of each level. \
- concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \
- level.
- """
- assert len(points) == len(self.regress_ranges)
- num_levels = len(points)
- # expand regress ranges to align with points
- expanded_regress_ranges = [
- points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
- points[i]) for i in range(num_levels)
- ]
- # concat all levels points and regress ranges
- concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
- concat_points = torch.cat(points, dim=0)
-
- # the number of points per img, per lvl
- num_points = [center.size(0) for center in points]
-
- # get labels and bbox_targets of each image
- labels_list, bbox_targets_list = multi_apply(
- self._get_target_single,
- gt_bboxes_list,
- gt_labels_list,
- points=concat_points,
- regress_ranges=concat_regress_ranges,
- num_points_per_lvl=num_points)
-
- # split to per img, per level
- labels_list = [labels.split(num_points, 0) for labels in labels_list]
- bbox_targets_list = [
- bbox_targets.split(num_points, 0)
- for bbox_targets in bbox_targets_list
- ]
-
- # concat per level image
- concat_lvl_labels = []
- concat_lvl_bbox_targets = []
- for i in range(num_levels):
- concat_lvl_labels.append(
- torch.cat([labels[i] for labels in labels_list]))
- bbox_targets = torch.cat(
- [bbox_targets[i] for bbox_targets in bbox_targets_list])
- if self.norm_on_bbox:
- bbox_targets = bbox_targets / self.strides[i]
- concat_lvl_bbox_targets.append(bbox_targets)
- return concat_lvl_labels, concat_lvl_bbox_targets
-
- def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,
- num_points_per_lvl):
- """Compute regression and classification targets for a single image."""
- num_points = points.size(0)
- num_gts = gt_labels.size(0)
- if num_gts == 0:
- return gt_labels.new_full((num_points,), self.num_classes), \
- gt_bboxes.new_zeros((num_points, 4))
-
- areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
- gt_bboxes[:, 3] - gt_bboxes[:, 1])
- # TODO: figure out why these two are different
- # areas = areas[None].expand(num_points, num_gts)
- areas = areas[None].repeat(num_points, 1)
- regress_ranges = regress_ranges[:, None, :].expand(
- num_points, num_gts, 2)
- gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
- xs, ys = points[:, 0], points[:, 1]
- xs = xs[:, None].expand(num_points, num_gts)
- ys = ys[:, None].expand(num_points, num_gts)
-
- left = xs - gt_bboxes[..., 0]
- right = gt_bboxes[..., 2] - xs
- top = ys - gt_bboxes[..., 1]
- bottom = gt_bboxes[..., 3] - ys
- bbox_targets = torch.stack((left, top, right, bottom), -1)
-
- if self.center_sampling:
- # condition1: inside a `center bbox`
- radius = self.center_sample_radius
- center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
- center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
- center_gts = torch.zeros_like(gt_bboxes)
- stride = center_xs.new_zeros(center_xs.shape)
-
- # project the points on current lvl back to the `original` sizes
- lvl_begin = 0
- for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
- lvl_end = lvl_begin + num_points_lvl
- stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
- lvl_begin = lvl_end
-
- x_mins = center_xs - stride
- y_mins = center_ys - stride
- x_maxs = center_xs + stride
- y_maxs = center_ys + stride
- center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
- x_mins, gt_bboxes[..., 0])
- center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
- y_mins, gt_bboxes[..., 1])
- center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
- gt_bboxes[..., 2], x_maxs)
- center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
- gt_bboxes[..., 3], y_maxs)
-
- cb_dist_left = xs - center_gts[..., 0]
- cb_dist_right = center_gts[..., 2] - xs
- cb_dist_top = ys - center_gts[..., 1]
- cb_dist_bottom = center_gts[..., 3] - ys
- center_bbox = torch.stack(
- (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
- inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
- else:
- # condition1: inside a gt bbox
- inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
-
- # condition2: limit the regression range for each location
- max_regress_distance = bbox_targets.max(-1)[0]
- inside_regress_range = (
- (max_regress_distance >= regress_ranges[..., 0])
- & (max_regress_distance <= regress_ranges[..., 1]))
-
- # if there are still more than one objects for a location,
- # we choose the one with minimal area
- areas[inside_gt_bbox_mask == 0] = INF
- areas[inside_regress_range == 0] = INF
- min_area, min_area_inds = areas.min(dim=1)
-
- labels = gt_labels[min_area_inds]
- labels[min_area == INF] = self.num_classes # set as BG
- bbox_targets = bbox_targets[range(num_points), min_area_inds]
-
- return labels, bbox_targets
-
- def centerness_target(self, pos_bbox_targets):
- """Compute centerness targets.
-
- Args:
- pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape
- (num_pos, 4)
-
- Returns:
- Tensor: Centerness target.
- """
- # only calculate pos centerness targets, otherwise there may be nan
- left_right = pos_bbox_targets[:, [0, 2]]
- top_bottom = pos_bbox_targets[:, [1, 3]]
- centerness_targets = (
- left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
- top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
- return torch.sqrt(centerness_targets)
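
A standalone numeric check of the centerness formula implemented in `centerness_target` above, using made-up regression targets:

```python
# Made-up (left, top, right, bottom) targets for two positive points.
import torch

pos_bbox_targets = torch.tensor([
    [2.0, 3.0, 6.0, 3.0],   # off-centre horizontally -> lower centerness
    [4.0, 4.0, 4.0, 4.0],   # exactly at the box centre -> centerness 1.0
])

left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0])
    * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
)
print(centerness)  # tensor([0.5774, 1.0000])
```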
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/bbox/assigners/point_assigner.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/bbox/assigners/point_assigner.py
deleted file mode 100644
index fb8f5e4edc63f4851e2067034c5e67a3558f31bc..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/bbox/assigners/point_assigner.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import torch
-
-from ..builder import BBOX_ASSIGNERS
-from .assign_result import AssignResult
-from .base_assigner import BaseAssigner
-
-
-@BBOX_ASSIGNERS.register_module()
-class PointAssigner(BaseAssigner):
- """Assign a corresponding gt bbox or background to each point.
-
- Each proposal will be assigned with `0`, or a positive integer
- indicating the ground truth index.
-
- - 0: negative sample, no assigned gt
- - positive integer: positive sample, index (1-based) of assigned gt
- """
-
- def __init__(self, scale=4, pos_num=3):
- self.scale = scale
- self.pos_num = pos_num
-
- def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
- """Assign gt to points.
-
- This method assigns a gt bbox to every point set; each point set
- will be assigned with the background_label (-1), or a label number.
- -1 is background, and a semi-positive number is the index (0-based) of
- the assigned gt.
- The assignment is done in the following steps; the order matters.
-
- 1. assign every point to the background_label (-1)
- 2. A point is assigned to some gt bbox if
- (i) the point is within the k closest points to the gt bbox
- (ii) the distance between this point and the gt is smaller than
- other gt bboxes
-
- Args:
- points (Tensor): points to be assigned, shape(n, 3) while last
- dimension stands for (x, y, stride).
- gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
- gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
- labelled as `ignored`, e.g., crowd boxes in COCO.
- NOTE: currently unused.
- gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
-
- Returns:
- :obj:`AssignResult`: The assign result.
- """
- num_points = points.shape[0]
- num_gts = gt_bboxes.shape[0]
-
- if num_gts == 0 or num_points == 0:
- # If no truth assign everything to the background
- assigned_gt_inds = points.new_full((num_points, ),
- 0,
- dtype=torch.long)
- if gt_labels is None:
- assigned_labels = None
- else:
- assigned_labels = points.new_full((num_points, ),
- -1,
- dtype=torch.long)
- return AssignResult(
- num_gts, assigned_gt_inds, None, labels=assigned_labels)
-
- points_xy = points[:, :2]
- points_stride = points[:, 2]
- points_lvl = torch.log2(
- points_stride).int() # [3...,4...,5...,6...,7...]
- lvl_min, lvl_max = points_lvl.min(), points_lvl.max()
-
- # assign gt box
- gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
- gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
- scale = self.scale
- gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
- torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
- gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)
-
- # stores the assigned gt index of each point
- assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)
- # stores the assigned gt dist (to this point) of each point
- assigned_gt_dist = points.new_full((num_points, ), float('inf'))
- points_range = torch.arange(points.shape[0])
-
- for idx in range(num_gts):
- gt_lvl = gt_bboxes_lvl[idx]
- # get the index of points in this level
- lvl_idx = gt_lvl == points_lvl
- points_index = points_range[lvl_idx]
- # get the points in this level
- lvl_points = points_xy[lvl_idx, :]
- # get the center point of gt
- gt_point = gt_bboxes_xy[[idx], :]
- # get width and height of gt
- gt_wh = gt_bboxes_wh[[idx], :]
- # compute the distance between gt center and
- # all points in this level
- points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)
- # find the nearest k points to gt center in this level
- min_dist, min_dist_index = torch.topk(
- points_gt_dist, self.pos_num, largest=False)
- # the index of nearest k points to gt center in this level
- min_dist_points_index = points_index[min_dist_index]
- # The less_than_recorded_index stores the index
- # of min_dist that is less than the assigned_gt_dist. Where
- # assigned_gt_dist stores the dist from previous assigned gt
- # (if exist) to each point.
- less_than_recorded_index = min_dist < assigned_gt_dist[
- min_dist_points_index]
- # The min_dist_points_index stores the index of points satisfy:
- # (1) it is k nearest to current gt center in this level.
- # (2) it is closer to current gt center than other gt center.
- min_dist_points_index = min_dist_points_index[
- less_than_recorded_index]
- # assign the result
- assigned_gt_inds[min_dist_points_index] = idx + 1
- assigned_gt_dist[min_dist_points_index] = min_dist[
- less_than_recorded_index]
-
- if gt_labels is not None:
- assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)
- pos_inds = torch.nonzero(
- assigned_gt_inds > 0, as_tuple=False).squeeze()
- if pos_inds.numel() > 0:
- assigned_labels[pos_inds] = gt_labels[
- assigned_gt_inds[pos_inds] - 1]
- else:
- assigned_labels = None
-
- return AssignResult(
- num_gts, assigned_gt_inds, None, labels=assigned_labels)
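
A toy sketch of the assignment logic in the deleted `PointAssigner`, with hand-built tensors; the import path assumes a standard mmdet 2.x install rather than this repository's `annotator` copy:

```python
# Toy example with hand-built tensors; import path is an assumption (mmdet 2.x).
import torch
from mmdet.core.bbox.assigners import PointAssigner

assigner = PointAssigner(scale=4, pos_num=1)

# Points as (x, y, stride): two on the stride-8 level, one on the stride-16 level.
points = torch.tensor([
    [10.0, 10.0, 8.0],
    [40.0, 40.0, 8.0],
    [64.0, 64.0, 16.0],
])
# One 32x32 ground-truth box, which maps to the stride-8 level.
gt_bboxes = torch.tensor([[24.0, 24.0, 56.0, 56.0]])
gt_labels = torch.tensor([2])

result = assigner.assign(points, gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds)  # tensor([0, 1, 0]): only the nearest stride-8 point is positive
```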
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/chase_db1.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/chase_db1.py
deleted file mode 100644
index 298594ea925f87f22b37094a2ec50e370aec96a0..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/chase_db1.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'ChaseDB1Dataset'
-data_root = 'data/CHASE_DB1'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-img_scale = (960, 999)
-crop_size = (128, 128)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type='RepeatDataset',
- times=40000,
- dataset=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
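
A minimal sketch of how a base config like the deleted `chase_db1.py` is typically loaded with mmcv's `Config`; the file path refers to this repository's copy and is illustrative only:

```python
# Sketch only; assumes an mmcv 1.x-style Config loader and this repo's path layout.
from mmcv import Config

cfg = Config.fromfile(
    'annotator/uniformer_base/configs/_base_/datasets/chase_db1.py')

print(cfg.data.samples_per_gpu)                        # 4 images per GPU
print(cfg.crop_size)                                   # (128, 128) training crops
print([step['type'] for step in cfg.train_pipeline])   # LoadImageFromFile, ...
```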
diff --git a/spaces/RockmanYang/vocal_remover/lib/nets.py b/spaces/RockmanYang/vocal_remover/lib/nets.py
deleted file mode 100644
index 1ec7af24cd5b3ad4fb41466f4c0283da8bdffb40..0000000000000000000000000000000000000000
--- a/spaces/RockmanYang/vocal_remover/lib/nets.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from lib import layers
-
-
-class BaseNet(nn.Module):
-
- def __init__(self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6))):
- super(BaseNet, self).__init__()
- self.enc1 = layers.Conv2DBNActiv(nin, nout, 3, 1, 1)
- self.enc2 = layers.Encoder(nout, nout * 2, 3, 2, 1)
- self.enc3 = layers.Encoder(nout * 2, nout * 4, 3, 2, 1)
- self.enc4 = layers.Encoder(nout * 4, nout * 6, 3, 2, 1)
- self.enc5 = layers.Encoder(nout * 6, nout * 8, 3, 2, 1)
-
- self.aspp = layers.ASPPModule(nout * 8, nout * 8, dilations, dropout=True)
-
- self.dec4 = layers.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1)
- self.dec3 = layers.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1)
- self.dec2 = layers.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1)
- self.lstm_dec2 = layers.LSTMModule(nout * 2, nin_lstm, nout_lstm)
- self.dec1 = layers.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1)
-
- def __call__(self, x):
- e1 = self.enc1(x)
- e2 = self.enc2(e1)
- e3 = self.enc3(e2)
- e4 = self.enc4(e3)
- e5 = self.enc5(e4)
-
- h = self.aspp(e5)
-
- h = self.dec4(h, e4)
- h = self.dec3(h, e3)
- h = self.dec2(h, e2)
- h = torch.cat([h, self.lstm_dec2(h)], dim=1)
- h = self.dec1(h, e1)
-
- return h
-
-
-class CascadedNet(nn.Module):
-
- def __init__(self, n_fft, nout=32, nout_lstm=128):
- super(CascadedNet, self).__init__()
- self.max_bin = n_fft // 2
- self.output_bin = n_fft // 2 + 1
- self.nin_lstm = self.max_bin // 2
- self.offset = 64
-
- self.stg1_low_band_net = nn.Sequential(
- BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm),
- layers.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0)
- )
- self.stg1_high_band_net = BaseNet(
- 2, nout // 4, self.nin_lstm // 2, nout_lstm // 2
- )
-
- self.stg2_low_band_net = nn.Sequential(
- BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm),
- layers.Conv2DBNActiv(nout, nout // 2, 1, 1, 0)
- )
- self.stg2_high_band_net = BaseNet(
- nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2
- )
-
- self.stg3_full_band_net = BaseNet(
- 3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm
- )
-
- self.out = nn.Conv2d(nout, 2, 1, bias=False)
- self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False)
-
- def forward(self, x):
- x = x[:, :, :self.max_bin]
-
- bandw = x.size()[2] // 2
- l1_in = x[:, :, :bandw]
- h1_in = x[:, :, bandw:]
- l1 = self.stg1_low_band_net(l1_in)
- h1 = self.stg1_high_band_net(h1_in)
- aux1 = torch.cat([l1, h1], dim=2)
-
- l2_in = torch.cat([l1_in, l1], dim=1)
- h2_in = torch.cat([h1_in, h1], dim=1)
- l2 = self.stg2_low_band_net(l2_in)
- h2 = self.stg2_high_band_net(h2_in)
- aux2 = torch.cat([l2, h2], dim=2)
-
- f3_in = torch.cat([x, aux1, aux2], dim=1)
- f3 = self.stg3_full_band_net(f3_in)
-
- mask = torch.sigmoid(self.out(f3))
- mask = F.pad(
- input=mask,
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
- mode='replicate'
- )
-
- if self.training:
- aux = torch.cat([aux1, aux2], dim=1)
- aux = torch.sigmoid(self.aux_out(aux))
- aux = F.pad(
- input=aux,
- pad=(0, 0, 0, self.output_bin - aux.size()[2]),
- mode='replicate'
- )
- return mask, aux
- else:
- return mask
-
- def predict_mask(self, x):
- mask = self.forward(x)
-
- if self.offset > 0:
- mask = mask[:, :, :, self.offset:-self.offset]
- assert mask.size()[3] > 0
-
- return mask
-
- def predict(self, x):
- mask = self.forward(x)
- pred_mag = x * mask
-
- if self.offset > 0:
- pred_mag = pred_mag[:, :, :, self.offset:-self.offset]
- assert pred_mag.size()[3] > 0
-
- return pred_mag
diff --git a/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/losses/__init__.py b/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/losses/__init__.py
deleted file mode 100644
index b03080a907cb5cb4b316ceb74866ddbc406b33bf..0000000000000000000000000000000000000000
--- a/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/losses/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .stft_loss import * # NOQA
diff --git a/spaces/SagarPatel/YouMatter/README.md b/spaces/SagarPatel/YouMatter/README.md
deleted file mode 100644
index ea2c5fca98760d1129c197b53f306ccb165cae4d..0000000000000000000000000000000000000000
--- a/spaces/SagarPatel/YouMatter/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: YouMatter
-emoji: 🏢
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 3.1.3
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SeyedAli/English-To-Persian-Translation/README.md b/spaces/SeyedAli/English-To-Persian-Translation/README.md
deleted file mode 100644
index 22a36bafb55dfabb67f2fbcfe64e887a4603beb0..0000000000000000000000000000000000000000
--- a/spaces/SeyedAli/English-To-Persian-Translation/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: English To Persian Translation
-emoji: 🌍
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.44.4
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ShawnLJW/image2coloringbook/README.md b/spaces/ShawnLJW/image2coloringbook/README.md
deleted file mode 100644
index ebc8647a2bbf6131cb69c77b6397ff212883d063..0000000000000000000000000000000000000000
--- a/spaces/ShawnLJW/image2coloringbook/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: image2coloringbook
-emoji: ✏️
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: "3.46.0"
-app_file: app.py
-pinned: false
----
-
-# image2coloringbook
-
-
-
-A simple tool that converts an image into a coloring book. It runs on a custom implementation of the k-means clustering algorithm by default but comes with the option to use scikit-learn's implementation.
-
-This [Colab notebook](https://colab.research.google.com/drive/1S91AsP2XHUKuxtUBEaFlboWd8ScAndcz?usp=sharing) explains how the coloring books are generated.
-
-## Usage
-
-The application is available as a [Hugging Face space](https://shawnljw-image2coloringbook.hf.space).
-
-To run the application locally, clone this repo and open the directory in your terminal.
-
-Install all requirements with pip:
-
-```shell
-pip install -r requirements.txt
-```
-
-Once all requirements are installed, you can run the web ui with:
-
-```shell
-gradio app.py
-```
\ No newline at end of file
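
The deleted README describes the approach only briefly; the sketch below is not the app's actual code, just a minimal illustration of the idea it names (k-means colour quantization followed by outlining the label boundaries), with `photo.jpg` as a placeholder filename:

```python
# Minimal illustration of the coloring-book idea, using scikit-learn's KMeans.
import numpy as np
from PIL import Image
from sklearn.cluster import KMeans

img = np.asarray(Image.open("photo.jpg").convert("RGB"))
h, w, _ = img.shape

# Quantize the image to 8 colours.
kmeans = KMeans(n_clusters=8, n_init=10).fit(img.reshape(-1, 3))
labels = kmeans.labels_.reshape(h, w)

# Draw an outline wherever the colour label changes between neighbouring pixels.
edges = np.zeros((h, w), dtype=bool)
edges[:, 1:] |= labels[:, 1:] != labels[:, :-1]
edges[1:, :] |= labels[1:, :] != labels[:-1, :]

page = np.where(edges, 0, 255).astype(np.uint8)
Image.fromarray(page).save("coloring_page.png")
```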
diff --git a/spaces/Siddhant/ESPnet2-SLU/s3prl.sh b/spaces/Siddhant/ESPnet2-SLU/s3prl.sh
deleted file mode 100644
index 209bdb9421c577ef1e4c1235c952cd631c280f06..0000000000000000000000000000000000000000
--- a/spaces/Siddhant/ESPnet2-SLU/s3prl.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-git clone https://github.com/s3prl/s3prl.git
-cd s3prl
-git checkout -b sync_commit e52439edaeb1a443e82960e6401ae6ab4241def6
-cd ..
-# rm -rf fairseq
-
-# FairSeq Commit id when making this PR: `commit 313ff0581561c7725ea9430321d6af2901573dfb`
-# git clone --depth 1 https://github.com/pytorch/fairseq.git
-# python3 -m pip install --editable ./fairseq
-# python3 -m pip install filelock
\ No newline at end of file
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/formatters.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/formatters.py
deleted file mode 100644
index 15cf703c2a031b7a6512c0b141ee258e3dc54857..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/formatters.py
+++ /dev/null
@@ -1,1028 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Display formatters.
-
-Inheritance diagram:
-
-.. inheritance-diagram:: IPython.core.formatters
- :parts: 3
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import abc
-import sys
-import traceback
-import warnings
-from io import StringIO
-
-from decorator import decorator
-
-from traitlets.config.configurable import Configurable
-from .getipython import get_ipython
-from ..utils.sentinel import Sentinel
-from ..utils.dir2 import get_real_method
-from ..lib import pretty
-from traitlets import (
- Bool, Dict, Integer, Unicode, CUnicode, ObjectName, List,
- ForwardDeclaredInstance,
- default, observe,
-)
-
-from typing import Any
-
-
-class DisplayFormatter(Configurable):
-
- active_types = List(Unicode(),
- help="""List of currently active mime-types to display.
- You can use this to set a white-list for formats to display.
-
- Most users will not need to change this value.
- """).tag(config=True)
-
- @default('active_types')
- def _active_types_default(self):
- return self.format_types
-
- @observe('active_types')
- def _active_types_changed(self, change):
- for key, formatter in self.formatters.items():
- if key in change['new']:
- formatter.enabled = True
- else:
- formatter.enabled = False
-
- ipython_display_formatter = ForwardDeclaredInstance('FormatterABC')
- @default('ipython_display_formatter')
- def _default_formatter(self):
- return IPythonDisplayFormatter(parent=self)
-
- mimebundle_formatter = ForwardDeclaredInstance('FormatterABC')
- @default('mimebundle_formatter')
- def _default_mime_formatter(self):
- return MimeBundleFormatter(parent=self)
-
- # A dict of formatters whose keys are format types (MIME types) and whose
- # values are instances of BaseFormatter subclasses.
- formatters = Dict()
- @default('formatters')
- def _formatters_default(self):
- """Activate the default formatters."""
- formatter_classes = [
- PlainTextFormatter,
- HTMLFormatter,
- MarkdownFormatter,
- SVGFormatter,
- PNGFormatter,
- PDFFormatter,
- JPEGFormatter,
- LatexFormatter,
- JSONFormatter,
- JavascriptFormatter
- ]
- d = {}
- for cls in formatter_classes:
- f = cls(parent=self)
- d[f.format_type] = f
- return d
-
- def format(self, obj, include=None, exclude=None):
- """Return a format data dict for an object.
-
- By default all format types will be computed.
-
- The following MIME types are usually implemented:
-
- * text/plain
- * text/html
- * text/markdown
- * text/latex
- * application/json
- * application/javascript
- * application/pdf
- * image/png
- * image/jpeg
- * image/svg+xml
-
- Parameters
- ----------
- obj : object
- The Python object whose format data will be computed.
- include : list, tuple or set; optional
- A list of format type strings (MIME types) to include in the
- format data dict. If this is set *only* the format types included
- in this list will be computed.
- exclude : list, tuple or set; optional
- A list of format type string (MIME types) to exclude in the format
- data dict. If this is set all format types will be computed,
- except for those included in this argument.
- Mimetypes present in exclude will take precedence over the ones in include
-
- Returns
- -------
- (format_dict, metadata_dict) : tuple of two dicts
- format_dict is a dictionary of key/value pairs, one for each format that was
- generated for the object. The keys are the format types, which
- will usually be MIME type strings, and the values are JSON'able
- data structures containing the raw data for the representation in
- that format.
-
- metadata_dict is a dictionary of metadata about each mime-type output.
- Its keys will be a strict subset of the keys in format_dict.
-
- Notes
- -----
- If an object implements `_repr_mimebundle_` as well as various
- `_repr_*_`, the data returned by `_repr_mimebundle_` will take
- precedence and the corresponding `_repr_*_` for this mimetype will
- not be called.
-
- """
- format_dict = {}
- md_dict = {}
-
- if self.ipython_display_formatter(obj):
- # object handled itself, don't proceed
- return {}, {}
-
- format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
-
- if format_dict or md_dict:
- if include:
- format_dict = {k:v for k,v in format_dict.items() if k in include}
- md_dict = {k:v for k,v in md_dict.items() if k in include}
- if exclude:
- format_dict = {k:v for k,v in format_dict.items() if k not in exclude}
- md_dict = {k:v for k,v in md_dict.items() if k not in exclude}
-
- for format_type, formatter in self.formatters.items():
- if format_type in format_dict:
- # already got it from mimebundle, maybe don't render again.
- # exception: manually registered per-mime renderer
- # check priority:
- # 1. user-registered per-mime formatter
- # 2. mime-bundle (user-registered or repr method)
- # 3. default per-mime formatter (e.g. repr method)
- try:
- formatter.lookup(obj)
- except KeyError:
- # no special formatter, use mime-bundle-provided value
- continue
- if include and format_type not in include:
- continue
- if exclude and format_type in exclude:
- continue
-
- md = None
- try:
- data = formatter(obj)
- except:
- # FIXME: log the exception
- raise
-
- # formatters can return raw data or (data, metadata)
- if isinstance(data, tuple) and len(data) == 2:
- data, md = data
-
- if data is not None:
- format_dict[format_type] = data
- if md is not None:
- md_dict[format_type] = md
- return format_dict, md_dict
-
- @property
- def format_types(self):
- """Return the format types (MIME types) of the active formatters."""
- return list(self.formatters.keys())
-
-
-#-----------------------------------------------------------------------------
-# Formatters for specific format types (text, html, svg, etc.)
-#-----------------------------------------------------------------------------
-
-
-def _safe_repr(obj):
- """Try to return a repr of an object
-
- always returns a string, at least.
- """
- try:
- return repr(obj)
- except Exception as e:
- return "un-repr-able object (%r)" % e
-
-
-class FormatterWarning(UserWarning):
- """Warning class for errors in formatters"""
-
-@decorator
-def catch_format_error(method, self, *args, **kwargs):
- """show traceback on failed format call"""
- try:
- r = method(self, *args, **kwargs)
- except NotImplementedError:
- # don't warn on NotImplementedErrors
- return self._check_return(None, args[0])
- except Exception:
- exc_info = sys.exc_info()
- ip = get_ipython()
- if ip is not None:
- ip.showtraceback(exc_info)
- else:
- traceback.print_exception(*exc_info)
- return self._check_return(None, args[0])
- return self._check_return(r, args[0])
-
-
-class FormatterABC(metaclass=abc.ABCMeta):
- """ Abstract base class for Formatters.
-
- A formatter is a callable class that is responsible for computing the
- raw format data for a particular format type (MIME type). For example,
- an HTML formatter would have a format type of `text/html` and would return
- the HTML representation of the object when called.
- """
-
- # The format type of the data returned, usually a MIME type.
- format_type = 'text/plain'
-
- # Is the formatter enabled...
- enabled = True
-
- @abc.abstractmethod
- def __call__(self, obj):
- """Return a JSON'able representation of the object.
-
- If the object cannot be formatted by this formatter,
- warn and return None.
- """
- return repr(obj)
-
-
-def _mod_name_key(typ):
- """Return a (__module__, __name__) tuple for a type.
-
- Used as key in Formatter.deferred_printers.
- """
- module = getattr(typ, '__module__', None)
- name = getattr(typ, '__name__', None)
- return (module, name)
-
-
-def _get_type(obj):
- """Return the type of an instance (old and new-style)"""
- return getattr(obj, '__class__', None) or type(obj)
-
-
-_raise_key_error = Sentinel('_raise_key_error', __name__,
-"""
-Special value to raise a KeyError
-
-Raise KeyError in `BaseFormatter.pop` if passed as the default value to `pop`
-""")
-
-
-class BaseFormatter(Configurable):
- """A base formatter class that is configurable.
-
- This formatter should usually be used as the base class of all formatters.
- It is a traited :class:`Configurable` class and includes an extensible
- API for users to determine how their objects are formatted. The following
- logic is used to find a function to format a given object.
-
- 1. The object is introspected to see if it has a method with the name
- :attr:`print_method`. If it does, that object is passed to that method
- for formatting.
- 2. If no print method is found, three internal dictionaries are consulted
- to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
- and :attr:`deferred_printers`.
-
- Users should use these dictionaries to register functions that will be
- used to compute the format data for their objects (if those objects don't
- have the special print methods). The easiest way of using these
- dictionaries is through the :meth:`for_type` and :meth:`for_type_by_name`
- methods.
-
- If no function/callable is found to compute the format data, ``None`` is
- returned and this format type is not used.
- """
-
- format_type = Unicode("text/plain")
- _return_type: Any = str
-
- enabled = Bool(True).tag(config=True)
-
- print_method = ObjectName('__repr__')
-
- # The singleton printers.
- # Maps the IDs of the builtin singleton objects to the format functions.
- singleton_printers = Dict().tag(config=True)
-
- # The type-specific printers.
- # Map type objects to the format functions.
- type_printers = Dict().tag(config=True)
-
- # The deferred-import type-specific printers.
- # Map (modulename, classname) pairs to the format functions.
- deferred_printers = Dict().tag(config=True)
-
- @catch_format_error
- def __call__(self, obj):
- """Compute the format for an object."""
- if self.enabled:
- # lookup registered printer
- try:
- printer = self.lookup(obj)
- except KeyError:
- pass
- else:
- return printer(obj)
- # Finally look for special method names
- method = get_real_method(obj, self.print_method)
- if method is not None:
- return method()
- return None
- else:
- return None
-
- def __contains__(self, typ):
- """map in to lookup_by_type"""
- try:
- self.lookup_by_type(typ)
- except KeyError:
- return False
- else:
- return True
-
- def _check_return(self, r, obj):
- """Check that a return value is appropriate
-
- Return the value if so, None otherwise, warning if invalid.
- """
- if r is None or isinstance(r, self._return_type) or \
- (isinstance(r, tuple) and r and isinstance(r[0], self._return_type)):
- return r
- else:
- warnings.warn(
- "%s formatter returned invalid type %s (expected %s) for object: %s" % \
- (self.format_type, type(r), self._return_type, _safe_repr(obj)),
- FormatterWarning
- )
-
- def lookup(self, obj):
- """Look up the formatter for a given instance.
-
- Parameters
- ----------
- obj : object instance
-
- Returns
- -------
- f : callable
- The registered formatting callable for the type.
-
- Raises
- ------
- KeyError if the type has not been registered.
- """
- # look for singleton first
- obj_id = id(obj)
- if obj_id in self.singleton_printers:
- return self.singleton_printers[obj_id]
- # then lookup by type
- return self.lookup_by_type(_get_type(obj))
-
- def lookup_by_type(self, typ):
- """Look up the registered formatter for a type.
-
- Parameters
- ----------
- typ : type or '__module__.__name__' string for a type
-
- Returns
- -------
- f : callable
- The registered formatting callable for the type.
-
- Raises
- ------
- KeyError if the type has not been registered.
- """
- if isinstance(typ, str):
- typ_key = tuple(typ.rsplit('.',1))
- if typ_key not in self.deferred_printers:
- # We may have it cached in the type map. We will have to
- # iterate over all of the types to check.
- for cls in self.type_printers:
- if _mod_name_key(cls) == typ_key:
- return self.type_printers[cls]
- else:
- return self.deferred_printers[typ_key]
- else:
- for cls in pretty._get_mro(typ):
- if cls in self.type_printers or self._in_deferred_types(cls):
- return self.type_printers[cls]
-
- # If we have reached here, the lookup failed.
- raise KeyError("No registered printer for {0!r}".format(typ))
-
- def for_type(self, typ, func=None):
- """Add a format function for a given type.
-
- Parameters
- ----------
- typ : type or '__module__.__name__' string for a type
- The class of the object that will be formatted using `func`.
-
- func : callable
- A callable for computing the format data.
- `func` will be called with the object to be formatted,
- and will return the raw data in this formatter's format.
- Subclasses may use a different call signature for the
- `func` argument.
-
- If `func` is None or not specified, there will be no change,
- only returning the current value.
-
- Returns
- -------
- oldfunc : callable
- The currently registered callable.
- If you are registering a new formatter,
- this will be the previous value (to enable restoring later).
- """
- # if string given, interpret as 'pkg.module.class_name'
- if isinstance(typ, str):
- type_module, type_name = typ.rsplit('.', 1)
- return self.for_type_by_name(type_module, type_name, func)
-
- try:
- oldfunc = self.lookup_by_type(typ)
- except KeyError:
- oldfunc = None
-
- if func is not None:
- self.type_printers[typ] = func
-
- return oldfunc
-
- def for_type_by_name(self, type_module, type_name, func=None):
- """Add a format function for a type specified by the full dotted
- module and name of the type, rather than the type of the object.
-
- Parameters
- ----------
- type_module : str
- The full dotted name of the module the type is defined in, like
- ``numpy``.
-
- type_name : str
- The name of the type (the class name), like ``dtype``
-
- func : callable
- A callable for computing the format data.
- `func` will be called with the object to be formatted,
- and will return the raw data in this formatter's format.
- Subclasses may use a different call signature for the
- `func` argument.
-
- If `func` is None or unspecified, there will be no change,
- only returning the current value.
-
- Returns
- -------
- oldfunc : callable
- The currently registered callable.
- If you are registering a new formatter,
- this will be the previous value (to enable restoring later).
- """
- key = (type_module, type_name)
-
- try:
- oldfunc = self.lookup_by_type("%s.%s" % key)
- except KeyError:
- oldfunc = None
-
- if func is not None:
- self.deferred_printers[key] = func
- return oldfunc
-
- def pop(self, typ, default=_raise_key_error):
- """Pop a formatter for the given type.
-
- Parameters
- ----------
- typ : type or '__module__.__name__' string for a type
- default : object
- value to be returned if no formatter is registered for typ.
-
- Returns
- -------
- obj : object
- The last registered object for the type.
-
- Raises
- ------
- KeyError if the type is not registered and default is not specified.
- """
-
- if isinstance(typ, str):
- typ_key = tuple(typ.rsplit('.',1))
- if typ_key not in self.deferred_printers:
- # We may have it cached in the type map. We will have to
- # iterate over all of the types to check.
- for cls in self.type_printers:
- if _mod_name_key(cls) == typ_key:
- old = self.type_printers.pop(cls)
- break
- else:
- old = default
- else:
- old = self.deferred_printers.pop(typ_key)
- else:
- if typ in self.type_printers:
- old = self.type_printers.pop(typ)
- else:
- old = self.deferred_printers.pop(_mod_name_key(typ), default)
- if old is _raise_key_error:
- raise KeyError("No registered value for {0!r}".format(typ))
- return old
-
- def _in_deferred_types(self, cls):
- """
- Check if the given class is specified in the deferred type registry.
-
- Successful matches will be moved to the regular type registry for future use.
- """
- mod = getattr(cls, '__module__', None)
- name = getattr(cls, '__name__', None)
- key = (mod, name)
- if key in self.deferred_printers:
- # Move the printer over to the regular registry.
- printer = self.deferred_printers.pop(key)
- self.type_printers[cls] = printer
- return True
- return False
-
-
-class PlainTextFormatter(BaseFormatter):
- """The default pretty-printer.
-
- This uses :mod:`IPython.lib.pretty` to compute the format data of
- the object. If the object cannot be pretty printed, :func:`repr` is used.
- See the documentation of :mod:`IPython.lib.pretty` for details on
- how to write pretty printers. Here is a simple example::
-
- def dtype_pprinter(obj, p, cycle):
- if cycle:
- return p.text('dtype(...)')
- if hasattr(obj, 'fields'):
- if obj.fields is None:
- p.text(repr(obj))
- else:
- p.begin_group(7, 'dtype([')
- for i, field in enumerate(obj.descr):
- if i > 0:
- p.text(',')
- p.breakable()
- p.pretty(field)
- p.end_group(7, '])')
- """
-
- # The format type of data returned.
- format_type = Unicode('text/plain')
-
- # This subclass ignores this attribute as it always needs to return
- # something.
- enabled = Bool(True).tag(config=False)
-
- max_seq_length = Integer(pretty.MAX_SEQ_LENGTH,
- help="""Truncate large collections (lists, dicts, tuples, sets) to this size.
-
- Set to 0 to disable truncation.
- """
- ).tag(config=True)
-
- # Look for a _repr_pretty_ methods to use for pretty printing.
- print_method = ObjectName('_repr_pretty_')
-
- # Whether to pretty-print or not.
- pprint = Bool(True).tag(config=True)
-
- # Whether to be verbose or not.
- verbose = Bool(False).tag(config=True)
-
- # The maximum width.
- max_width = Integer(79).tag(config=True)
-
- # The newline character.
- newline = Unicode('\n').tag(config=True)
-
- # format-string for pprinting floats
- float_format = Unicode('%r')
- # setter for float precision, either int or direct format-string
- float_precision = CUnicode('').tag(config=True)
-
- @observe('float_precision')
- def _float_precision_changed(self, change):
- """float_precision changed, set float_format accordingly.
-
- float_precision can be set by int or str.
- This will set float_format, after interpreting input.
- If numpy has been imported, numpy print precision will also be set.
-
- integer `n` sets format to '%.nf', otherwise, format set directly.
-
- An empty string returns to defaults (repr for float, 8 for numpy).
-
- This parameter can be set via the '%precision' magic.
- """
- new = change['new']
- if '%' in new:
- # got explicit format string
- fmt = new
- try:
- fmt%3.14159
- except Exception as e:
- raise ValueError("Precision must be int or format string, not %r"%new) from e
- elif new:
- # otherwise, should be an int
- try:
- i = int(new)
- assert i >= 0
- except ValueError as e:
- raise ValueError("Precision must be int or format string, not %r"%new) from e
- except AssertionError as e:
- raise ValueError("int precision must be non-negative, not %r"%i) from e
-
- fmt = '%%.%if'%i
- if 'numpy' in sys.modules:
- # set numpy precision if it has been imported
- import numpy
- numpy.set_printoptions(precision=i)
- else:
- # default back to repr
- fmt = '%r'
- if 'numpy' in sys.modules:
- import numpy
- # numpy default is 8
- numpy.set_printoptions(precision=8)
- self.float_format = fmt
-
- # Use the default pretty printers from IPython.lib.pretty.
- @default('singleton_printers')
- def _singleton_printers_default(self):
- return pretty._singleton_pprinters.copy()
-
- @default('type_printers')
- def _type_printers_default(self):
- d = pretty._type_pprinters.copy()
- d[float] = lambda obj,p,cycle: p.text(self.float_format%obj)
- # if NumPy is used, set precision for its float64 type
- if "numpy" in sys.modules:
- import numpy
-
- d[numpy.float64] = lambda obj, p, cycle: p.text(self.float_format % obj)
- return d
-
- @default('deferred_printers')
- def _deferred_printers_default(self):
- return pretty._deferred_type_pprinters.copy()
-
- #### FormatterABC interface ####
-
- @catch_format_error
- def __call__(self, obj):
- """Compute the pretty representation of the object."""
- if not self.pprint:
- return repr(obj)
- else:
- stream = StringIO()
- printer = pretty.RepresentationPrinter(stream, self.verbose,
- self.max_width, self.newline,
- max_seq_length=self.max_seq_length,
- singleton_pprinters=self.singleton_printers,
- type_pprinters=self.type_printers,
- deferred_pprinters=self.deferred_printers)
- printer.pretty(obj)
- printer.flush()
- return stream.getvalue()
-
-
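The `float_precision` observer above drives how floats are pretty-printed. A minimal sketch, assuming an interactive IPython session where `get_ipython()` returns a live shell, of how that trait is typically set:

```python
# Illustrative sketch, not part of the removed file: driving PlainTextFormatter's
# float_precision trait from a running IPython session.
from IPython import get_ipython

ip = get_ipython()
plain = ip.display_formatter.formatters["text/plain"]

plain.float_precision = "3"     # integer-like value -> float_format becomes '%.3f'
print(plain.float_format)       # '%.3f'

plain.float_precision = "%.6e"  # explicit format strings are used as-is
plain.float_precision = ""      # empty string resets to repr ('%r')
# The same trait is what the %precision line magic manipulates.
```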
-class HTMLFormatter(BaseFormatter):
- """An HTML formatter.
-
- To define the callables that compute the HTML representation of your
- objects, define a :meth:`_repr_html_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be a valid HTML snippet that
- could be injected into an existing DOM. It should *not* include the
- ```<html>``` or ```<body>``` tags.
- """
- format_type = Unicode('text/html')
-
- print_method = ObjectName('_repr_html_')
-
-
-class MarkdownFormatter(BaseFormatter):
- """A Markdown formatter.
-
- To define the callables that compute the Markdown representation of your
- objects, define a :meth:`_repr_markdown_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be valid Markdown.
- """
- format_type = Unicode('text/markdown')
-
- print_method = ObjectName('_repr_markdown_')
-
-class SVGFormatter(BaseFormatter):
- """An SVG formatter.
-
- To define the callables that compute the SVG representation of your
- objects, define a :meth:`_repr_svg_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be valid SVG enclosed in
- ```<svg>``` tags, that could be injected into an existing DOM. It should
- *not* include the ```<html>``` or ```<body>``` tags.
-
-
\ No newline at end of file
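To make the `for_type` / `format` machinery in the removed `formatters.py` concrete, here is a small hedged sketch of registering a per-type HTML formatter and computing a mime bundle. It assumes an interactive IPython session; the `Point` class and `point_to_html` helper are made up for illustration:

```python
# Illustrative sketch only: register a custom text/html formatter and use it.
from IPython import get_ipython


class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y


def point_to_html(obj):
    # Return raw data in the formatter's format (here, an HTML snippet).
    return f"<b>Point({obj.x}, {obj.y})</b>"


ip = get_ipython()
html_formatter = ip.display_formatter.formatters["text/html"]

# for_type returns the previously registered callable (None here),
# which allows restoring the old formatter later.
old = html_formatter.for_type(Point, point_to_html)

# format() returns (format_dict, metadata_dict); include= restricts the mime types.
data, metadata = ip.display_formatter.format(Point(1, 2), include={"text/html"})
print(data)  # {'text/html': '<b>Point(1, 2)</b>'}
```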
diff --git a/spaces/bioriAsaeru/text-to-voice/Dfe 520tx Driver For Vmware Download How to Install and Configure Fast Ethernet PCI Adapter.md b/spaces/bioriAsaeru/text-to-voice/Dfe 520tx Driver For Vmware Download How to Install and Configure Fast Ethernet PCI Adapter.md
deleted file mode 100644
index 5e739c1c0121bc935ec6fa7927a09480159b3bd1..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Dfe 520tx Driver For Vmware Download How to Install and Configure Fast Ethernet PCI Adapter.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
desktop pc games free downloadsony vegas pro 13 full version free download 32 bit free downloadmicrosoft office professional plus 2013 confirmation id crack free downloadsonos controller pc windows 7 downloadmicrosoft project 2016 free download full version product key free downloadautocad 2000 free download for windows 10windows tools free download freewindows 10 theme for windows 8.1 free download freevmware workstation 12 on windows 10 host free downloadneed for speed download pc windows 7
criminal case game download for pc latest adobe reader for windows 10 download chrome latest version download for windows 10 64 bit hp wireless network adapter driver download windows 10 intel centrino ultimate n 6300 agn driver windows 7
-
]windows 7 xbox 360 wireless controller driver free download
free bootable windows 10 download need for speed pro street pc windows 10 download download google sync for windows 10 free download windows 10 home full version download game raft indonesia pc
-
]windows 7 format cd download free download
samsung allshare download for windows 10 free download blackberry driver for windows xp free download black ops zombies free pc amd radeon hd 7520g driver download windows 10 msi a78m-e35 drivers
-
-
]gmail windows 10 download html editor download for windows 10download windows 10 usb installationdownload windows 10 pro iso 64 bit espanol megamct windows 10 downloadadobe pdf printer driver windows 10 64 bit download ]microsoft visual studio 2010 ultimate setup free download for windows 7 free download download game war thunder pchp scanjet 4670 windows 7 driver download freeaddress book software for pc free downloadmovie maker windows 7 free download full version 2014 freemicrosoft bluetooth enumerator driver windows 10 download ]control pc game free download free dropbox download for windows 10download windows 10 microphone drivervcsystemtraybest pc game download website freerndis driver windows 10 download free ]find objects games free download for pc asus turbov download windows 10corsair link usb dongle driverdownload torrent application for windows 10elan input device driver windows 10 downloadblur car game pc download ]gta 4 download for pc windows 7 highly compressed intel 82566dm 2 gigabit network connectionaura sync download windows 10 64 bitclash of clans installer free download for pcsony vegas pro free download windows 8 freefree download gta 5 pc game highly compressed ]best war games pc free download cara download driver wifi windows 10ms lifecam vx 2000best offline football games for pc free downloadamd radeon hd 6700m driversgoogle chrome download for pc windows 8.1 64 bit
-
]download hdmi driver for windows 10 lenovo ]epson l120 printer installer free download for windows 10 brain challenge games for pc free downloadacpi ven_sny&dev_5001 windows 10 driver downloaddownload doxygen for windows 10lexmark x4580 driver download windows 7 freefree download opera for windows xp sp2 free ]mastercam 2018 education free download dayz game pc free downloaddownload tap windows adapter v9 freewhere to download windows movie maker for windows 7 freebluestacks download for windows 10 32 bitcricket world cup 2018 games download pc ]microsoft office professional plus 2007 rar free download download windows 8.1 free iso free911 operator game pc downloadxmllite.dll download for windows xp freedragon ball online download free pc4 pics 1 word pc game free download ]windows 10 home 64 bit usb stick download free download download free download manager pcdeus ex human revolution free download pc full versiondownload java 64 bit windows 10 prostuffit free download windows freedownload ipadian for windows 10 ]cue club 2 pc game free download download bluestacks app for pc windows 7windows 7rc download freerealtek rtl8190 driver windows 7crash bandicoot race game pc free downloadbluestacks download for windows 10 softonic ]bijoy software free download for pc
adobe audition free download windows 10can i re download windows 10adobe pagemaker 6.0 free download for windows xp freedownload bluetooth audio driver for windows 10pci\ven_1b21&dev_1142&subsys_85bf1043&rev_00
-
download flash player for safari windows free netgear 108 mbps wireless pci adapter wg311t djvu reader free download for windows 10 64 bit canon lide 120 driver windows 10 64 bit download photo editor free download for pc windows 10
-
microsoft office enterprise 2007 x64 free download autodesk revit 2019 login free download itunes latest version for windows 10 free download free download hp scanjet 2300c driver for windows 7 free windows 10 kiosk mode printing free download microsoft office 2013 windows 10 free download free download download windows easy transfer for xp free download xp mode windows 8 free car games for pc free download free pascal windows 7 download free
-
]wordpad 2007 free download for windows 7 free
ms paint free download for windows xp free windows 7 sp2 full download free amazon pc games digital download download xbox 360 wireless controller driver windows 10 pool game download for pc windows 7
-
]adobe illustrator cc 2017 highly compressed download free download camstudio free download for windows 10download game booster 4x faster pcdownload chrome remote desktop windows 10teamviewer download for windows vista free freeultra ebook reader ]download opera mini para pc windows 7 epiphan dvi2usb solodownload windows 10 lite isogta 5 download for pc windows 7 32 bitcandy crush saga download pc windows 7download games on pc windows 10 ]change home networking connection windows 10 free download circus video game free download for pcintel iris pro p580invision free download for windows 10autocad 2007 download for pc windows 10call of duty free download for windows 10 ]microsoft office 2016 pro free download for windows 10 free download hp 1050 printer driver for windows 8 free download freedownload driver canon lbp6000b for windows 10 64 bitwindows 8 download center microsoft freefree bingo games download windows 7 freedownload docker for windows 10 home 64 bit ]microsoft office outlook 2007 out of office reply setup free download bus driver simulator 2019 free download pcicawebwrapper.msi download windows 7 freewireless n 2230 driverdesperate housewives the game full free download pcdownload pes 2018 demo pc free ]download sdk windows phone 8.1 free eclipse oxygen ide download for windows 10 64 bitgoogle hangouts download for pc windows 10hp windows 10 drivers free downloadminecraft pc windows 8 download freecsr racing 2 pc game download
-
]avakin life free download pc ]microsoft visual c++ redistributable for visual studio 2015 (64-bit) free download download removewat windows xp sp3 freedownload wmc for windows 8.1 freedownload microsoft visual studio 2008 for windows 10bus simulator for pc windows 10 downloadthx trustudio pro download windows 10 ]coreldraw graphics suite 2017 full version free download wintv hvr 950q software downloadmicrosoft excel 2010 free download for windows xp full version freefree download facebook for pc windows 8deathtrap dungeon pc game free downloadthe simpsons hit and run pc download windows 10 ]download timepass games for pc free laptop games download windows 10bootsect exe windows 10 downloadpci ven_1106&dev_3483blur download free for pc50 cent bulletproof pc game download free ]nuance pdf converter professional 7 italiano download free download acrobat reader full version free download for windows 10cpu temperature windows 10 downloadfree download movie maker windows 10 64 bitfree download pc tuneup for windows 7mkvtoolnix free download for windows free ]windows 7 professional 32 bit ethernet driver download free download adobe flash player download for windows vista free freedownload amazon prime windows 10driveclub pc game free download full versiondownload motioninjoy windows 8.1 freeamd radeon hd 6550d driver ]logic pro x on app store free download
easyworship download for windows 10download software center windows 10download lenovo energy management windows 10 freedownload firefox for windows phone freeskype download windows xp service pack 2 free
-
microsoft office 2010 repair did not complete successfully free download pci ethernet controller driver windows xp download free microsoft office 2016 product key free windows 10 free download keytweak windows 10 download google chrome latest version download for windows 10 64 bit windows 7 ultimate 64 bit iso download filehippo free download download aplikasi microsoft office word 2007 apk free download microsoft outlook 2016 not implemented windows 10 free download paperport windows xp free download free desktop gadgets for windows 7 digital clock free download free download
-
]malwarebytes anti malware free download for windows 10
fensx fast track ultra driver gujarati keyboard download for pc windows 7 dlars400 bluestacks 64 bit free download for windows 10
-
microsoft office starter windows 7 free download free download gpedit for windows 10 home hp laserjet p4014n drivers free download photoshop cs7 for windows 10 battlefield 3 free download full version for pc windows 10
-
]que diferencia hay entre windows 10 pro y pro n free download download kik messenger for windows 10realtek rtl8102e drivermicrosoft live mail download for windows 7 freelenovo y50 70 driverswindows embedded posready 7 download iso free ]download google drive in windows 10 download video graphics driver for windows 108085 simulator for windows 7 32 bit free download freecs go download for windows 10 64 bitconstruction games for pc free downloadcan case xl ]microsoft office 2016 pro plus review free download download lenovo solution center windows 10windows office word free download freetruecaller for pc windows 10 free downloadautocad 2010 free download full version with crack 64 bit for windows 7 freeintel trusted execution engine interface driver ]microsoft office professional 2003 keygen free download desperados 2 pc game free downloadbatman arkham origins blackgate pc game downloadrythem rascalflower game download pcmemtweakit ]download windows 10 tutorial adobe illustrator cs6 free download pcfontes para windows xp download gratis freemobilepre driverlatha tamil font download for windows 10free pc games download 2017 ]vmware workstation 12 install windows server 2012 r2 free download free microsoft office 2010 download for windows 10bullguard antivirus free download for windows 10clue classic pc game free downloadpc remote free download for windows 10hp ledm driver
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Foto Bugil Arandela Host Berburu Trans 7.md b/spaces/bioriAsaeru/text-to-voice/Foto Bugil Arandela Host Berburu Trans 7.md
deleted file mode 100644
index eb17d19cffd06a8e576bdecaf2f16882b31c1e4b..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Foto Bugil Arandela Host Berburu Trans 7.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Download Action Jackson 3 Full Movie in Blu-Ray Quality Hindi Movies with Subtitles.md b/spaces/cihyFjudo/fairness-paper-search/Download Action Jackson 3 Full Movie in Blu-Ray Quality Hindi Movies with Subtitles.md
deleted file mode 100644
index 447e647fe53b566bb4e44eec3d5fe46fc991096a..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Download Action Jackson 3 Full Movie in Blu-Ray Quality Hindi Movies with Subtitles.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Action Jackson 3 full movie download blu-ray hindi movies
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Download BEST Game Gundam Ultimate Knight Windom Xp.md b/spaces/cihyFjudo/fairness-paper-search/Download BEST Game Gundam Ultimate Knight Windom Xp.md
deleted file mode 100644
index e1a826441466088c8ce1aca2756d925842166442..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Download BEST Game Gundam Ultimate Knight Windom Xp.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
We may have multiple downloads for a few games when different versions are available. Also, we try to upload manuals and extra documentation when possible. If you have additional files to contribute or have the game in another language, please contact us!
On November 10, 2016, the website of Project Windom was closed, officially marking the end of the Windom game series. Ultimate Knight Windom XP and its expansion pack (PowerUp Kit) are still available for digital download on some Japanese websites.
-
The game allows custom pilots made by players from short audio clips and images. Audio clips are categorised and played at random depending on the situation the player is currently in. These pilots can be used in single- or multiplayer mode. During multiplayer gameplay, custom pilots from other players are visible and audible; however, they are not downloaded and are only available to players who already have the pilot's files. Since the release of the PowerUp Kit expansion, the game ships with a pilot maker program to help players create their own animated pilots.
-
Ultimate Knight Windom XP Installer - Hello friends, today I want to share the gundam game Ultimate Knight Windom XP Installer with the full PowerUp Kit. I have shared this game before (see Ultimate Knight Windom XP), but the version I am sharing this time is simpler to install and smaller in size than the earlier one.
-
-
Ultimate Knight Windom XP is a 3D action battle game in the style of gundam battles, developed by gundam hobbyists. Ultimate Knight Windom XP Installer is open source, and additional mecha mods, many of which are shared for free on moddb.com, can still be added to it. The game features gundam robot characters with a wide variety of battle abilities.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Download the hindi movie Zamane Se Kya Darna full movie for free A guide to the best sources.md b/spaces/cihyFjudo/fairness-paper-search/Download the hindi movie Zamane Se Kya Darna full movie for free A guide to the best sources.md
deleted file mode 100644
index bc2210ec34d52b1d04caf303e6573ac921717d2f..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Download the hindi movie Zamane Se Kya Darna full movie for free A guide to the best sources.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
Welcome to MovieMora.com with the new address Bookmark the URL, because you don't have to search to another place anymore to freely watch and download the movie Zamane Se Kya Darna. Direct link for downloading or online streaming movie Zamane Se Kya Darna on your mobile phone or laptop.
-
hindi movie Zamane Se Kya Darna full movie free download
On the website HDMoviesLatest.com URL , you can download the movie Zamane Se Kya Darna for free. But, we never ask you/force you to download. It's your choice and responsibility for keeping the illegal video file to yourself.
-
download Sanjay Dutt Hindi Action Movies unlimited Movies and videos Download Here.Sanjay Dutt Hindi Action Movies Hd,3gp. mp4 320p and More Videos You Can Download Easyly. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Ee Pattanathil Bhootham Mp3 Songs Download.md b/spaces/cihyFjudo/fairness-paper-search/Ee Pattanathil Bhootham Mp3 Songs Download.md
deleted file mode 100644
index 8fe44a92423fcf08b781236c65ebe486d7c08c61..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Ee Pattanathil Bhootham Mp3 Songs Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Tamil Chaar Sahibzaade - Rise Of Banda Singh Bahad augurali immediato r The Soundtrack and Songs of the Movie.md b/spaces/cihyFjudo/fairness-paper-search/Tamil Chaar Sahibzaade - Rise Of Banda Singh Bahad augurali immediato r The Soundtrack and Songs of the Movie.md
deleted file mode 100644
index 82c7a20b039ab5439104c7a203fde1e79ee20b78..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Tamil Chaar Sahibzaade - Rise Of Banda Singh Bahad augurali immediato r The Soundtrack and Songs of the Movie.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Tamil Chaar Sahibzaade - Rise Of Banda Singh Bahad augurali immediato r
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attr/_make.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attr/_make.py
deleted file mode 100644
index d72f738eeca66ea96ec836f57720a7f5d6ec5169..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attr/_make.py
+++ /dev/null
@@ -1,2987 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-import copy
-import enum
-import linecache
-import sys
-import types
-import typing
-
-from operator import itemgetter
-
-# We need to import _compat itself in addition to the _compat members to avoid
-# having the thread-local in the globals here.
-from . import _compat, _config, setters
-from ._compat import (
- PY310,
- _AnnotationExtractor,
- get_generic_base,
- set_closure_cell,
-)
-from .exceptions import (
- DefaultAlreadySetError,
- FrozenInstanceError,
- NotAnAttrsClassError,
- UnannotatedAttributeError,
-)
-
-
-# This is used at least twice, so cache it here.
-_obj_setattr = object.__setattr__
-_init_converter_pat = "__attr_converter_%s"
-_init_factory_pat = "__attr_factory_%s"
-_classvar_prefixes = (
- "typing.ClassVar",
- "t.ClassVar",
- "ClassVar",
- "typing_extensions.ClassVar",
-)
-# we don't use a double-underscore prefix because that triggers
-# name mangling when trying to create a slot for the field
-# (when slots=True)
-_hash_cache_field = "_attrs_cached_hash"
-
-_empty_metadata_singleton = types.MappingProxyType({})
-
-# Unique object for unequivocal getattr() defaults.
-_sentinel = object()
-
-_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate)
-
-
-class _Nothing(enum.Enum):
- """
- Sentinel to indicate the lack of a value when ``None`` is ambiguous.
-
- If extending attrs, you can use ``typing.Literal[NOTHING]`` to show
- that a value may be ``NOTHING``.
-
- .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
- .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant.
- """
-
- NOTHING = enum.auto()
-
- def __repr__(self):
- return "NOTHING"
-
- def __bool__(self):
- return False
-
-
-NOTHING = _Nothing.NOTHING
-"""
-Sentinel to indicate the lack of a value when ``None`` is ambiguous.
-"""
-
-
-class _CacheHashWrapper(int):
- """
- An integer subclass that pickles / copies as None
-
- This is used for non-slots classes with ``cache_hash=True``, to avoid
- serializing a potentially (even likely) invalid hash value. Since ``None``
- is the default value for uncalculated hashes, whenever this is copied,
- the copy's value for the hash should automatically reset.
-
- See GH #613 for more details.
- """
-
- def __reduce__(self, _none_constructor=type(None), _args=()):
- return _none_constructor, _args
-
-
-def attrib(
- default=NOTHING,
- validator=None,
- repr=True,
- cmp=None,
- hash=None,
- init=True,
- metadata=None,
- type=None,
- converter=None,
- factory=None,
- kw_only=False,
- eq=None,
- order=None,
- on_setattr=None,
- alias=None,
-):
- """
- Create a new attribute on a class.
-
- .. warning::
-
- Does *not* do anything unless the class is also decorated with
- `attr.s` / `attrs.define` / et cetera!
-
- Please consider using `attrs.field` in new code (``attr.ib`` will *never*
- go away, though).
-
- :param default: A value that is used if an *attrs*-generated ``__init__``
- is used and no value is passed while instantiating or the attribute is
- excluded using ``init=False``.
-
- If the value is an instance of `attrs.Factory`, its callable will be
- used to construct a new value (useful for mutable data types like lists
- or dicts).
-
- If a default is not set (or set manually to `attrs.NOTHING`), a value
- *must* be supplied when instantiating; otherwise a `TypeError`
- will be raised.
-
- The default can also be set using decorator notation as shown below.
-
- :type default: Any value
-
- :param callable factory: Syntactic sugar for
- ``default=attr.Factory(factory)``.
-
- :param validator: `callable` that is called by *attrs*-generated
- ``__init__`` methods after the instance has been initialized. They
- receive the initialized instance, the :func:`~attrs.Attribute`, and the
- passed value.
-
- The return value is *not* inspected so the validator has to throw an
- exception itself.
-
- If a `list` is passed, its items are treated as validators and must
- all pass.
-
- Validators can be globally disabled and re-enabled using
- `attrs.validators.get_disabled` / `attrs.validators.set_disabled`.
-
- The validator can also be set using decorator notation as shown below.
-
- :type validator: `callable` or a `list` of `callable`\\ s.
-
- :param repr: Include this attribute in the generated ``__repr__``
- method. If ``True``, include the attribute; if ``False``, omit it. By
- default, the built-in ``repr()`` function is used. To override how the
- attribute value is formatted, pass a ``callable`` that takes a single
- value and returns a string. Note that the resulting string is used
- as-is, i.e. it will be used directly *instead* of calling ``repr()``
- (the default).
- :type repr: a `bool` or a `callable` to use a custom function.
-
- :param eq: If ``True`` (default), include this attribute in the
- generated ``__eq__`` and ``__ne__`` methods that check two instances
- for equality. To override how the attribute value is compared,
- pass a ``callable`` that takes a single value and returns the value
- to be compared.
- :type eq: a `bool` or a `callable`.
-
- :param order: If ``True`` (default), include this attribute in the
- generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
- To override how the attribute value is ordered,
- pass a ``callable`` that takes a single value and returns the value
- to be ordered.
- :type order: a `bool` or a `callable`.
-
- :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
- same value. Must not be mixed with *eq* or *order*.
- :type cmp: a `bool` or a `callable`.
-
- :param Optional[bool] hash: Include this attribute in the generated
- ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This
- is the correct behavior according to the Python spec. Setting this value
- to anything else than ``None`` is *discouraged*.
- :param bool init: Include this attribute in the generated ``__init__``
- method. It is possible to set this to ``False`` and set a default
- value. In that case this attribute is unconditionally initialized
- with the specified default value or factory.
- :param callable converter: `callable` that is called by
- *attrs*-generated ``__init__`` methods to convert attribute's value
- to the desired format. It is given the passed-in value, and the
- returned value will be used as the new value of the attribute. The
- value is converted before being passed to the validator, if any.
- :param metadata: An arbitrary mapping, to be used by third-party
- components. See `extending-metadata`.
-
- :param type: The type of the attribute. Nowadays, the preferred method to
- specify the type is using a variable annotation (see :pep:`526`).
- This argument is provided for backward compatibility.
- Regardless of the approach used, the type will be stored on
- ``Attribute.type``.
-
- Please note that *attrs* doesn't do anything with this metadata by
- itself. You can use it as part of your own code or for
- `static type checking `.
- :param kw_only: Make this attribute keyword-only in the generated
- ``__init__`` (if ``init`` is ``False``, this parameter is ignored).
- :param on_setattr: Allows to overwrite the *on_setattr* setting from
- `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used.
- Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this
- attribute -- regardless of the setting in `attr.s`.
- :type on_setattr: `callable`, or a list of callables, or `None`, or
- `attrs.setters.NO_OP`
- :param Optional[str] alias: Override this attribute's parameter name in the
- generated ``__init__`` method. If left `None`, default to ``name``
- stripped of leading underscores. See `private-attributes`.
-
- .. versionadded:: 15.2.0 *convert*
- .. versionadded:: 16.3.0 *metadata*
- .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
- .. versionchanged:: 17.1.0
- *hash* is ``None`` and therefore mirrors *eq* by default.
- .. versionadded:: 17.3.0 *type*
- .. deprecated:: 17.4.0 *convert*
- .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
- *convert* to achieve consistency with other noun-based arguments.
- .. versionadded:: 18.1.0
- ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
- .. versionadded:: 18.2.0 *kw_only*
- .. versionchanged:: 19.2.0 *convert* keyword argument removed.
- .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
- .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
- .. versionadded:: 19.2.0 *eq* and *order*
- .. versionadded:: 20.1.0 *on_setattr*
- .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
- .. versionchanged:: 21.1.0
- *eq*, *order*, and *cmp* also accept a custom callable
- .. versionchanged:: 21.1.0 *cmp* undeprecated
- .. versionadded:: 22.2.0 *alias*
- """
- eq, eq_key, order, order_key = _determine_attrib_eq_order(
- cmp, eq, order, True
- )
-
- if hash is not None and hash is not True and hash is not False:
- raise TypeError(
- "Invalid value for hash. Must be True, False, or None."
- )
-
- if factory is not None:
- if default is not NOTHING:
- raise ValueError(
- "The `default` and `factory` arguments are mutually "
- "exclusive."
- )
- if not callable(factory):
- raise ValueError("The `factory` argument must be a callable.")
- default = Factory(factory)
-
- if metadata is None:
- metadata = {}
-
- # Apply syntactic sugar by auto-wrapping.
- if isinstance(on_setattr, (list, tuple)):
- on_setattr = setters.pipe(*on_setattr)
-
- if validator and isinstance(validator, (list, tuple)):
- validator = and_(*validator)
-
- if converter and isinstance(converter, (list, tuple)):
- converter = pipe(*converter)
-
- return _CountingAttr(
- default=default,
- validator=validator,
- repr=repr,
- cmp=None,
- hash=hash,
- init=init,
- converter=converter,
- metadata=metadata,
- type=type,
- kw_only=kw_only,
- eq=eq,
- eq_key=eq_key,
- order=order,
- order_key=order_key,
- on_setattr=on_setattr,
- alias=alias,
- )
-
-
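As a hedged usage sketch of the `attrib()` API documented above (the `Point` class below is made up for illustration), the main keyword arguments and the decorator-based validator hook fit together roughly like this:

```python
# Illustrative sketch only: typical attrib()/attr.s usage.
import attr


@attr.s
class Point:
    x = attr.ib()                                   # mandatory attribute
    y = attr.ib(default=0)                          # plain default value
    tags = attr.ib(factory=list)                    # sugar for default=attr.Factory(list)
    scale = attr.ib(converter=float, kw_only=True)  # converted before validation

    @scale.validator
    def _scale_positive(self, attribute, value):
        # Validators raise; their return value is ignored.
        if value <= 0:
            raise ValueError("scale must be positive")


p = Point(1, y=2, scale="3")
print(p)  # Point(x=1, y=2, tags=[], scale=3.0)
```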
-def _compile_and_eval(script, globs, locs=None, filename=""):
- """
- "Exec" the script with the given global (globs) and local (locs) variables.
- """
- bytecode = compile(script, filename, "exec")
- eval(bytecode, globs, locs)
-
-
-def _make_method(name, script, filename, globs):
- """
- Create the method with the script given and return the method object.
- """
- locs = {}
-
- # In order for debuggers like PDB to be able to step through the code,
- # we add a fake linecache entry.
- count = 1
- base_filename = filename
- while True:
- linecache_tuple = (
- len(script),
- None,
- script.splitlines(True),
- filename,
- )
- old_val = linecache.cache.setdefault(filename, linecache_tuple)
- if old_val == linecache_tuple:
- break
- else:
- filename = f"{base_filename[:-1]}-{count}>"
- count += 1
-
- _compile_and_eval(script, globs, locs, filename)
-
- return locs[name]
-
-
-def _make_attr_tuple_class(cls_name, attr_names):
- """
- Create a tuple subclass to hold `Attribute`s for an `attrs` class.
-
- The subclass is a bare tuple with properties for names.
-
- class MyClassAttributes(tuple):
- __slots__ = ()
- x = property(itemgetter(0))
- """
- attr_class_name = f"{cls_name}Attributes"
- attr_class_template = [
- f"class {attr_class_name}(tuple):",
- " __slots__ = ()",
- ]
- if attr_names:
- for i, attr_name in enumerate(attr_names):
- attr_class_template.append(
- f" {attr_name} = _attrs_property(_attrs_itemgetter({i}))"
- )
- else:
- attr_class_template.append(" pass")
- globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
- _compile_and_eval("\n".join(attr_class_template), globs)
- return globs[attr_class_name]
-
-
-# Tuple class for extracted attributes from a class definition.
-# `base_attrs` is a subset of `attrs`.
-_Attributes = _make_attr_tuple_class(
- "_Attributes",
- [
- # all attributes to build dunder methods for
- "attrs",
- # attributes that have been inherited
- "base_attrs",
- # map inherited attributes to their originating classes
- "base_attrs_map",
- ],
-)
-
-
-def _is_class_var(annot):
- """
- Check whether *annot* is a typing.ClassVar.
-
- The string comparison hack is used to avoid evaluating all string
- annotations which would put attrs-based classes at a performance
- disadvantage compared to plain old classes.
- """
- annot = str(annot)
-
- # Annotation can be quoted.
- if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
- annot = annot[1:-1]
-
- return annot.startswith(_classvar_prefixes)
-
-
-def _has_own_attribute(cls, attrib_name):
- """
- Check whether *cls* defines *attrib_name* (and doesn't just inherit it).
- """
- attr = getattr(cls, attrib_name, _sentinel)
- if attr is _sentinel:
- return False
-
- for base_cls in cls.__mro__[1:]:
- a = getattr(base_cls, attrib_name, None)
- if attr is a:
- return False
-
- return True
-
-
-def _get_annotations(cls):
- """
- Get annotations for *cls*.
- """
- if _has_own_attribute(cls, "__annotations__"):
- return cls.__annotations__
-
- return {}
-
-
-def _collect_base_attrs(cls, taken_attr_names):
- """
- Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
- """
- base_attrs = []
- base_attr_map = {} # A dictionary of base attrs to their classes.
-
- # Traverse the MRO and collect attributes.
- for base_cls in reversed(cls.__mro__[1:-1]):
- for a in getattr(base_cls, "__attrs_attrs__", []):
- if a.inherited or a.name in taken_attr_names:
- continue
-
- a = a.evolve(inherited=True)
- base_attrs.append(a)
- base_attr_map[a.name] = base_cls
-
- # For each name, only keep the freshest definition i.e. the furthest at the
- # back. base_attr_map is fine because it gets overwritten with every new
- # instance.
- filtered = []
- seen = set()
- for a in reversed(base_attrs):
- if a.name in seen:
- continue
- filtered.insert(0, a)
- seen.add(a.name)
-
- return filtered, base_attr_map
-
-
-def _collect_base_attrs_broken(cls, taken_attr_names):
- """
- Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
-
- N.B. *taken_attr_names* will be mutated.
-
- Adhere to the old incorrect behavior.
-
- Notably it collects from the front and considers inherited attributes which
- leads to the buggy behavior reported in #428.
- """
- base_attrs = []
- base_attr_map = {} # A dictionary of base attrs to their classes.
-
- # Traverse the MRO and collect attributes.
- for base_cls in cls.__mro__[1:-1]:
- for a in getattr(base_cls, "__attrs_attrs__", []):
- if a.name in taken_attr_names:
- continue
-
- a = a.evolve(inherited=True)
- taken_attr_names.add(a.name)
- base_attrs.append(a)
- base_attr_map[a.name] = base_cls
-
- return base_attrs, base_attr_map
-
-
-def _transform_attrs(
- cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
-):
- """
- Transform all `_CountingAttr`s on a class into `Attribute`s.
-
- If *these* is passed, use that and don't look for them on the class.
-
- *collect_by_mro* is True, collect them in the correct MRO order, otherwise
- use the old -- incorrect -- order. See #428.
-
- Return an `_Attributes`.
- """
- cd = cls.__dict__
- anns = _get_annotations(cls)
-
- if these is not None:
- ca_list = [(name, ca) for name, ca in these.items()]
- elif auto_attribs is True:
- ca_names = {
- name
- for name, attr in cd.items()
- if isinstance(attr, _CountingAttr)
- }
- ca_list = []
- annot_names = set()
- for attr_name, type in anns.items():
- if _is_class_var(type):
- continue
- annot_names.add(attr_name)
- a = cd.get(attr_name, NOTHING)
-
- if not isinstance(a, _CountingAttr):
- if a is NOTHING:
- a = attrib()
- else:
- a = attrib(default=a)
- ca_list.append((attr_name, a))
-
- unannotated = ca_names - annot_names
- if len(unannotated) > 0:
- raise UnannotatedAttributeError(
- "The following `attr.ib`s lack a type annotation: "
- + ", ".join(
- sorted(unannotated, key=lambda n: cd.get(n).counter)
- )
- + "."
- )
- else:
- ca_list = sorted(
- (
- (name, attr)
- for name, attr in cd.items()
- if isinstance(attr, _CountingAttr)
- ),
- key=lambda e: e[1].counter,
- )
-
- own_attrs = [
- Attribute.from_counting_attr(
- name=attr_name, ca=ca, type=anns.get(attr_name)
- )
- for attr_name, ca in ca_list
- ]
-
- if collect_by_mro:
- base_attrs, base_attr_map = _collect_base_attrs(
- cls, {a.name for a in own_attrs}
- )
- else:
- base_attrs, base_attr_map = _collect_base_attrs_broken(
- cls, {a.name for a in own_attrs}
- )
-
- if kw_only:
- own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
- base_attrs = [a.evolve(kw_only=True) for a in base_attrs]
-
- attrs = base_attrs + own_attrs
-
- # Mandatory vs non-mandatory attr order only matters when they are part of
- # the __init__ signature and when they aren't kw_only (which are moved to
- # the end and can be mandatory or non-mandatory in any order, as they will
- # be specified as keyword args anyway). Check the order of those attrs:
- had_default = False
- for a in (a for a in attrs if a.init is not False and a.kw_only is False):
- if had_default is True and a.default is NOTHING:
- raise ValueError(
- "No mandatory attributes allowed after an attribute with a "
- f"default value or factory. Attribute in question: {a!r}"
- )
-
- if had_default is False and a.default is not NOTHING:
- had_default = True
-
- if field_transformer is not None:
- attrs = field_transformer(cls, attrs)
-
- # Resolve default field alias after executing field_transformer.
- # This allows field_transformer to differentiate between explicit vs
- # default aliases and supply their own defaults.
- attrs = [
- a.evolve(alias=_default_init_alias_for(a.name)) if not a.alias else a
- for a in attrs
- ]
-
- # Create AttrsClass *after* applying the field_transformer since it may
- # add or remove attributes!
- attr_names = [a.name for a in attrs]
- AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
-
- return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map))
-
-
-def _frozen_setattrs(self, name, value):
- """
- Attached to frozen classes as __setattr__.
- """
- if isinstance(self, BaseException) and name in (
- "__cause__",
- "__context__",
- "__traceback__",
- ):
- BaseException.__setattr__(self, name, value)
- return
-
- raise FrozenInstanceError()
-
-
-def _frozen_delattrs(self, name):
- """
- Attached to frozen classes as __delattr__.
- """
- raise FrozenInstanceError()
-
-
-class _ClassBuilder:
- """
- Iteratively build *one* class.
- """
-
- __slots__ = (
- "_attr_names",
- "_attrs",
- "_base_attr_map",
- "_base_names",
- "_cache_hash",
- "_cls",
- "_cls_dict",
- "_delete_attribs",
- "_frozen",
- "_has_pre_init",
- "_has_post_init",
- "_is_exc",
- "_on_setattr",
- "_slots",
- "_weakref_slot",
- "_wrote_own_setattr",
- "_has_custom_setattr",
- )
-
- def __init__(
- self,
- cls,
- these,
- slots,
- frozen,
- weakref_slot,
- getstate_setstate,
- auto_attribs,
- kw_only,
- cache_hash,
- is_exc,
- collect_by_mro,
- on_setattr,
- has_custom_setattr,
- field_transformer,
- ):
- attrs, base_attrs, base_map = _transform_attrs(
- cls,
- these,
- auto_attribs,
- kw_only,
- collect_by_mro,
- field_transformer,
- )
-
- self._cls = cls
- self._cls_dict = dict(cls.__dict__) if slots else {}
- self._attrs = attrs
- self._base_names = {a.name for a in base_attrs}
- self._base_attr_map = base_map
- self._attr_names = tuple(a.name for a in attrs)
- self._slots = slots
- self._frozen = frozen
- self._weakref_slot = weakref_slot
- self._cache_hash = cache_hash
- self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
- self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
- self._delete_attribs = not bool(these)
- self._is_exc = is_exc
- self._on_setattr = on_setattr
-
- self._has_custom_setattr = has_custom_setattr
- self._wrote_own_setattr = False
-
- self._cls_dict["__attrs_attrs__"] = self._attrs
-
- if frozen:
- self._cls_dict["__setattr__"] = _frozen_setattrs
- self._cls_dict["__delattr__"] = _frozen_delattrs
-
- self._wrote_own_setattr = True
- elif on_setattr in (
- _ng_default_on_setattr,
- setters.validate,
- setters.convert,
- ):
- has_validator = has_converter = False
- for a in attrs:
- if a.validator is not None:
- has_validator = True
- if a.converter is not None:
- has_converter = True
-
- if has_validator and has_converter:
- break
- if (
- (
- on_setattr == _ng_default_on_setattr
- and not (has_validator or has_converter)
- )
- or (on_setattr == setters.validate and not has_validator)
- or (on_setattr == setters.convert and not has_converter)
- ):
- # If class-level on_setattr is set to convert + validate, but
- # there's no field to convert or validate, pretend like there's
- # no on_setattr.
- self._on_setattr = None
-
- if getstate_setstate:
- (
- self._cls_dict["__getstate__"],
- self._cls_dict["__setstate__"],
- ) = self._make_getstate_setstate()
-
- def __repr__(self):
- return f"<_ClassBuilder(cls={self._cls.__name__})>"
-
- if PY310:
- import abc
-
- def build_class(self):
- """
- Finalize class based on the accumulated configuration.
-
- Builder cannot be used after calling this method.
- """
- if self._slots is True:
- return self._create_slots_class()
-
- return self.abc.update_abstractmethods(
- self._patch_original_class()
- )
-
- else:
-
- def build_class(self):
- """
- Finalize class based on the accumulated configuration.
-
- Builder cannot be used after calling this method.
- """
- if self._slots is True:
- return self._create_slots_class()
-
- return self._patch_original_class()
-
- def _patch_original_class(self):
- """
- Apply accumulated methods and return the class.
- """
- cls = self._cls
- base_names = self._base_names
-
- # Clean class of attribute definitions (`attr.ib()`s).
- if self._delete_attribs:
- for name in self._attr_names:
- if (
- name not in base_names
- and getattr(cls, name, _sentinel) is not _sentinel
- ):
- try:
- delattr(cls, name)
- except AttributeError:
- # This can happen if a base class defines a class
- # variable and we want to set an attribute with the
- # same name by using only a type annotation.
- pass
-
- # Attach our dunder methods.
- for name, value in self._cls_dict.items():
- setattr(cls, name, value)
-
- # If we've inherited an attrs __setattr__ and don't write our own,
- # reset it to object's.
- if not self._wrote_own_setattr and getattr(
- cls, "__attrs_own_setattr__", False
- ):
- cls.__attrs_own_setattr__ = False
-
- if not self._has_custom_setattr:
- cls.__setattr__ = _obj_setattr
-
- return cls
-
- def _create_slots_class(self):
- """
- Build and return a new class with a `__slots__` attribute.
- """
- cd = {
- k: v
- for k, v in self._cls_dict.items()
- if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
- }
-
- # If our class doesn't have its own implementation of __setattr__
- # (either from the user or by us), check the bases, if one of them has
- # an attrs-made __setattr__, that needs to be reset. We don't walk the
- # MRO because we only care about our immediate base classes.
- # XXX: This can be confused by subclassing a slotted attrs class with
- # XXX: a non-attrs class and subclass the resulting class with an attrs
- # XXX: class. See `test_slotted_confused` for details. For now that's
- # XXX: OK with us.
- if not self._wrote_own_setattr:
- cd["__attrs_own_setattr__"] = False
-
- if not self._has_custom_setattr:
- for base_cls in self._cls.__bases__:
- if base_cls.__dict__.get("__attrs_own_setattr__", False):
- cd["__setattr__"] = _obj_setattr
- break
-
- # Traverse the MRO to collect existing slots
- # and check for an existing __weakref__.
- existing_slots = dict()
- weakref_inherited = False
- for base_cls in self._cls.__mro__[1:-1]:
- if base_cls.__dict__.get("__weakref__", None) is not None:
- weakref_inherited = True
- existing_slots.update(
- {
- name: getattr(base_cls, name)
- for name in getattr(base_cls, "__slots__", [])
- }
- )
-
- base_names = set(self._base_names)
-
- names = self._attr_names
- if (
- self._weakref_slot
- and "__weakref__" not in getattr(self._cls, "__slots__", ())
- and "__weakref__" not in names
- and not weakref_inherited
- ):
- names += ("__weakref__",)
-
- # We only add the names of attributes that aren't inherited.
- # Setting __slots__ to inherited attributes wastes memory.
- slot_names = [name for name in names if name not in base_names]
-        # There may be slots for attributes of the current class that are
-        # already defined in parent classes. As their descriptors may be
-        # overridden by a child class, we collect them here and update the
-        # class dict.
- reused_slots = {
- slot: slot_descriptor
- for slot, slot_descriptor in existing_slots.items()
- if slot in slot_names
- }
- slot_names = [name for name in slot_names if name not in reused_slots]
- cd.update(reused_slots)
- if self._cache_hash:
- slot_names.append(_hash_cache_field)
- cd["__slots__"] = tuple(slot_names)
-
- cd["__qualname__"] = self._cls.__qualname__
-
- # Create new class based on old class and our methods.
- cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
-
-        # The following is a fix for the `__class__` closure-cell problem.
- # If a method mentions `__class__` or uses the no-arg super(), the
- # compiler will bake a reference to the class in the method itself
- # as `method.__closure__`. Since we replace the class with a
- # clone, we rewrite these references so it keeps working.
- for item in cls.__dict__.values():
- if isinstance(item, (classmethod, staticmethod)):
- # Class- and staticmethods hide their functions inside.
- # These might need to be rewritten as well.
- closure_cells = getattr(item.__func__, "__closure__", None)
- elif isinstance(item, property):
- # Workaround for property `super()` shortcut (PY3-only).
- # There is no universal way for other descriptors.
- closure_cells = getattr(item.fget, "__closure__", None)
- else:
- closure_cells = getattr(item, "__closure__", None)
-
- if not closure_cells: # Catch None or the empty list.
- continue
- for cell in closure_cells:
- try:
- match = cell.cell_contents is self._cls
- except ValueError: # ValueError: Cell is empty
- pass
- else:
- if match:
- set_closure_cell(cell, cls)
-
- return cls
-
- def add_repr(self, ns):
- self._cls_dict["__repr__"] = self._add_method_dunders(
- _make_repr(self._attrs, ns, self._cls)
- )
- return self
-
- def add_str(self):
- repr = self._cls_dict.get("__repr__")
- if repr is None:
- raise ValueError(
- "__str__ can only be generated if a __repr__ exists."
- )
-
- def __str__(self):
- return self.__repr__()
-
- self._cls_dict["__str__"] = self._add_method_dunders(__str__)
- return self
-
- def _make_getstate_setstate(self):
- """
- Create custom __setstate__ and __getstate__ methods.
- """
- # __weakref__ is not writable.
- state_attr_names = tuple(
- an for an in self._attr_names if an != "__weakref__"
- )
-
- def slots_getstate(self):
- """
- Automatically created by attrs.
- """
- return {name: getattr(self, name) for name in state_attr_names}
-
- hash_caching_enabled = self._cache_hash
-
- def slots_setstate(self, state):
- """
- Automatically created by attrs.
- """
- __bound_setattr = _obj_setattr.__get__(self)
- if isinstance(state, tuple):
- # Backward compatibility with attrs instances pickled with
- # attrs versions before v22.2.0 which stored tuples.
- for name, value in zip(state_attr_names, state):
- __bound_setattr(name, value)
- else:
- for name in state_attr_names:
- if name in state:
- __bound_setattr(name, state[name])
-
- # The hash code cache is not included when the object is
- # serialized, but it still needs to be initialized to None to
- # indicate that the first call to __hash__ should be a cache
- # miss.
- if hash_caching_enabled:
- __bound_setattr(_hash_cache_field, None)
-
- return slots_getstate, slots_setstate
-
- def make_unhashable(self):
- self._cls_dict["__hash__"] = None
- return self
-
- def add_hash(self):
- self._cls_dict["__hash__"] = self._add_method_dunders(
- _make_hash(
- self._cls,
- self._attrs,
- frozen=self._frozen,
- cache_hash=self._cache_hash,
- )
- )
-
- return self
-
- def add_init(self):
- self._cls_dict["__init__"] = self._add_method_dunders(
- _make_init(
- self._cls,
- self._attrs,
- self._has_pre_init,
- self._has_post_init,
- self._frozen,
- self._slots,
- self._cache_hash,
- self._base_attr_map,
- self._is_exc,
- self._on_setattr,
- attrs_init=False,
- )
- )
-
- return self
-
- def add_match_args(self):
- self._cls_dict["__match_args__"] = tuple(
- field.name
- for field in self._attrs
- if field.init and not field.kw_only
- )
-
- def add_attrs_init(self):
- self._cls_dict["__attrs_init__"] = self._add_method_dunders(
- _make_init(
- self._cls,
- self._attrs,
- self._has_pre_init,
- self._has_post_init,
- self._frozen,
- self._slots,
- self._cache_hash,
- self._base_attr_map,
- self._is_exc,
- self._on_setattr,
- attrs_init=True,
- )
- )
-
- return self
-
- def add_eq(self):
- cd = self._cls_dict
-
- cd["__eq__"] = self._add_method_dunders(
- _make_eq(self._cls, self._attrs)
- )
- cd["__ne__"] = self._add_method_dunders(_make_ne())
-
- return self
-
- def add_order(self):
- cd = self._cls_dict
-
- cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
- self._add_method_dunders(meth)
- for meth in _make_order(self._cls, self._attrs)
- )
-
- return self
-
- def add_setattr(self):
- if self._frozen:
- return self
-
- sa_attrs = {}
- for a in self._attrs:
- on_setattr = a.on_setattr or self._on_setattr
- if on_setattr and on_setattr is not setters.NO_OP:
- sa_attrs[a.name] = a, on_setattr
-
- if not sa_attrs:
- return self
-
- if self._has_custom_setattr:
- # We need to write a __setattr__ but there already is one!
- raise ValueError(
- "Can't combine custom __setattr__ with on_setattr hooks."
- )
-
- # docstring comes from _add_method_dunders
- def __setattr__(self, name, val):
- try:
- a, hook = sa_attrs[name]
- except KeyError:
- nval = val
- else:
- nval = hook(self, a, val)
-
- _obj_setattr(self, name, nval)
-
- self._cls_dict["__attrs_own_setattr__"] = True
- self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
- self._wrote_own_setattr = True
-
- return self
-
- def _add_method_dunders(self, method):
- """
- Add __module__ and __qualname__ to a *method* if possible.
- """
- try:
- method.__module__ = self._cls.__module__
- except AttributeError:
- pass
-
- try:
- method.__qualname__ = ".".join(
- (self._cls.__qualname__, method.__name__)
- )
- except AttributeError:
- pass
-
- try:
- method.__doc__ = (
- "Method generated by attrs for class "
- f"{self._cls.__qualname__}."
- )
- except AttributeError:
- pass
-
- return method
-
-
-def _determine_attrs_eq_order(cmp, eq, order, default_eq):
- """
- Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
- values of eq and order. If *eq* is None, set it to *default_eq*.
- """
- if cmp is not None and any((eq is not None, order is not None)):
-        raise ValueError("Don't mix `cmp` with `eq` and `order`.")
-
- # cmp takes precedence due to bw-compatibility.
- if cmp is not None:
- return cmp, cmp
-
- # If left None, equality is set to the specified default and ordering
- # mirrors equality.
- if eq is None:
- eq = default_eq
-
- if order is None:
- order = eq
-
- if eq is False and order is True:
- raise ValueError("`order` can only be True if `eq` is True too.")
-
- return eq, order
-
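-
-# Illustrative sketch (not part of the original module): how *cmp*, *eq*, and
-# *order* resolve, by calling the helper defined directly above.
-def _example_eq_order_resolution():
-    # cmp wins and sets both eq and order.
-    assert _determine_attrs_eq_order(True, None, None, True) == (True, True)
-    # eq falls back to the default and order mirrors eq.
-    assert _determine_attrs_eq_order(None, None, None, True) == (True, True)
-    # order=True without eq=True is rejected.
-    try:
-        _determine_attrs_eq_order(None, False, True, True)
-    except ValueError:
-        return "order requires eq"
-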
-
-def _determine_attrib_eq_order(cmp, eq, order, default_eq):
- """
- Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
- values of eq and order. If *eq* is None, set it to *default_eq*.
- """
- if cmp is not None and any((eq is not None, order is not None)):
-        raise ValueError("Don't mix `cmp` with `eq` and `order`.")
-
- def decide_callable_or_boolean(value):
- """
- Decide whether a key function is used.
- """
- if callable(value):
- value, key = True, value
- else:
- key = None
- return value, key
-
- # cmp takes precedence due to bw-compatibility.
- if cmp is not None:
- cmp, cmp_key = decide_callable_or_boolean(cmp)
- return cmp, cmp_key, cmp, cmp_key
-
- # If left None, equality is set to the specified default and ordering
- # mirrors equality.
- if eq is None:
- eq, eq_key = default_eq, None
- else:
- eq, eq_key = decide_callable_or_boolean(eq)
-
- if order is None:
- order, order_key = eq, eq_key
- else:
- order, order_key = decide_callable_or_boolean(order)
-
- if eq is False and order is True:
- raise ValueError("`order` can only be True if `eq` is True too.")
-
- return eq, eq_key, order, order_key
-
-
-def _determine_whether_to_implement(
- cls, flag, auto_detect, dunders, default=True
-):
- """
- Check whether we should implement a set of methods for *cls*.
-
-    *flag* is the argument passed into @attr.s like 'init', *auto_detect* is
-    the same as passed into @attr.s, and *dunders* is a tuple of attribute
-    names whose presence signals that the user has implemented the method
-    themselves.
-
-    Return *default* if no reason either for or against is found.
- """
- if flag is True or flag is False:
- return flag
-
- if flag is None and auto_detect is False:
- return default
-
- # Logically, flag is None and auto_detect is True here.
- for dunder in dunders:
- if _has_own_attribute(cls, dunder):
- return False
-
- return default
-
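-
-# Illustrative sketch (not part of the original module): the user-visible
-# effect of *auto_detect*, which is resolved per method family by
-# _determine_whether_to_implement above. The class name is hypothetical.
-def _example_auto_detect():
-    import attr
-
-    @attr.s(auto_detect=True, auto_attribs=True)
-    class Versioned:
-        major: int
-        minor: int
-
-        def __repr__(self):  # own __repr__ -> attrs leaves it alone
-            return f"v{self.major}.{self.minor}"
-
-    v = Versioned(1, 2)
-    assert repr(v) == "v1.2"  # hand-written __repr__ survived
-    assert v == Versioned(1, 2)  # __eq__ was still generated
-    return v
-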
-
-def attrs(
- maybe_cls=None,
- these=None,
- repr_ns=None,
- repr=None,
- cmp=None,
- hash=None,
- init=None,
- slots=False,
- frozen=False,
- weakref_slot=True,
- str=False,
- auto_attribs=False,
- kw_only=False,
- cache_hash=False,
- auto_exc=False,
- eq=None,
- order=None,
- auto_detect=False,
- collect_by_mro=False,
- getstate_setstate=None,
- on_setattr=None,
- field_transformer=None,
- match_args=True,
- unsafe_hash=None,
-):
- r"""
- A class decorator that adds :term:`dunder methods` according to the
- specified attributes using `attr.ib` or the *these* argument.
-
- Please consider using `attrs.define` / `attrs.frozen` in new code
- (``attr.s`` will *never* go away, though).
-
- :param these: A dictionary of name to `attr.ib` mappings. This is
- useful to avoid the definition of your attributes within the class body
- because you can't (e.g. if you want to add ``__repr__`` methods to
- Django models) or don't want to.
-
- If *these* is not ``None``, *attrs* will *not* search the class body
- for attributes and will *not* remove any attributes from it.
-
- The order is deduced from the order of the attributes inside *these*.
-
- :type these: `dict` of `str` to `attr.ib`
-
- :param str repr_ns: When using nested classes, there's no way in Python 2
- to automatically detect that. Therefore it's possible to set the
- namespace explicitly for a more meaningful ``repr`` output.
- :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
- *order*, and *hash* arguments explicitly, assume they are set to
- ``True`` **unless any** of the involved methods for one of the
- arguments is implemented in the *current* class (i.e. it is *not*
- inherited from some base class).
-
- So for example by implementing ``__eq__`` on a class yourself,
- *attrs* will deduce ``eq=False`` and will create *neither*
- ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
- ``__ne__`` by default, so it *should* be enough to only implement
- ``__eq__`` in most cases).
-
- .. warning::
-
- If you prevent *attrs* from creating the ordering methods for you
- (``order=False``, e.g. by implementing ``__le__``), it becomes
- *your* responsibility to make sure its ordering is sound. The best
- way is to use the `functools.total_ordering` decorator.
-
-
- Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
- *cmp*, or *hash* overrides whatever *auto_detect* would determine.
-
-    :param bool repr: Create a ``__repr__`` method with a human-readable
-        representation of *attrs* attributes.
- :param bool str: Create a ``__str__`` method that is identical to
- ``__repr__``. This is usually not necessary except for
- `Exception`\ s.
- :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
- and ``__ne__`` methods that check two instances for equality.
-
- They compare the instances as if they were tuples of their *attrs*
- attributes if and only if the types of both classes are *identical*!
- :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
- ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
- allow instances to be ordered. If ``None`` (default) mirror value of
- *eq*.
- :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
- and *order* to the same value. Must not be mixed with *eq* or *order*.
- :param Optional[bool] unsafe_hash: If ``None`` (default), the ``__hash__``
-        method is generated according to how *eq* and *frozen* are set.
-
- 1. If *both* are True, *attrs* will generate a ``__hash__`` for you.
- 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
- None, marking it unhashable (which it is).
- 3. If *eq* is False, ``__hash__`` will be left untouched meaning the
- ``__hash__`` method of the base class will be used (if base class is
-        ``object``, this means it will fall back to id-based hashing).
-
-        Although not recommended, you can decide for yourself and force
-        *attrs* to create one (e.g. if the class is immutable even though you
-        didn't freeze it programmatically) by passing ``True``, or suppress it
-        by passing ``False``. Both of these cases are rather special and
-        should be used carefully.
-
-        See our documentation on `hashing`, Python's documentation on
-        `object.__hash__`, and the GitHub issue that led to the default
-        behavior for more details.
- :param Optional[bool] hash: Alias for *unsafe_hash*. *unsafe_hash* takes
- precedence.
- :param bool init: Create a ``__init__`` method that initializes the
- *attrs* attributes. Leading underscores are stripped for the argument
- name. If a ``__attrs_pre_init__`` method exists on the class, it will
- be called before the class is initialized. If a ``__attrs_post_init__``
- method exists on the class, it will be called after the class is fully
- initialized.
-
- If ``init`` is ``False``, an ``__attrs_init__`` method will be
- injected instead. This allows you to define a custom ``__init__``
- method that can do pre-init work such as ``super().__init__()``,
- and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
-    :param bool slots: Create a :term:`slotted class <slotted classes>` that's
-        more memory-efficient. Slotted classes are generally superior to the
-        default dict classes, but have some gotchas you should know about, so
-        we encourage you to read the :term:`glossary entry <slotted classes>`.
- :param bool frozen: Make instances immutable after initialization. If
- someone attempts to modify a frozen instance,
- `attrs.exceptions.FrozenInstanceError` is raised.
-
- .. note::
-
- 1. This is achieved by installing a custom ``__setattr__`` method
- on your class, so you can't implement your own.
-
- 2. True immutability is impossible in Python.
-
-        3. This *does* have a minor runtime performance impact when
-        initializing new instances. In other words:
-        ``__init__`` is slightly slower with ``frozen=True``.
-
- 4. If a class is frozen, you cannot modify ``self`` in
- ``__attrs_post_init__`` or a self-written ``__init__``. You can
- circumvent that limitation by using
- ``object.__setattr__(self, "attribute_name", value)``.
-
- 5. Subclasses of a frozen class are frozen too.
-
- :param bool weakref_slot: Make instances weak-referenceable. This has no
- effect unless ``slots`` is also enabled.
- :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated
- attributes from the class body.
-
- In this case, you **must** annotate every field. If *attrs*
- encounters a field that is set to an `attr.ib` but lacks a type
- annotation, an `attr.exceptions.UnannotatedAttributeError` is
- raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
- want to set a type.
-
- If you assign a value to those attributes (e.g. ``x: int = 42``), that
- value becomes the default value like if it were passed using
- ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
- works as expected in most cases (see warning below).
-
- Attributes annotated as `typing.ClassVar`, and attributes that are
- neither annotated nor set to an `attr.ib` are **ignored**.
-
- .. warning::
- For features that use the attribute name to create decorators (e.g.
-            validators), you still *must* assign `attr.ib`
- to them. Otherwise Python will either not find the name or try to
- use the default value to call e.g. ``validator`` on it.
-
-            These errors can be quite confusing and are probably the most
-            common bug report on our bug tracker.
-
- :param bool kw_only: Make all attributes keyword-only
- in the generated ``__init__`` (if ``init`` is ``False``, this
- parameter is ignored).
- :param bool cache_hash: Ensure that the object's hash code is computed
- only once and stored on the object. If this is set to ``True``,
- hashing must be either explicitly or implicitly enabled for this
- class. If the hash code is cached, avoid any reassignments of
- fields involved in hash code computation or mutations of the objects
- those fields point to after object creation. If such changes occur,
- the behavior of the object's hash code is undefined.
- :param bool auto_exc: If the class subclasses `BaseException`
- (which implicitly includes any subclass of any exception), the
- following happens to behave like a well-behaved Python exceptions
- class:
-
- - the values for *eq*, *order*, and *hash* are ignored and the
- instances compare and hash by the instance's ids (N.B. *attrs* will
- *not* remove existing implementations of ``__hash__`` or the equality
-          methods. It just won't add its own.),
- - all attributes that are either passed into ``__init__`` or have a
- default value are additionally available as a tuple in the ``args``
- attribute,
- - the value of *str* is ignored leaving ``__str__`` to base classes.
- :param bool collect_by_mro: Setting this to `True` fixes the way *attrs*
- collects attributes from base classes. The default behavior is
- incorrect in certain cases of multiple inheritance. It should be on by
- default but is kept off for backward-compatibility.
-
-        See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_
-        for more details.
-
- :param Optional[bool] getstate_setstate:
- .. note::
- This is usually only interesting for slotted classes and you should
- probably just set *auto_detect* to `True`.
-
- If `True`, ``__getstate__`` and
- ``__setstate__`` are generated and attached to the class. This is
- necessary for slotted classes to be pickleable. If left `None`, it's
- `True` by default for slotted classes and ``False`` for dict classes.
-
- If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
- and **either** ``__getstate__`` or ``__setstate__`` is detected directly
- on the class (i.e. not inherited), it is set to `False` (this is usually
- what you want).
-
- :param on_setattr: A callable that is run whenever the user attempts to set
- an attribute (either by assignment like ``i.x = 42`` or by using
- `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
- as validators: the instance, the attribute that is being modified, and
- the new value.
-
- If no exception is raised, the attribute is set to the return value of
- the callable.
-
- If a list of callables is passed, they're automatically wrapped in an
- `attrs.setters.pipe`.
- :type on_setattr: `callable`, or a list of callables, or `None`, or
- `attrs.setters.NO_OP`
-
- :param Optional[callable] field_transformer:
- A function that is called with the original class object and all
- fields right before *attrs* finalizes the class. You can use
- this, e.g., to automatically add converters or validators to
- fields based on their types. See `transform-fields` for more details.
-
- :param bool match_args:
- If `True` (default), set ``__match_args__`` on the class to support
- :pep:`634` (Structural Pattern Matching). It is a tuple of all
- non-keyword-only ``__init__`` parameter names on Python 3.10 and later.
- Ignored on older Python versions.
-
- .. versionadded:: 16.0.0 *slots*
- .. versionadded:: 16.1.0 *frozen*
- .. versionadded:: 16.3.0 *str*
- .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
- .. versionchanged:: 17.1.0
- *hash* supports ``None`` as value which is also the default now.
- .. versionadded:: 17.3.0 *auto_attribs*
- .. versionchanged:: 18.1.0
- If *these* is passed, no attributes are deleted from the class body.
- .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
- .. versionadded:: 18.2.0 *weakref_slot*
- .. deprecated:: 18.2.0
- ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
- `DeprecationWarning` if the classes compared are subclasses of
-      each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
- to each other.
- .. versionchanged:: 19.2.0
- ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
- subclasses comparable anymore.
- .. versionadded:: 18.2.0 *kw_only*
- .. versionadded:: 18.2.0 *cache_hash*
- .. versionadded:: 19.1.0 *auto_exc*
- .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
- .. versionadded:: 19.2.0 *eq* and *order*
- .. versionadded:: 20.1.0 *auto_detect*
- .. versionadded:: 20.1.0 *collect_by_mro*
- .. versionadded:: 20.1.0 *getstate_setstate*
- .. versionadded:: 20.1.0 *on_setattr*
- .. versionadded:: 20.3.0 *field_transformer*
- .. versionchanged:: 21.1.0
- ``init=False`` injects ``__attrs_init__``
- .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
- .. versionchanged:: 21.1.0 *cmp* undeprecated
- .. versionadded:: 21.3.0 *match_args*
- .. versionadded:: 22.2.0
- *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
- """
- eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
-
- # unsafe_hash takes precedence due to PEP 681.
- if unsafe_hash is not None:
- hash = unsafe_hash
-
- if isinstance(on_setattr, (list, tuple)):
- on_setattr = setters.pipe(*on_setattr)
-
- def wrap(cls):
- is_frozen = frozen or _has_frozen_base_class(cls)
- is_exc = auto_exc is True and issubclass(cls, BaseException)
- has_own_setattr = auto_detect and _has_own_attribute(
- cls, "__setattr__"
- )
-
- if has_own_setattr and is_frozen:
- raise ValueError("Can't freeze a class with a custom __setattr__.")
-
- builder = _ClassBuilder(
- cls,
- these,
- slots,
- is_frozen,
- weakref_slot,
- _determine_whether_to_implement(
- cls,
- getstate_setstate,
- auto_detect,
- ("__getstate__", "__setstate__"),
- default=slots,
- ),
- auto_attribs,
- kw_only,
- cache_hash,
- is_exc,
- collect_by_mro,
- on_setattr,
- has_own_setattr,
- field_transformer,
- )
- if _determine_whether_to_implement(
- cls, repr, auto_detect, ("__repr__",)
- ):
- builder.add_repr(repr_ns)
- if str is True:
- builder.add_str()
-
- eq = _determine_whether_to_implement(
- cls, eq_, auto_detect, ("__eq__", "__ne__")
- )
- if not is_exc and eq is True:
- builder.add_eq()
- if not is_exc and _determine_whether_to_implement(
- cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
- ):
- builder.add_order()
-
- builder.add_setattr()
-
- nonlocal hash
- if (
- hash is None
- and auto_detect is True
- and _has_own_attribute(cls, "__hash__")
- ):
- hash = False
-
- if hash is not True and hash is not False and hash is not None:
- # Can't use `hash in` because 1 == True for example.
- raise TypeError(
- "Invalid value for hash. Must be True, False, or None."
- )
- elif hash is False or (hash is None and eq is False) or is_exc:
- # Don't do anything. Should fall back to __object__'s __hash__
- # which is by id.
- if cache_hash:
- raise TypeError(
- "Invalid value for cache_hash. To use hash caching,"
- " hashing must be either explicitly or implicitly "
- "enabled."
- )
- elif hash is True or (
- hash is None and eq is True and is_frozen is True
- ):
- # Build a __hash__ if told so, or if it's safe.
- builder.add_hash()
- else:
- # Raise TypeError on attempts to hash.
- if cache_hash:
- raise TypeError(
- "Invalid value for cache_hash. To use hash caching,"
- " hashing must be either explicitly or implicitly "
- "enabled."
- )
- builder.make_unhashable()
-
- if _determine_whether_to_implement(
- cls, init, auto_detect, ("__init__",)
- ):
- builder.add_init()
- else:
- builder.add_attrs_init()
- if cache_hash:
- raise TypeError(
- "Invalid value for cache_hash. To use hash caching,"
- " init must be True."
- )
-
- if (
- PY310
- and match_args
- and not _has_own_attribute(cls, "__match_args__")
- ):
- builder.add_match_args()
-
- return builder.build_class()
-
- # maybe_cls's type depends on the usage of the decorator. It's a class
- # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
- if maybe_cls is None:
- return wrap
- else:
- return wrap(maybe_cls)
-
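-
-# Illustrative usage sketch (not part of the original module) for the `attrs`
-# decorator documented above, combining auto_attribs, slots, frozen, and
-# kw_only. The class and field names are hypothetical.
-def _example_attr_s_usage():
-    import attr
-
-    @attr.s(auto_attribs=True, slots=True, frozen=True, kw_only=True)
-    class Point:
-        x: int
-        y: int = 0
-
-    p = Point(x=1)
-    assert p == Point(x=1, y=0)  # generated __eq__
-    assert isinstance(hash(p), int)  # eq + frozen -> generated __hash__
-    try:
-        p.x = 2  # frozen -> FrozenInstanceError
-    except attr.exceptions.FrozenInstanceError:
-        pass
-    return p
-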
-
-_attrs = attrs
-"""
-Internal alias so we can use it in functions that take an argument called
-*attrs*.
-"""
-
-
-def _has_frozen_base_class(cls):
- """
- Check whether *cls* has a frozen ancestor by looking at its
- __setattr__.
- """
- return cls.__setattr__ is _frozen_setattrs
-
-
-def _generate_unique_filename(cls, func_name):
- """
- Create a "filename" suitable for a function being generated.
- """
-    return (
-        f"<attrs generated {func_name} {cls.__module__}."
-        f"{getattr(cls, '__qualname__', cls.__name__)}>"
-    )
-
-
-def _make_hash(cls, attrs, frozen, cache_hash):
- attrs = tuple(
- a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
- )
-
- tab = " "
-
- unique_filename = _generate_unique_filename(cls, "hash")
- type_hash = hash(unique_filename)
- # If eq is custom generated, we need to include the functions in globs
- globs = {}
-
- hash_def = "def __hash__(self"
- hash_func = "hash(("
- closing_braces = "))"
- if not cache_hash:
- hash_def += "):"
- else:
- hash_def += ", *"
-
- hash_def += (
- ", _cache_wrapper="
- + "__import__('attr._make')._make._CacheHashWrapper):"
- )
- hash_func = "_cache_wrapper(" + hash_func
- closing_braces += ")"
-
- method_lines = [hash_def]
-
- def append_hash_computation_lines(prefix, indent):
- """
- Generate the code for actually computing the hash code.
- Below this will either be returned directly or used to compute
- a value which is then cached, depending on the value of cache_hash
- """
-
- method_lines.extend(
- [
- indent + prefix + hash_func,
- indent + f" {type_hash},",
- ]
- )
-
- for a in attrs:
- if a.eq_key:
- cmp_name = f"_{a.name}_key"
- globs[cmp_name] = a.eq_key
- method_lines.append(
- indent + f" {cmp_name}(self.{a.name}),"
- )
- else:
- method_lines.append(indent + f" self.{a.name},")
-
- method_lines.append(indent + " " + closing_braces)
-
- if cache_hash:
- method_lines.append(tab + f"if self.{_hash_cache_field} is None:")
- if frozen:
- append_hash_computation_lines(
- f"object.__setattr__(self, '{_hash_cache_field}', ", tab * 2
- )
- method_lines.append(tab * 2 + ")") # close __setattr__
- else:
- append_hash_computation_lines(
- f"self.{_hash_cache_field} = ", tab * 2
- )
- method_lines.append(tab + f"return self.{_hash_cache_field}")
- else:
- append_hash_computation_lines("return ", tab)
-
- script = "\n".join(method_lines)
- return _make_method("__hash__", script, unique_filename, globs)
-
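-
-# Illustrative sketch (not part of the original module): the observable effect
-# of *cache_hash* on the __hash__ generated by _make_hash above. Names are
-# hypothetical.
-def _example_cache_hash():
-    import attr
-
-    @attr.s(frozen=True, cache_hash=True, auto_attribs=True)
-    class Key:
-        parts: tuple
-
-    k = Key(("a", "b"))
-    first = hash(k)
-    # The second call is served from the per-instance slot named by
-    # _hash_cache_field instead of being recomputed.
-    assert hash(k) == first
-    return first
-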
-
-def _add_hash(cls, attrs):
- """
- Add a hash method to *cls*.
- """
- cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
- return cls
-
-
-def _make_ne():
- """
- Create __ne__ method.
- """
-
- def __ne__(self, other):
- """
- Check equality and either forward a NotImplemented or
- return the result negated.
- """
- result = self.__eq__(other)
- if result is NotImplemented:
- return NotImplemented
-
- return not result
-
- return __ne__
-
-
-def _make_eq(cls, attrs):
- """
- Create __eq__ method for *cls* with *attrs*.
- """
- attrs = [a for a in attrs if a.eq]
-
- unique_filename = _generate_unique_filename(cls, "eq")
- lines = [
- "def __eq__(self, other):",
- " if other.__class__ is not self.__class__:",
- " return NotImplemented",
- ]
-
- # We can't just do a big self.x = other.x and... clause due to
- # irregularities like nan == nan is false but (nan,) == (nan,) is true.
- globs = {}
- if attrs:
- lines.append(" return (")
- others = [" ) == ("]
- for a in attrs:
- if a.eq_key:
- cmp_name = f"_{a.name}_key"
- # Add the key function to the global namespace
- # of the evaluated function.
- globs[cmp_name] = a.eq_key
- lines.append(f" {cmp_name}(self.{a.name}),")
- others.append(f" {cmp_name}(other.{a.name}),")
- else:
- lines.append(f" self.{a.name},")
- others.append(f" other.{a.name},")
-
- lines += others + [" )"]
- else:
- lines.append(" return True")
-
- script = "\n".join(lines)
-
- return _make_method("__eq__", script, unique_filename, globs)
-
-
-def _make_order(cls, attrs):
- """
- Create ordering methods for *cls* with *attrs*.
- """
- attrs = [a for a in attrs if a.order]
-
- def attrs_to_tuple(obj):
- """
- Save us some typing.
- """
- return tuple(
- key(value) if key else value
- for value, key in (
- (getattr(obj, a.name), a.order_key) for a in attrs
- )
- )
-
- def __lt__(self, other):
- """
- Automatically created by attrs.
- """
- if other.__class__ is self.__class__:
- return attrs_to_tuple(self) < attrs_to_tuple(other)
-
- return NotImplemented
-
- def __le__(self, other):
- """
- Automatically created by attrs.
- """
- if other.__class__ is self.__class__:
- return attrs_to_tuple(self) <= attrs_to_tuple(other)
-
- return NotImplemented
-
- def __gt__(self, other):
- """
- Automatically created by attrs.
- """
- if other.__class__ is self.__class__:
- return attrs_to_tuple(self) > attrs_to_tuple(other)
-
- return NotImplemented
-
- def __ge__(self, other):
- """
- Automatically created by attrs.
- """
- if other.__class__ is self.__class__:
- return attrs_to_tuple(self) >= attrs_to_tuple(other)
-
- return NotImplemented
-
- return __lt__, __le__, __gt__, __ge__
-
-
-def _add_eq(cls, attrs=None):
- """
- Add equality methods to *cls* with *attrs*.
- """
- if attrs is None:
- attrs = cls.__attrs_attrs__
-
- cls.__eq__ = _make_eq(cls, attrs)
- cls.__ne__ = _make_ne()
-
- return cls
-
-
-def _make_repr(attrs, ns, cls):
- unique_filename = _generate_unique_filename(cls, "repr")
- # Figure out which attributes to include, and which function to use to
- # format them. The a.repr value can be either bool or a custom
- # callable.
- attr_names_with_reprs = tuple(
- (a.name, (repr if a.repr is True else a.repr), a.init)
- for a in attrs
- if a.repr is not False
- )
- globs = {
- name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr
- }
- globs["_compat"] = _compat
- globs["AttributeError"] = AttributeError
- globs["NOTHING"] = NOTHING
- attribute_fragments = []
- for name, r, i in attr_names_with_reprs:
- accessor = (
- "self." + name if i else 'getattr(self, "' + name + '", NOTHING)'
- )
- fragment = (
- "%s={%s!r}" % (name, accessor)
- if r == repr
- else "%s={%s_repr(%s)}" % (name, name, accessor)
- )
- attribute_fragments.append(fragment)
- repr_fragment = ", ".join(attribute_fragments)
-
- if ns is None:
- cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}'
- else:
- cls_name_fragment = ns + ".{self.__class__.__name__}"
-
- lines = [
- "def __repr__(self):",
- " try:",
- " already_repring = _compat.repr_context.already_repring",
- " except AttributeError:",
- " already_repring = {id(self),}",
- " _compat.repr_context.already_repring = already_repring",
- " else:",
- " if id(self) in already_repring:",
- " return '...'",
- " else:",
- " already_repring.add(id(self))",
- " try:",
- f" return f'{cls_name_fragment}({repr_fragment})'",
- " finally:",
- " already_repring.remove(id(self))",
- ]
-
- return _make_method(
- "__repr__", "\n".join(lines), unique_filename, globs=globs
- )
-
-
-def _add_repr(cls, ns=None, attrs=None):
- """
- Add a repr method to *cls*.
- """
- if attrs is None:
- attrs = cls.__attrs_attrs__
-
- cls.__repr__ = _make_repr(attrs, ns, cls)
- return cls
-
-
-def fields(cls):
- """
- Return the tuple of *attrs* attributes for a class.
-
- The tuple also allows accessing the fields by their names (see below for
- examples).
-
- :param type cls: Class to introspect.
-
- :raise TypeError: If *cls* is not a class.
- :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
- class.
-
- :rtype: tuple (with name accessors) of `attrs.Attribute`
-
- .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
- by name.
- .. versionchanged:: 23.1.0 Add support for generic classes.
- """
- generic_base = get_generic_base(cls)
-
- if generic_base is None and not isinstance(cls, type):
- raise TypeError("Passed object must be a class.")
-
- attrs = getattr(cls, "__attrs_attrs__", None)
-
- if attrs is None:
- if generic_base is not None:
- attrs = getattr(generic_base, "__attrs_attrs__", None)
- if attrs is not None:
- # Even though this is global state, stick it on here to speed
- # it up. We rely on `cls` being cached for this to be
- # efficient.
- cls.__attrs_attrs__ = attrs
- return attrs
- raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.")
-
- return attrs
-
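-
-# Illustrative sketch (not part of the original module): the name accessors on
-# the tuple returned by `fields` above. Class and field names are hypothetical.
-def _example_fields_introspection():
-    import attr
-
-    @attr.s(auto_attribs=True)
-    class User:
-        name: str
-        age: int = 0
-
-    fs = attr.fields(User)
-    assert [a.name for a in fs] == ["name", "age"]  # plain tuple iteration
-    assert fs.age.default == 0  # and access by attribute name
-    return fs
-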
-
-def fields_dict(cls):
- """
- Return an ordered dictionary of *attrs* attributes for a class, whose
- keys are the attribute names.
-
- :param type cls: Class to introspect.
-
- :raise TypeError: If *cls* is not a class.
- :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
- class.
-
- :rtype: dict
-
- .. versionadded:: 18.1.0
- """
- if not isinstance(cls, type):
- raise TypeError("Passed object must be a class.")
- attrs = getattr(cls, "__attrs_attrs__", None)
- if attrs is None:
- raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.")
- return {a.name: a for a in attrs}
-
-
-def validate(inst):
- """
- Validate all attributes on *inst* that have a validator.
-
-    Lets all exceptions through.
-
- :param inst: Instance of a class with *attrs* attributes.
- """
- if _config._run_validators is False:
- return
-
- for a in fields(inst.__class__):
- v = a.validator
- if v is not None:
- v(inst, a, getattr(inst, a.name))
-
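-
-# Illustrative sketch (not part of the original module): re-running validators
-# by hand with `validate` above. Class and field names are hypothetical.
-def _example_validate():
-    import attr
-
-    @attr.s
-    class Port:
-        number = attr.ib(validator=attr.validators.instance_of(int))
-
-    p = Port(8080)
-    p.number = "not an int"  # plain assignment skips validators for attr.s
-    try:
-        attr.validate(p)  # the instance_of validator runs again here
-    except TypeError:
-        return "invalid"
-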
-
-def _is_slot_cls(cls):
- return "__slots__" in cls.__dict__
-
-
-def _is_slot_attr(a_name, base_attr_map):
- """
- Check if the attribute name comes from a slot class.
- """
- return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
-
-
-def _make_init(
- cls,
- attrs,
- pre_init,
- post_init,
- frozen,
- slots,
- cache_hash,
- base_attr_map,
- is_exc,
- cls_on_setattr,
- attrs_init,
-):
- has_cls_on_setattr = (
- cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP
- )
-
- if frozen and has_cls_on_setattr:
- raise ValueError("Frozen classes can't use on_setattr.")
-
- needs_cached_setattr = cache_hash or frozen
- filtered_attrs = []
- attr_dict = {}
- for a in attrs:
- if not a.init and a.default is NOTHING:
- continue
-
- filtered_attrs.append(a)
- attr_dict[a.name] = a
-
- if a.on_setattr is not None:
- if frozen is True:
- raise ValueError("Frozen classes can't use on_setattr.")
-
- needs_cached_setattr = True
- elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP:
- needs_cached_setattr = True
-
- unique_filename = _generate_unique_filename(cls, "init")
-
- script, globs, annotations = _attrs_to_init_script(
- filtered_attrs,
- frozen,
- slots,
- pre_init,
- post_init,
- cache_hash,
- base_attr_map,
- is_exc,
- needs_cached_setattr,
- has_cls_on_setattr,
- attrs_init,
- )
- if cls.__module__ in sys.modules:
- # This makes typing.get_type_hints(CLS.__init__) resolve string types.
- globs.update(sys.modules[cls.__module__].__dict__)
-
- globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
-
- if needs_cached_setattr:
- # Save the lookup overhead in __init__ if we need to circumvent
- # setattr hooks.
- globs["_cached_setattr_get"] = _obj_setattr.__get__
-
- init = _make_method(
- "__attrs_init__" if attrs_init else "__init__",
- script,
- unique_filename,
- globs,
- )
- init.__annotations__ = annotations
-
- return init
-
-
-def _setattr(attr_name, value_var, has_on_setattr):
- """
- Use the cached object.setattr to set *attr_name* to *value_var*.
- """
- return f"_setattr('{attr_name}', {value_var})"
-
-
-def _setattr_with_converter(attr_name, value_var, has_on_setattr):
- """
- Use the cached object.setattr to set *attr_name* to *value_var*, but run
- its converter first.
- """
- return "_setattr('%s', %s(%s))" % (
- attr_name,
- _init_converter_pat % (attr_name,),
- value_var,
- )
-
-
-def _assign(attr_name, value, has_on_setattr):
- """
- Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise
-    delegate to _setattr.
- """
- if has_on_setattr:
- return _setattr(attr_name, value, True)
-
- return f"self.{attr_name} = {value}"
-
-
-def _assign_with_converter(attr_name, value_var, has_on_setattr):
- """
- Unless *attr_name* has an on_setattr hook, use normal assignment after
-    conversion. Otherwise delegate to _setattr_with_converter.
- """
- if has_on_setattr:
- return _setattr_with_converter(attr_name, value_var, True)
-
- return "self.%s = %s(%s)" % (
- attr_name,
- _init_converter_pat % (attr_name,),
- value_var,
- )
-
-
-def _attrs_to_init_script(
- attrs,
- frozen,
- slots,
- pre_init,
- post_init,
- cache_hash,
- base_attr_map,
- is_exc,
- needs_cached_setattr,
- has_cls_on_setattr,
- attrs_init,
-):
- """
- Return a script of an initializer for *attrs* and a dict of globals.
-
- The globals are expected by the generated script.
-
- If *frozen* is True, we cannot set the attributes directly so we use
- a cached ``object.__setattr__``.
- """
- lines = []
- if pre_init:
- lines.append("self.__attrs_pre_init__()")
-
- if needs_cached_setattr:
- lines.append(
- # Circumvent the __setattr__ descriptor to save one lookup per
- # assignment.
- # Note _setattr will be used again below if cache_hash is True
- "_setattr = _cached_setattr_get(self)"
- )
-
- if frozen is True:
- if slots is True:
- fmt_setter = _setattr
- fmt_setter_with_converter = _setattr_with_converter
- else:
- # Dict frozen classes assign directly to __dict__.
- # But only if the attribute doesn't come from an ancestor slot
- # class.
- # Note _inst_dict will be used again below if cache_hash is True
- lines.append("_inst_dict = self.__dict__")
-
- def fmt_setter(attr_name, value_var, has_on_setattr):
- if _is_slot_attr(attr_name, base_attr_map):
- return _setattr(attr_name, value_var, has_on_setattr)
-
- return f"_inst_dict['{attr_name}'] = {value_var}"
-
- def fmt_setter_with_converter(
- attr_name, value_var, has_on_setattr
- ):
- if has_on_setattr or _is_slot_attr(attr_name, base_attr_map):
- return _setattr_with_converter(
- attr_name, value_var, has_on_setattr
- )
-
- return "_inst_dict['%s'] = %s(%s)" % (
- attr_name,
- _init_converter_pat % (attr_name,),
- value_var,
- )
-
- else:
- # Not frozen.
- fmt_setter = _assign
- fmt_setter_with_converter = _assign_with_converter
-
- args = []
- kw_only_args = []
- attrs_to_validate = []
-
- # This is a dictionary of names to validator and converter callables.
- # Injecting this into __init__ globals lets us avoid lookups.
- names_for_globals = {}
- annotations = {"return": None}
-
- for a in attrs:
- if a.validator:
- attrs_to_validate.append(a)
-
- attr_name = a.name
- has_on_setattr = a.on_setattr is not None or (
- a.on_setattr is not setters.NO_OP and has_cls_on_setattr
- )
- # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not
- # explicitly provided
- arg_name = a.alias
-
- has_factory = isinstance(a.default, Factory)
- if has_factory and a.default.takes_self:
- maybe_self = "self"
- else:
- maybe_self = ""
-
- if a.init is False:
- if has_factory:
- init_factory_name = _init_factory_pat % (a.name,)
- if a.converter is not None:
- lines.append(
- fmt_setter_with_converter(
- attr_name,
- init_factory_name + f"({maybe_self})",
- has_on_setattr,
- )
- )
- conv_name = _init_converter_pat % (a.name,)
- names_for_globals[conv_name] = a.converter
- else:
- lines.append(
- fmt_setter(
- attr_name,
- init_factory_name + f"({maybe_self})",
- has_on_setattr,
- )
- )
- names_for_globals[init_factory_name] = a.default.factory
- else:
- if a.converter is not None:
- lines.append(
- fmt_setter_with_converter(
- attr_name,
- f"attr_dict['{attr_name}'].default",
- has_on_setattr,
- )
- )
- conv_name = _init_converter_pat % (a.name,)
- names_for_globals[conv_name] = a.converter
- else:
- lines.append(
- fmt_setter(
- attr_name,
- f"attr_dict['{attr_name}'].default",
- has_on_setattr,
- )
- )
- elif a.default is not NOTHING and not has_factory:
- arg = f"{arg_name}=attr_dict['{attr_name}'].default"
- if a.kw_only:
- kw_only_args.append(arg)
- else:
- args.append(arg)
-
- if a.converter is not None:
- lines.append(
- fmt_setter_with_converter(
- attr_name, arg_name, has_on_setattr
- )
- )
- names_for_globals[
- _init_converter_pat % (a.name,)
- ] = a.converter
- else:
- lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
-
- elif has_factory:
- arg = f"{arg_name}=NOTHING"
- if a.kw_only:
- kw_only_args.append(arg)
- else:
- args.append(arg)
- lines.append(f"if {arg_name} is not NOTHING:")
-
- init_factory_name = _init_factory_pat % (a.name,)
- if a.converter is not None:
- lines.append(
- " "
- + fmt_setter_with_converter(
- attr_name, arg_name, has_on_setattr
- )
- )
- lines.append("else:")
- lines.append(
- " "
- + fmt_setter_with_converter(
- attr_name,
- init_factory_name + "(" + maybe_self + ")",
- has_on_setattr,
- )
- )
- names_for_globals[
- _init_converter_pat % (a.name,)
- ] = a.converter
- else:
- lines.append(
- " " + fmt_setter(attr_name, arg_name, has_on_setattr)
- )
- lines.append("else:")
- lines.append(
- " "
- + fmt_setter(
- attr_name,
- init_factory_name + "(" + maybe_self + ")",
- has_on_setattr,
- )
- )
- names_for_globals[init_factory_name] = a.default.factory
- else:
- if a.kw_only:
- kw_only_args.append(arg_name)
- else:
- args.append(arg_name)
-
- if a.converter is not None:
- lines.append(
- fmt_setter_with_converter(
- attr_name, arg_name, has_on_setattr
- )
- )
- names_for_globals[
- _init_converter_pat % (a.name,)
- ] = a.converter
- else:
- lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
-
- if a.init is True:
- if a.type is not None and a.converter is None:
- annotations[arg_name] = a.type
- elif a.converter is not None:
- # Try to get the type from the converter.
- t = _AnnotationExtractor(a.converter).get_first_param_type()
- if t:
- annotations[arg_name] = t
-
- if attrs_to_validate: # we can skip this if there are no validators.
- names_for_globals["_config"] = _config
- lines.append("if _config._run_validators is True:")
- for a in attrs_to_validate:
- val_name = "__attr_validator_" + a.name
- attr_name = "__attr_" + a.name
- lines.append(f" {val_name}(self, {attr_name}, self.{a.name})")
- names_for_globals[val_name] = a.validator
- names_for_globals[attr_name] = a
-
- if post_init:
- lines.append("self.__attrs_post_init__()")
-
- # because this is set only after __attrs_post_init__ is called, a crash
- # will result if post-init tries to access the hash code. This seemed
- # preferable to setting this beforehand, in which case alteration to
- # field values during post-init combined with post-init accessing the
- # hash code would result in silent bugs.
- if cache_hash:
- if frozen:
- if slots:
- # if frozen and slots, then _setattr defined above
- init_hash_cache = "_setattr('%s', %s)"
- else:
- # if frozen and not slots, then _inst_dict defined above
- init_hash_cache = "_inst_dict['%s'] = %s"
- else:
- init_hash_cache = "self.%s = %s"
- lines.append(init_hash_cache % (_hash_cache_field, "None"))
-
- # For exceptions we rely on BaseException.__init__ for proper
- # initialization.
- if is_exc:
- vals = ",".join(f"self.{a.name}" for a in attrs if a.init)
-
- lines.append(f"BaseException.__init__(self, {vals})")
-
- args = ", ".join(args)
- if kw_only_args:
- args += "%s*, %s" % (
- ", " if args else "", # leading comma
- ", ".join(kw_only_args), # kw_only args
- )
-
- return (
- "def %s(self, %s):\n %s\n"
- % (
- ("__attrs_init__" if attrs_init else "__init__"),
- args,
- "\n ".join(lines) if lines else "pass",
- ),
- names_for_globals,
- annotations,
- )
-
-
-def _default_init_alias_for(name: str) -> str:
- """
- The default __init__ parameter name for a field.
-
-    This performs private-name adjustment via leading-underscore stripping,
- and is the default value of Attribute.alias if not provided.
- """
-
- return name.lstrip("_")
-
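-
-# Illustrative sketch (not part of the original module): the leading-underscore
-# stripping performed by _default_init_alias_for above.
-def _example_default_alias():
-    assert _default_init_alias_for("_secret") == "secret"
-    assert _default_init_alias_for("__very_private") == "very_private"
-    assert _default_init_alias_for("plain") == "plain"
-    return True
-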
-
-class Attribute:
- """
- *Read-only* representation of an attribute.
-
- .. warning::
-
- You should never instantiate this class yourself.
-
-    The class has *all* arguments of `attr.ib` (except for ``factory`` which
-    is only syntactic sugar for ``default=Factory(...)``) plus the following:
-
- - ``name`` (`str`): The name of the attribute.
- - ``alias`` (`str`): The __init__ parameter name of the attribute, after
- any explicit overrides and default private-attribute-name handling.
- - ``inherited`` (`bool`): Whether or not that attribute has been inherited
- from a base class.
- - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables
- that are used for comparing and ordering objects by this attribute,
- respectively. These are set by passing a callable to `attr.ib`'s ``eq``,
-      ``order``, or ``cmp`` arguments. See also the documentation on
-      comparison customization.
-
- Instances of this class are frequently used for introspection purposes
- like:
-
- - `fields` returns a tuple of them.
- - Validators get them passed as the first argument.
-    - The :ref:`field transformer <transform-fields>` hook receives a list of
- them.
- - The ``alias`` property exposes the __init__ parameter name of the field,
- with any overrides and default private-attribute handling applied.
-
-
- .. versionadded:: 20.1.0 *inherited*
- .. versionadded:: 20.1.0 *on_setattr*
- .. versionchanged:: 20.2.0 *inherited* is not taken into account for
- equality checks and hashing anymore.
- .. versionadded:: 21.1.0 *eq_key* and *order_key*
- .. versionadded:: 22.2.0 *alias*
-
- For the full version history of the fields, see `attr.ib`.
- """
-
- __slots__ = (
- "name",
- "default",
- "validator",
- "repr",
- "eq",
- "eq_key",
- "order",
- "order_key",
- "hash",
- "init",
- "metadata",
- "type",
- "converter",
- "kw_only",
- "inherited",
- "on_setattr",
- "alias",
- )
-
- def __init__(
- self,
- name,
- default,
- validator,
- repr,
- cmp, # XXX: unused, remove along with other cmp code.
- hash,
- init,
- inherited,
- metadata=None,
- type=None,
- converter=None,
- kw_only=False,
- eq=None,
- eq_key=None,
- order=None,
- order_key=None,
- on_setattr=None,
- alias=None,
- ):
- eq, eq_key, order, order_key = _determine_attrib_eq_order(
- cmp, eq_key or eq, order_key or order, True
- )
-
- # Cache this descriptor here to speed things up later.
- bound_setattr = _obj_setattr.__get__(self)
-
- # Despite the big red warning, people *do* instantiate `Attribute`
- # themselves.
- bound_setattr("name", name)
- bound_setattr("default", default)
- bound_setattr("validator", validator)
- bound_setattr("repr", repr)
- bound_setattr("eq", eq)
- bound_setattr("eq_key", eq_key)
- bound_setattr("order", order)
- bound_setattr("order_key", order_key)
- bound_setattr("hash", hash)
- bound_setattr("init", init)
- bound_setattr("converter", converter)
- bound_setattr(
- "metadata",
- (
- types.MappingProxyType(dict(metadata)) # Shallow copy
- if metadata
- else _empty_metadata_singleton
- ),
- )
- bound_setattr("type", type)
- bound_setattr("kw_only", kw_only)
- bound_setattr("inherited", inherited)
- bound_setattr("on_setattr", on_setattr)
- bound_setattr("alias", alias)
-
- def __setattr__(self, name, value):
- raise FrozenInstanceError()
-
- @classmethod
- def from_counting_attr(cls, name, ca, type=None):
-        # type holds the annotated value. Deal with conflicts:
- if type is None:
- type = ca.type
- elif ca.type is not None:
- raise ValueError(
- "Type annotation and type argument cannot both be present"
- )
- inst_dict = {
- k: getattr(ca, k)
- for k in Attribute.__slots__
- if k
- not in (
- "name",
- "validator",
- "default",
- "type",
- "inherited",
- ) # exclude methods and deprecated alias
- }
- return cls(
- name=name,
- validator=ca._validator,
- default=ca._default,
- type=type,
- cmp=None,
- inherited=False,
- **inst_dict,
- )
-
- # Don't use attrs.evolve since fields(Attribute) doesn't work
- def evolve(self, **changes):
- """
- Copy *self* and apply *changes*.
-
- This works similarly to `attrs.evolve` but that function does not work
- with `Attribute`.
-
- It is mainly meant to be used for `transform-fields`.
-
- .. versionadded:: 20.3.0
- """
- new = copy.copy(self)
-
- new._setattrs(changes.items())
-
- return new
-
- # Don't use _add_pickle since fields(Attribute) doesn't work
- def __getstate__(self):
- """
- Play nice with pickle.
- """
- return tuple(
- getattr(self, name) if name != "metadata" else dict(self.metadata)
- for name in self.__slots__
- )
-
- def __setstate__(self, state):
- """
- Play nice with pickle.
- """
- self._setattrs(zip(self.__slots__, state))
-
- def _setattrs(self, name_values_pairs):
- bound_setattr = _obj_setattr.__get__(self)
- for name, value in name_values_pairs:
- if name != "metadata":
- bound_setattr(name, value)
- else:
- bound_setattr(
- name,
- types.MappingProxyType(dict(value))
- if value
- else _empty_metadata_singleton,
- )
-
-
-_a = [
- Attribute(
- name=name,
- default=NOTHING,
- validator=None,
- repr=True,
- cmp=None,
- eq=True,
- order=False,
- hash=(name != "metadata"),
- init=True,
- inherited=False,
- alias=_default_init_alias_for(name),
- )
- for name in Attribute.__slots__
-]
-
-Attribute = _add_hash(
- _add_eq(
- _add_repr(Attribute, attrs=_a),
- attrs=[a for a in _a if a.name != "inherited"],
- ),
- attrs=[a for a in _a if a.hash and a.name != "inherited"],
-)
-
-
-class _CountingAttr:
- """
- Intermediate representation of attributes that uses a counter to preserve
- the order in which the attributes have been defined.
-
-    *Internal* data structure of the attrs library. Running into one is most
- likely the result of a bug like a forgotten `@attr.s` decorator.
- """
-
- __slots__ = (
- "counter",
- "_default",
- "repr",
- "eq",
- "eq_key",
- "order",
- "order_key",
- "hash",
- "init",
- "metadata",
- "_validator",
- "converter",
- "type",
- "kw_only",
- "on_setattr",
- "alias",
- )
- __attrs_attrs__ = tuple(
- Attribute(
- name=name,
- alias=_default_init_alias_for(name),
- default=NOTHING,
- validator=None,
- repr=True,
- cmp=None,
- hash=True,
- init=True,
- kw_only=False,
- eq=True,
- eq_key=None,
- order=False,
- order_key=None,
- inherited=False,
- on_setattr=None,
- )
- for name in (
- "counter",
- "_default",
- "repr",
- "eq",
- "order",
- "hash",
- "init",
- "on_setattr",
- "alias",
- )
- ) + (
- Attribute(
- name="metadata",
- alias="metadata",
- default=None,
- validator=None,
- repr=True,
- cmp=None,
- hash=False,
- init=True,
- kw_only=False,
- eq=True,
- eq_key=None,
- order=False,
- order_key=None,
- inherited=False,
- on_setattr=None,
- ),
- )
- cls_counter = 0
-
- def __init__(
- self,
- default,
- validator,
- repr,
- cmp,
- hash,
- init,
- converter,
- metadata,
- type,
- kw_only,
- eq,
- eq_key,
- order,
- order_key,
- on_setattr,
- alias,
- ):
- _CountingAttr.cls_counter += 1
- self.counter = _CountingAttr.cls_counter
- self._default = default
- self._validator = validator
- self.converter = converter
- self.repr = repr
- self.eq = eq
- self.eq_key = eq_key
- self.order = order
- self.order_key = order_key
- self.hash = hash
- self.init = init
- self.metadata = metadata
- self.type = type
- self.kw_only = kw_only
- self.on_setattr = on_setattr
- self.alias = alias
-
- def validator(self, meth):
- """
- Decorator that adds *meth* to the list of validators.
-
- Returns *meth* unchanged.
-
- .. versionadded:: 17.1.0
- """
- if self._validator is None:
- self._validator = meth
- else:
- self._validator = and_(self._validator, meth)
- return meth
-
- def default(self, meth):
- """
- Decorator that allows setting the default for an attribute.
-
- Returns *meth* unchanged.
-
- :raises DefaultAlreadySetError: If default has been set before.
-
- .. versionadded:: 17.1.0
- """
- if self._default is not NOTHING:
- raise DefaultAlreadySetError()
-
- self._default = Factory(meth, takes_self=True)
-
- return meth
-
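-# Illustrative sketch (hypothetical user code): on a class decorated with
-# @attr.s, `x = attr.ib()` returns a _CountingAttr, so `@x.validator` and
-# `@x.default` can then register a validator and a default factory for that
-# attribute:
-#
-#     @x.validator
-#     def _check_x(self, attribute, value):
-#         if value < 0:
-#             raise ValueError("x must be non-negative")
-#
-#     @x.default
-#     def _default_x(self):
-#         return 0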
-
-_CountingAttr = _add_eq(_add_repr(_CountingAttr))
-
-
-class Factory:
- """
- Stores a factory callable.
-
- If passed as the default value to `attrs.field`, the factory is used to
- generate a new value.
-
- :param callable factory: A callable that takes either none or exactly one
- mandatory positional argument depending on *takes_self*.
- :param bool takes_self: Pass the partially initialized instance that is
- being initialized as a positional argument.
-
- .. versionadded:: 17.1.0 *takes_self*
- """
-
- __slots__ = ("factory", "takes_self")
-
- def __init__(self, factory, takes_self=False):
- self.factory = factory
- self.takes_self = takes_self
-
- def __getstate__(self):
- """
- Play nice with pickle.
- """
- return tuple(getattr(self, name) for name in self.__slots__)
-
- def __setstate__(self, state):
- """
- Play nice with pickle.
- """
- for name, value in zip(self.__slots__, state):
- setattr(self, name, value)
-
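-# Illustrative sketch (hypothetical usage): `attr.ib(default=Factory(list))`
-# gives each instance its own fresh list, while
-# `Factory(lambda self: self.x * 2, takes_self=True)` receives the partially
-# initialized instance and can derive the default from other attributes.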
-
-_f = [
- Attribute(
- name=name,
- default=NOTHING,
- validator=None,
- repr=True,
- cmp=None,
- eq=True,
- order=False,
- hash=True,
- init=True,
- inherited=False,
- )
- for name in Factory.__slots__
-]
-
-Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
-
-
-def make_class(name, attrs, bases=(object,), **attributes_arguments):
- r"""
- A quick way to create a new class called *name* with *attrs*.
-
- :param str name: The name for the new class.
-
- :param attrs: A list of names or a dictionary of mappings of names to
- `attr.ib`\ s / `attrs.field`\ s.
-
- The order is deduced from the order of the names or attributes inside
- *attrs*. Otherwise the order of the definition of the attributes is
- used.
- :type attrs: `list` or `dict`
-
- :param tuple bases: Classes that the new class will subclass.
-
- :param attributes_arguments: Passed unmodified to `attr.s`.
-
- :return: A new class with *attrs*.
- :rtype: type
-
- .. versionadded:: 17.1.0 *bases*
- .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
- """
- if isinstance(attrs, dict):
- cls_dict = attrs
- elif isinstance(attrs, (list, tuple)):
- cls_dict = {a: attrib() for a in attrs}
- else:
- raise TypeError("attrs argument must be a dict or a list.")
-
- pre_init = cls_dict.pop("__attrs_pre_init__", None)
- post_init = cls_dict.pop("__attrs_post_init__", None)
- user_init = cls_dict.pop("__init__", None)
-
- body = {}
- if pre_init is not None:
- body["__attrs_pre_init__"] = pre_init
- if post_init is not None:
- body["__attrs_post_init__"] = post_init
- if user_init is not None:
- body["__init__"] = user_init
-
- type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body))
-
- # For pickling to work, the __module__ variable needs to be set to the
- # frame where the class is created. Bypass this step in environments where
- # sys._getframe is not defined (Jython for example) or sys._getframe is not
- # defined for arguments greater than 0 (IronPython).
- try:
- type_.__module__ = sys._getframe(1).f_globals.get(
- "__name__", "__main__"
- )
- except (AttributeError, ValueError):
- pass
-
- # We do it here for proper warnings with meaningful stacklevel.
- cmp = attributes_arguments.pop("cmp", None)
- (
- attributes_arguments["eq"],
- attributes_arguments["order"],
- ) = _determine_attrs_eq_order(
- cmp,
- attributes_arguments.get("eq"),
- attributes_arguments.get("order"),
- True,
- )
-
- return _attrs(these=cls_dict, **attributes_arguments)(type_)
-
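-# Illustrative sketch (hypothetical usage): make_class("Point", ["x", "y"],
-# frozen=True) builds the same class as decorating a two-field class with
-# @attr.s(frozen=True); Point(1, 2) then reprs as Point(x=1, y=2).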
-
-# These are required within this module, so we define them here and merely
-# import them into .validators / .converters.
-
-
-@attrs(slots=True, hash=True)
-class _AndValidator:
- """
- Compose many validators to a single one.
- """
-
- _validators = attrib()
-
- def __call__(self, inst, attr, value):
- for v in self._validators:
- v(inst, attr, value)
-
-
-def and_(*validators):
- """
- A validator that composes multiple validators into one.
-
- When called on a value, it runs all wrapped validators.
-
- :param callables validators: Arbitrary number of validators.
-
- .. versionadded:: 17.1.0
- """
- vals = []
- for validator in validators:
- vals.extend(
- validator._validators
- if isinstance(validator, _AndValidator)
- else [validator]
- )
-
- return _AndValidator(tuple(vals))
-
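-# Illustrative sketch (hypothetical usage): and_(instance_of(int), my_check)
-# yields a single validator that runs both checks in order; nesting is
-# flattened, so and_(and_(a, b), c) wraps the flat tuple (a, b, c).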
-
-def pipe(*converters):
- """
- A converter that composes multiple converters into one.
-
- When called on a value, it runs all wrapped converters, returning the
- *last* value.
-
- Type annotations will be inferred from the wrapped converters, if
- they have any.
-
- :param callables converters: Arbitrary number of converters.
-
- .. versionadded:: 20.1.0
- """
-
- def pipe_converter(val):
- for converter in converters:
- val = converter(val)
-
- return val
-
- if not converters:
- # If the converter list is empty, pipe_converter is the identity.
- A = typing.TypeVar("A")
- pipe_converter.__annotations__ = {"val": A, "return": A}
- else:
- # Get parameter type from first converter.
- t = _AnnotationExtractor(converters[0]).get_first_param_type()
- if t:
- pipe_converter.__annotations__["val"] = t
-
- # Get return type from last converter.
- rt = _AnnotationExtractor(converters[-1]).get_return_type()
- if rt:
- pipe_converter.__annotations__["return"] = rt
-
- return pipe_converter
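-
-# Illustrative sketch (hypothetical usage): pipe(int, abs) returns a converter
-# equivalent to lambda v: abs(int(v)); the "val" annotation is taken from the
-# first converter and the return annotation from the last one, when available.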
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/subset/cff.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/subset/cff.py
deleted file mode 100644
index dd79f6db37a482891b6f151159ef4c9b89475b8e..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/subset/cff.py
+++ /dev/null
@@ -1,536 +0,0 @@
-from fontTools.misc import psCharStrings
-from fontTools import ttLib
-from fontTools.pens.basePen import NullPen
-from fontTools.misc.roundTools import otRound
-from fontTools.misc.loggingTools import deprecateFunction
-from fontTools.subset.util import _add_method, _uniq_sort
-
-
-class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
- def __init__(self, components, localSubrs, globalSubrs):
- psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
- self.components = components
-
- def op_endchar(self, index):
- args = self.popall()
- if len(args) >= 4:
- from fontTools.encodings.StandardEncoding import StandardEncoding
-
- # endchar can do seac accent building; the T2 spec says it's deprecated,
- # but recent software that shall remain nameless does output it.
- adx, ady, bchar, achar = args[-4:]
- baseGlyph = StandardEncoding[bchar]
- accentGlyph = StandardEncoding[achar]
- self.components.add(baseGlyph)
- self.components.add(accentGlyph)
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def closure_glyphs(self, s):
- cff = self.cff
- assert len(cff) == 1
- font = cff[cff.keys()[0]]
- glyphSet = font.CharStrings
-
- decompose = s.glyphs
- while decompose:
- components = set()
- for g in decompose:
- if g not in glyphSet:
- continue
- gl = glyphSet[g]
-
- subrs = getattr(gl.private, "Subrs", [])
- decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs)
- decompiler.execute(gl)
- components -= s.glyphs
- s.glyphs.update(components)
- decompose = components
-
-
-def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
- c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
- if isCFF2 or ignoreWidth:
- # CFF2 charstrings have no widths nor 'endchar' operators
- c.setProgram([] if isCFF2 else ["endchar"])
- else:
- if hasattr(font, "FDArray") and font.FDArray is not None:
- private = font.FDArray[fdSelectIndex].Private
- else:
- private = font.Private
- dfltWdX = private.defaultWidthX
- nmnlWdX = private.nominalWidthX
- pen = NullPen()
- c.draw(pen) # this will set the charstring's width
- if c.width != dfltWdX:
- c.program = [c.width - nmnlWdX, "endchar"]
- else:
- c.program = ["endchar"]
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def prune_pre_subset(self, font, options):
- cff = self.cff
- # CFF table must have one font only
- cff.fontNames = cff.fontNames[:1]
-
- if options.notdef_glyph and not options.notdef_outline:
- isCFF2 = cff.major > 1
- for fontname in cff.keys():
- font = cff[fontname]
- _empty_charstring(font, ".notdef", isCFF2=isCFF2)
-
- # Clear useless Encoding
- for fontname in cff.keys():
- font = cff[fontname]
- # https://github.com/fonttools/fonttools/issues/620
- font.Encoding = "StandardEncoding"
-
- return True # bool(cff.fontNames)
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def subset_glyphs(self, s):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
-
- glyphs = s.glyphs.union(s.glyphs_emptied)
-
- # Load all glyphs
- for g in font.charset:
- if g not in glyphs:
- continue
- c, _ = cs.getItemAndSelector(g)
-
- if cs.charStringsAreIndexed:
- indices = [i for i, g in enumerate(font.charset) if g in glyphs]
- csi = cs.charStringsIndex
- csi.items = [csi.items[i] for i in indices]
- del csi.file, csi.offsets
- if hasattr(font, "FDSelect"):
- sel = font.FDSelect
- sel.format = None
- sel.gidArray = [sel.gidArray[i] for i in indices]
- newCharStrings = {}
- for indicesIdx, charsetIdx in enumerate(indices):
- g = font.charset[charsetIdx]
- if g in cs.charStrings:
- newCharStrings[g] = indicesIdx
- cs.charStrings = newCharStrings
- else:
- cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs}
- font.charset = [g for g in font.charset if g in glyphs]
- font.numGlyphs = len(font.charset)
-
- if s.options.retain_gids:
- isCFF2 = cff.major > 1
- for g in s.glyphs_emptied:
- _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
-
- return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
-
-
-@_add_method(psCharStrings.T2CharString)
-def subset_subroutines(self, subrs, gsubrs):
- p = self.program
- for i in range(1, len(p)):
- if p[i] == "callsubr":
- assert isinstance(p[i - 1], int)
- p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
- elif p[i] == "callgsubr":
- assert isinstance(p[i - 1], int)
- p[i - 1] = (
- gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
- )
-
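-# Note: callsubr/callgsubr operands are stored biased per the CFF spec, so the
-# renumbering above maps (operand + old bias) to the original subroutine index,
-# looks up its position in the kept (_used) list, and subtracts the new bias.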
-
-@_add_method(psCharStrings.T2CharString)
-def drop_hints(self):
- hints = self._hints
-
- if hints.deletions:
- p = self.program
- for idx in reversed(hints.deletions):
- del p[idx - 2 : idx]
-
- if hints.has_hint:
- assert not hints.deletions or hints.last_hint <= hints.deletions[0]
- self.program = self.program[hints.last_hint :]
- if not self.program:
- # TODO CFF2 no need for endchar.
- self.program.append("endchar")
- if hasattr(self, "width"):
- # Insert width back if needed
- if self.width != self.private.defaultWidthX:
- # For CFF2 charstrings, this should never happen
- assert (
- self.private.defaultWidthX is not None
- ), "CFF2 CharStrings must not have an initial width value"
- self.program.insert(0, self.width - self.private.nominalWidthX)
-
- if hints.has_hintmask:
- i = 0
- p = self.program
- while i < len(p):
- if p[i] in ["hintmask", "cntrmask"]:
- assert i + 1 <= len(p)
- del p[i : i + 2]
- continue
- i += 1
-
- assert len(self.program)
-
- del self._hints
-
-
-class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
- def __init__(self, localSubrs, globalSubrs, private):
- psCharStrings.SimpleT2Decompiler.__init__(
- self, localSubrs, globalSubrs, private
- )
- for subrs in [localSubrs, globalSubrs]:
- if subrs and not hasattr(subrs, "_used"):
- subrs._used = set()
-
- def op_callsubr(self, index):
- self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
- psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
-
- def op_callgsubr(self, index):
- self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
- psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
-
-
-class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
- class Hints(object):
- def __init__(self):
- # Whether calling this charstring produces any hint stems
- # Note that if a charstring starts with hintmask, it will
- # have has_hint set to True, because it *might* produce an
- # implicit vstem if called under certain conditions.
- self.has_hint = False
- # Index to start at to drop all hints
- self.last_hint = 0
- # Index up to which we know more hints are possible.
- # Only relevant if status is 0 or 1.
- self.last_checked = 0
- # The status means:
- # 0: after dropping hints, this charstring is empty
- # 1: after dropping hints, there may be more hints
- # continuing after this, or there might be
- # other things. Not clear yet.
- # 2: no more hints possible after this charstring
- self.status = 0
- # Has hintmask instructions; not recursive
- self.has_hintmask = False
- # List of indices of calls to empty subroutines to remove.
- self.deletions = []
-
- pass
-
- def __init__(
- self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
- ):
- self._css = css
- psCharStrings.T2WidthExtractor.__init__(
- self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
- )
- self.private = private
-
- def execute(self, charString):
- old_hints = charString._hints if hasattr(charString, "_hints") else None
- charString._hints = self.Hints()
-
- psCharStrings.T2WidthExtractor.execute(self, charString)
-
- hints = charString._hints
-
- if hints.has_hint or hints.has_hintmask:
- self._css.add(charString)
-
- if hints.status != 2:
- # Check from last_checked; make sure we didn't have any operators.
- for i in range(hints.last_checked, len(charString.program) - 1):
- if isinstance(charString.program[i], str):
- hints.status = 2
- break
- else:
- hints.status = 1 # There's *something* here
- hints.last_checked = len(charString.program)
-
- if old_hints:
- assert hints.__dict__ == old_hints.__dict__
-
- def op_callsubr(self, index):
- subr = self.localSubrs[self.operandStack[-1] + self.localBias]
- psCharStrings.T2WidthExtractor.op_callsubr(self, index)
- self.processSubr(index, subr)
-
- def op_callgsubr(self, index):
- subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
- psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
- self.processSubr(index, subr)
-
- def op_hstem(self, index):
- psCharStrings.T2WidthExtractor.op_hstem(self, index)
- self.processHint(index)
-
- def op_vstem(self, index):
- psCharStrings.T2WidthExtractor.op_vstem(self, index)
- self.processHint(index)
-
- def op_hstemhm(self, index):
- psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
- self.processHint(index)
-
- def op_vstemhm(self, index):
- psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
- self.processHint(index)
-
- def op_hintmask(self, index):
- rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
- self.processHintmask(index)
- return rv
-
- def op_cntrmask(self, index):
- rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
- self.processHintmask(index)
- return rv
-
- def processHintmask(self, index):
- cs = self.callingStack[-1]
- hints = cs._hints
- hints.has_hintmask = True
- if hints.status != 2:
- # Check from last_checked; see if we may be an implicit vstem
- for i in range(hints.last_checked, index - 1):
- if isinstance(cs.program[i], str):
- hints.status = 2
- break
- else:
- # We are an implicit vstem
- hints.has_hint = True
- hints.last_hint = index + 1
- hints.status = 0
- hints.last_checked = index + 1
-
- def processHint(self, index):
- cs = self.callingStack[-1]
- hints = cs._hints
- hints.has_hint = True
- hints.last_hint = index
- hints.last_checked = index
-
- def processSubr(self, index, subr):
- cs = self.callingStack[-1]
- hints = cs._hints
- subr_hints = subr._hints
-
- # Check from last_checked; make sure we didn't have
- # any operators.
- if hints.status != 2:
- for i in range(hints.last_checked, index - 1):
- if isinstance(cs.program[i], str):
- hints.status = 2
- break
- hints.last_checked = index
-
- if hints.status != 2:
- if subr_hints.has_hint:
- hints.has_hint = True
-
- # Decide where to chop off from
- if subr_hints.status == 0:
- hints.last_hint = index
- else:
- hints.last_hint = index - 2 # Leave the subr call in
-
- elif subr_hints.status == 0:
- hints.deletions.append(index)
-
- hints.status = max(hints.status, subr_hints.status)
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def prune_post_subset(self, ttfFont, options):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
-
- # Drop unused FontDictionaries
- if hasattr(font, "FDSelect"):
- sel = font.FDSelect
- indices = _uniq_sort(sel.gidArray)
- sel.gidArray = [indices.index(ss) for ss in sel.gidArray]
- arr = font.FDArray
- arr.items = [arr[i] for i in indices]
- del arr.file, arr.offsets
-
- # Desubroutinize if asked for
- if options.desubroutinize:
- cff.desubroutinize()
-
- # Drop hints if not needed
- if not options.hinting:
- self.remove_hints()
- elif not options.desubroutinize:
- self.remove_unused_subroutines()
- return True
-
-
-def _delete_empty_subrs(private_dict):
- if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
- if "Subrs" in private_dict.rawDict:
- del private_dict.rawDict["Subrs"]
- del private_dict.Subrs
-
-
-@deprecateFunction(
- "use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning
-)
-@_add_method(ttLib.getTableClass("CFF "))
-def desubroutinize(self):
- self.cff.desubroutinize()
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def remove_hints(self):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
- # This can be tricky, but it doesn't have to be. What we do is:
- #
- # - Run all used glyph charstrings and recurse into subroutines,
- # - For each charstring (including subroutines), if it has any
- # of the hint stem operators, we mark it as such.
- # Upon returning, for each charstring we note all the
- # subroutine calls it makes that (recursively) contain a stem,
- # - Dropping hinting then consists of the following two ops:
- # * Drop the piece of the program in each charstring before the
- # last call to a stem op or a stem-calling subroutine,
- # * Drop all hintmask operations.
- # - It's trickier... A hintmask right after hints and a few numbers
- # will act as an implicit vstemhm. As such, we track whether
- # we have seen any non-hint operators so far and do the right
- # thing, recursively... Good luck understanding that :(
- css = set()
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- c.decompile()
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _DehintingT2Decompiler(
- css,
- subrs,
- c.globalSubrs,
- c.private.nominalWidthX,
- c.private.defaultWidthX,
- c.private,
- )
- decompiler.execute(c)
- c.width = decompiler.width
- for charstring in css:
- charstring.drop_hints()
- del css
-
- # Drop font-wide hinting values
- all_privs = []
- if hasattr(font, "FDArray"):
- all_privs.extend(fd.Private for fd in font.FDArray)
- else:
- all_privs.append(font.Private)
- for priv in all_privs:
- for k in [
- "BlueValues",
- "OtherBlues",
- "FamilyBlues",
- "FamilyOtherBlues",
- "BlueScale",
- "BlueShift",
- "BlueFuzz",
- "StemSnapH",
- "StemSnapV",
- "StdHW",
- "StdVW",
- "ForceBold",
- "LanguageGroup",
- "ExpansionFactor",
- ]:
- if hasattr(priv, k):
- setattr(priv, k, None)
- self.remove_unused_subroutines()
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def remove_unused_subroutines(self):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
- # Renumber subroutines to remove unused ones
-
- # Mark all used subroutines
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
- decompiler.execute(c)
-
- all_subrs = [font.GlobalSubrs]
- if hasattr(font, "FDArray"):
- all_subrs.extend(
- fd.Private.Subrs
- for fd in font.FDArray
- if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
- )
- elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
- all_subrs.append(font.Private.Subrs)
-
- subrs = set(subrs) # Remove duplicates
-
- # Prepare
- for subrs in all_subrs:
- if not hasattr(subrs, "_used"):
- subrs._used = set()
- subrs._used = _uniq_sort(subrs._used)
- subrs._old_bias = psCharStrings.calcSubrBias(subrs)
- subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)
-
- # Renumber glyph charstrings
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- subrs = getattr(c.private, "Subrs", [])
- c.subset_subroutines(subrs, font.GlobalSubrs)
-
- # Renumber subroutines themselves
- for subrs in all_subrs:
- if subrs == font.GlobalSubrs:
- if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
- local_subrs = font.Private.Subrs
- else:
- local_subrs = []
- else:
- local_subrs = subrs
-
- subrs.items = [subrs.items[i] for i in subrs._used]
- if hasattr(subrs, "file"):
- del subrs.file
- if hasattr(subrs, "offsets"):
- del subrs.offsets
-
- for subr in subrs.items:
- subr.subset_subroutines(local_subrs, font.GlobalSubrs)
-
- # Delete local SubrsIndex if empty
- if hasattr(font, "FDArray"):
- for fd in font.FDArray:
- _delete_empty_subrs(fd.Private)
- else:
- _delete_empty_subrs(font.Private)
-
- # Cleanup
- for subrs in all_subrs:
- del subrs._used, subrs._old_bias, subrs._new_bias
diff --git a/spaces/cncn102/bingo1/src/lib/isomorphic/index.ts b/spaces/cncn102/bingo1/src/lib/isomorphic/index.ts
deleted file mode 100644
index d4ebae951004bc8ec388f82548f4204a6c2a0a50..0000000000000000000000000000000000000000
--- a/spaces/cncn102/bingo1/src/lib/isomorphic/index.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-'use client'
-
-import Debug from 'debug'
-export * from 'ifw'
-
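-// On the server (where `document` is undefined) log through the `debug`
-// package; in the browser, log via console.info only when NEXT_PUBLIC_DEBUG
-// is set, otherwise no-op.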
-export const debug = typeof document === 'undefined' ? Debug('bingo')
- : process.env.NEXT_PUBLIC_DEBUG ? console.info.bind(console)
- : () => {}
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_vp9.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_vp9.h
deleted file mode 100644
index af15eb4bace70d238ab6fd9640bc3e23619cc34b..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_vp9.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_CBS_VP9_H
-#define AVCODEC_CBS_VP9_H
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "cbs.h"
-
-
-// Miscellaneous constants (section 3).
-enum {
- VP9_REFS_PER_FRAME = 3,
-
- VP9_MIN_TILE_WIDTH_B64 = 4,
- VP9_MAX_TILE_WIDTH_B64 = 64,
-
- VP9_NUM_REF_FRAMES = 8,
- VP9_MAX_REF_FRAMES = 4,
-
- VP9_MAX_SEGMENTS = 8,
- VP9_SEG_LVL_MAX = 4,
-};
-
-// Frame types (section 7.2).
-enum {
- VP9_KEY_FRAME = 0,
- VP9_NON_KEY_FRAME = 1,
-};
-
-// Frame sync bytes (section 7.2.1).
-enum {
- VP9_FRAME_SYNC_0 = 0x49,
- VP9_FRAME_SYNC_1 = 0x83,
- VP9_FRAME_SYNC_2 = 0x42,
-};
-
-// Color space values (section 7.2.2).
-enum {
- VP9_CS_UNKNOWN = 0,
- VP9_CS_BT_601 = 1,
- VP9_CS_BT_709 = 2,
- VP9_CS_SMPTE_170 = 3,
- VP9_CS_SMPTE_240 = 4,
- VP9_CS_BT_2020 = 5,
- VP9_CS_RESERVED = 6,
- VP9_CS_RGB = 7,
-};
-
-// Reference frame types (section 7.4.12).
-enum {
- VP9_INTRA_FRAME = 0,
- VP9_LAST_FRAME = 1,
- VP9_GOLDEN_FRAME = 2,
- VP9_ALTREF_FRAME = 3,
-};
-
-// Superframe properties (section B.3).
-enum {
- VP9_MAX_FRAMES_IN_SUPERFRAME = 8,
-
- VP9_SUPERFRAME_MARKER = 6,
-};
-
-
-typedef struct VP9RawFrameHeader {
- uint8_t frame_marker;
- uint8_t profile_low_bit;
- uint8_t profile_high_bit;
-
- uint8_t show_existing_frame;
- uint8_t frame_to_show_map_idx;
-
- uint8_t frame_type;
- uint8_t show_frame;
- uint8_t error_resilient_mode;
-
- // Color config.
- uint8_t ten_or_twelve_bit;
- uint8_t color_space;
- uint8_t color_range;
- uint8_t subsampling_x;
- uint8_t subsampling_y;
-
- uint8_t refresh_frame_flags;
-
- uint8_t intra_only;
- uint8_t reset_frame_context;
-
- uint8_t ref_frame_idx[VP9_REFS_PER_FRAME];
- uint8_t ref_frame_sign_bias[VP9_MAX_REF_FRAMES];
-
- uint8_t allow_high_precision_mv;
-
- uint8_t refresh_frame_context;
- uint8_t frame_parallel_decoding_mode;
-
- uint8_t frame_context_idx;
-
- // Frame/render size.
- uint8_t found_ref[VP9_REFS_PER_FRAME];
- uint16_t frame_width_minus_1;
- uint16_t frame_height_minus_1;
- uint8_t render_and_frame_size_different;
- uint16_t render_width_minus_1;
- uint16_t render_height_minus_1;
-
- // Interpolation filter.
- uint8_t is_filter_switchable;
- uint8_t raw_interpolation_filter_type;
-
- // Loop filter params.
- uint8_t loop_filter_level;
- uint8_t loop_filter_sharpness;
- uint8_t loop_filter_delta_enabled;
- uint8_t loop_filter_delta_update;
- uint8_t update_ref_delta[VP9_MAX_REF_FRAMES];
- int8_t loop_filter_ref_deltas[VP9_MAX_REF_FRAMES];
- uint8_t update_mode_delta[2];
- int8_t loop_filter_mode_deltas[2];
-
- // Quantization params.
- uint8_t base_q_idx;
- int8_t delta_q_y_dc;
- int8_t delta_q_uv_dc;
- int8_t delta_q_uv_ac;
-
- // Segmentation params.
- uint8_t segmentation_enabled;
- uint8_t segmentation_update_map;
- uint8_t segmentation_tree_probs[7];
- uint8_t segmentation_temporal_update;
- uint8_t segmentation_pred_prob[3];
- uint8_t segmentation_update_data;
- uint8_t segmentation_abs_or_delta_update;
- uint8_t feature_enabled[VP9_MAX_SEGMENTS][VP9_SEG_LVL_MAX];
- uint8_t feature_value[VP9_MAX_SEGMENTS][VP9_SEG_LVL_MAX];
- uint8_t feature_sign[VP9_MAX_SEGMENTS][VP9_SEG_LVL_MAX];
-
- // Tile info.
- uint8_t tile_cols_log2;
- uint8_t tile_rows_log2;
-
- uint16_t header_size_in_bytes;
-} VP9RawFrameHeader;
-
-typedef struct VP9RawFrame {
- VP9RawFrameHeader header;
-
- uint8_t *data;
- AVBufferRef *data_ref;
- size_t data_size;
-} VP9RawFrame;
-
-typedef struct VP9RawSuperframeIndex {
- uint8_t superframe_marker;
- uint8_t bytes_per_framesize_minus_1;
- uint8_t frames_in_superframe_minus_1;
- uint32_t frame_sizes[VP9_MAX_FRAMES_IN_SUPERFRAME];
-} VP9RawSuperframeIndex;
-
-typedef struct VP9RawSuperframe {
- VP9RawFrame frames[VP9_MAX_FRAMES_IN_SUPERFRAME];
- VP9RawSuperframeIndex index;
-} VP9RawSuperframe;
-
-typedef struct VP9ReferenceFrameState {
- int frame_width; // RefFrameWidth
- int frame_height; // RefFrameHeight
- int subsampling_x; // RefSubsamplingX
- int subsampling_y; // RefSubsamplingY
- int bit_depth; // RefBitDepth
-} VP9ReferenceFrameState;
-
-typedef struct CodedBitstreamVP9Context {
- int profile;
-
- // Frame dimensions in 8x8 mode info blocks.
- uint16_t mi_cols;
- uint16_t mi_rows;
- // Frame dimensions in 64x64 superblocks.
- uint16_t sb64_cols;
- uint16_t sb64_rows;
-
- int frame_width;
- int frame_height;
-
- uint8_t subsampling_x;
- uint8_t subsampling_y;
- int bit_depth;
-
- VP9ReferenceFrameState ref[VP9_NUM_REF_FRAMES];
-} CodedBitstreamVP9Context;
-
-
-#endif /* AVCODEC_CBS_VP9_H */
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegtables.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegtables.h
deleted file mode 100644
index 39baec3efbc739eca5f5fb5aad7bbed99e39bb79..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegtables.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * JPEG-related tables
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_JPEGTABLES_H
-#define AVCODEC_JPEGTABLES_H
-
-#include <stdint.h>
-
-#include "libavutil/attributes_internal.h"
-
-FF_VISIBILITY_PUSH_HIDDEN
-extern const uint8_t ff_mjpeg_bits_dc_luminance[];
-extern const uint8_t ff_mjpeg_val_dc[];
-
-extern const uint8_t ff_mjpeg_bits_dc_chrominance[];
-
-extern const uint8_t ff_mjpeg_bits_ac_luminance[];
-extern const uint8_t ff_mjpeg_val_ac_luminance[];
-
-extern const uint8_t ff_mjpeg_bits_ac_chrominance[];
-extern const uint8_t ff_mjpeg_val_ac_chrominance[];
-FF_VISIBILITY_POP_HIDDEN
-
-#endif /* AVCODEC_JPEGTABLES_H */
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h263dsp_msa.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h263dsp_msa.c
deleted file mode 100644
index 2e1ca0183d650274388419856658de5e7dca03a8..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/h263dsp_msa.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/mips/generic_macros_msa.h"
-#include "h263dsp_mips.h"
-
-static const uint8_t h263_loop_filter_strength_msa[32] = {
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
- 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12
-};
-
-static void h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale)
-{
- int32_t strength = h263_loop_filter_strength_msa[qscale];
- v16u8 in0, in1, in2, in3, in4, in5, in6, in7;
- v8i16 temp0, temp1, temp2;
- v8i16 diff0, diff2, diff4, diff6, diff8;
- v8i16 d0, a_d0, str_x2, str;
-
- src -= 2;
- LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in3, in2, in1);
-
- temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);
- a_d0 = __msa_hsub_u_h((v16u8) temp0, (v16u8) temp0);
- temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);
- temp2 = __msa_hsub_u_h((v16u8) temp2, (v16u8) temp2);
- temp2 <<= 2;
- diff0 = a_d0 + temp2;
- diff2 = -(-diff0 >> 3);
- str_x2 = __msa_fill_h(-(strength << 1));
- temp0 = (str_x2 <= diff2);
- diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);
- temp2 = str_x2 - diff2;
- str = __msa_fill_h(-strength);
- temp0 = (diff2 < str);
- diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);
- diff4 = diff0 >> 3;
- str_x2 = __msa_fill_h(strength << 1);
- temp0 = (diff4 <= str_x2);
- diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);
- temp2 = str_x2 - diff4;
- str = __msa_fill_h(strength);
- temp0 = (str < diff4);
- diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);
- temp0 = __msa_clti_s_h(diff0, 0);
- d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
- diff2 = -diff2 >> 1;
- diff4 >>= 1;
- diff8 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
- diff6 = (-a_d0) >> 2;
- diff6 = -(diff6);
- temp2 = -diff8;
- temp0 = (diff6 < temp2);
- diff6 = (v8i16) __msa_bmnz_v((v16u8) diff6, (v16u8) temp2, (v16u8) temp0);
- diff2 = a_d0 >> 2;
- temp0 = (diff2 <= diff8);
- diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) diff8, (v16u8) temp0);
- temp0 = __msa_clti_s_h(a_d0, 0);
- diff6 = (v8i16) __msa_bmz_v((v16u8) diff6, (v16u8) diff2, (v16u8) temp0);
- PCKEV_B2_SH(a_d0, diff6, a_d0, d0, diff6, d0);
- in0 = (v16u8) ((v16i8) in0 - (v16i8) diff6);
- in1 = (v16u8) ((v16i8) in1 + (v16i8) diff6);
- in3 = __msa_xori_b(in3, 128);
- in3 = (v16u8) __msa_adds_s_b((v16i8) in3, (v16i8) d0);
- in3 = __msa_xori_b(in3, 128);
- in2 = __msa_subsus_u_b(in2, (v16i8) d0);
- ILVR_B2_SH(in3, in0, in1, in2, temp0, temp1);
- in0 = (v16u8) __msa_ilvr_h(temp1, temp0);
- in3 = (v16u8) __msa_ilvl_h(temp1, temp0);
- ST_W8(in0, in3, 0, 1, 2, 3, 0, 1, 2, 3, src, stride);
-}
-
-static void h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale)
-{
- int32_t strength = h263_loop_filter_strength_msa[qscale];
- uint64_t res0, res1, res2, res3;
- v16u8 in0, in1, in2, in3;
- v8i16 temp0, temp2, diff0, diff2, diff4, diff6, diff8;
- v8i16 d0, a_d0, str_x2, str;
-
- src -= 2 * stride;
- LD_UB4(src, stride, in0, in3, in2, in1);
- temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);
- a_d0 = __msa_hsub_u_h((v16u8) temp0, (v16u8) temp0);
- temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);
- temp2 = __msa_hsub_u_h((v16u8) temp2, (v16u8) temp2);
- temp2 <<= 2;
- diff0 = a_d0 + temp2;
- diff2 = -(-diff0 >> 3);
- str_x2 = __msa_fill_h(-(strength << 1));
- temp0 = (str_x2 <= diff2);
- diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);
- temp2 = str_x2 - diff2;
- str = __msa_fill_h(-strength);
- temp0 = (diff2 < str);
- diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);
- diff4 = diff0 >> 3;
- str_x2 = __msa_fill_h(strength << 1);
- temp0 = (diff4 <= str_x2);
- diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);
- temp2 = str_x2 - diff4;
- str = __msa_fill_h(strength);
- temp0 = (str < diff4);
- diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);
- temp0 = __msa_clti_s_h(diff0, 0);
- d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
- diff2 = -diff2 >> 1;
- diff4 >>= 1;
- diff8 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
- diff6 = (-a_d0) >> 2;
- diff6 = -(diff6);
- temp2 = -diff8;
- temp0 = (diff6 < temp2);
- diff6 = (v8i16) __msa_bmnz_v((v16u8) diff6, (v16u8) temp2, (v16u8) temp0);
- diff2 = a_d0 >> 2;
- temp0 = (diff2 <= diff8);
- diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) diff8, (v16u8) temp0);
- temp0 = __msa_clti_s_h(a_d0, 0);
- diff6 = (v8i16) __msa_bmz_v((v16u8) diff6, (v16u8) diff2, (v16u8) temp0);
- PCKEV_B2_SH(a_d0, diff6, a_d0, d0, diff6, d0);
- in0 = (v16u8) ((v16i8) in0 - (v16i8) diff6);
- in1 = (v16u8) ((v16i8) in1 + (v16i8) diff6);
- in3 = __msa_xori_b(in3, 128);
- in3 = (v16u8) __msa_adds_s_b((v16i8) in3, (v16i8) d0);
- in3 = __msa_xori_b(in3, 128);
- in2 = __msa_subsus_u_b(in2, (v16i8) d0);
- res0 = __msa_copy_u_d((v2i64) in0, 0);
- res1 = __msa_copy_u_d((v2i64) in3, 0);
- res2 = __msa_copy_u_d((v2i64) in2, 0);
- res3 = __msa_copy_u_d((v2i64) in1, 0);
- SD4(res0, res1, res2, res3, src, stride);
-}
-
-void ff_h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t q_scale)
-{
- h263_h_loop_filter_msa(src, stride, q_scale);
-}
-
-void ff_h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t q_scale)
-{
- h263_v_loop_filter_msa(src, stride, q_scale);
-}
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/xvididct_init_mips.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/xvididct_init_mips.c
deleted file mode 100644
index 658a5792e04aa7cc47a06f85162087dca33fe4a5..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/xvididct_init_mips.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2015 Zhou Xiaoyong
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/attributes.h"
-#include "libavutil/mips/cpu.h"
-#include "xvididct_mips.h"
-
-av_cold void ff_xvid_idct_init_mips(IDCTDSPContext *c, AVCodecContext *avctx,
- unsigned high_bit_depth)
-{
- int cpu_flags = av_get_cpu_flags();
-
- if (have_mmi(cpu_flags)) {
- if (!high_bit_depth) {
- if (avctx->idct_algo == FF_IDCT_AUTO ||
- avctx->idct_algo == FF_IDCT_XVID) {
- c->idct_put = ff_xvid_idct_put_mmi;
- c->idct_add = ff_xvid_idct_add_mmi;
- c->idct = ff_xvid_idct_mmi;
- c->perm_type = FF_IDCT_PERM_NONE;
- }
- }
- }
-}
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Bingo for Cash Play Free Bingo Games and Win Real Money Prizes.md b/spaces/congsaPfin/Manga-OCR/logs/Bingo for Cash Play Free Bingo Games and Win Real Money Prizes.md
deleted file mode 100644
index c7a1234ab89d279143b469f19c8596fef0a0aa5b..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Bingo for Cash Play Free Bingo Games and Win Real Money Prizes.md
+++ /dev/null
@@ -1,105 +0,0 @@
-
-
Bingo for Cash - Real Money APK: A Fun and Fair Way to Win Prizes
-
Do you love playing bingo? Do you want to win real cash and prizes by playing a simple and enjoyable game? If you answered yes, then you should try Bingo for Cash - Real Money APK, one of the best win-real-money bingo games in 2023. In this article, we will tell you everything you need to know about this amazing app, including what it is, how to play it, what benefits it offers, and some tips and tricks for playing it. Read on and discover why Bingo for Cash - Real Money APK is the ultimate bingo game for you.
Bingo for Cash - Real Money APK is a skill-based bingo game that rewards winners with cash and prizes. Unlike other bingo games that rely on luck, this game is totally fair and based on your skills. Every player in the competition gets the same balls and cards, and those who score among the top three win. You can get matched only with other players within your skill level, so you can meet your worthy opponents.
-
Bingo for Cash - Real Money APK is also a free-to-download app that is compatible with Android devices. You can download it from the App Store or Google Play, depending on your device. You can register with your email or Facebook account, and start playing right away. You don't need to pay anything to play, but you can buy extra coins and boosts if you want to enhance your gaming experience.
-
Bingo for Cash - Real Money APK is also a fair and exciting game that matches players with similar skill levels. You can choose from different bingo rooms, each with its own theme, difficulty level, and prize pool. You can play in classic winning patterns, such as straight lines, four corners, or diagonal lines, or in special winning patterns, such as letters, shapes, or symbols. You can also play in bonus games, such as scratch cards, spin the wheel, or pick a card, to win extra coins and boosts.
-
How to Play Bingo for Cash - Real Money APK?
-
Playing Bingo for Cash - Real Money APK is very easy and fun. Here are the steps you need to follow to start playing and winning:
-
-
Download the app from the App Store or Google Play, depending on your device.
-
Register with your email or Facebook account. You will get a welcome bonus of 1000 coins and 10 boosts.
-
Choose a bingo room and buy your cards. You can buy up to four cards per game, and each card costs a certain amount of coins. The more cards you buy, the higher your chances of winning.
-
Daub the numbers as they are called and try to get as many bingos as possible. A bingo is when you mark off all the numbers in a winning pattern on your card. You can use the auto-daub feature to let the app mark the numbers for you, or you can do it manually for more challenge and fun.
-
Use boosts and power-ups to increase your score and chances of winning. Boosts are special items that you can use before or during the game, such as extra balls, double score, or free daubs. Power-ups are special effects that you can activate by filling up the power-up meter, such as instant bingo, coin shower, or mystery gift.
-
-
The game ends when all the balls are drawn or when someone gets a bingo. The winners are those who score among the top three in the competition. They will receive cash or prizes according to the prize pool of the room. The cash prizes are paid through PayPal, while the physical prizes are shipped to your address.
-
What are the Benefits of Playing Bingo for Cash - Real Money APK?
-
Playing Bingo for Cash - Real Money APK has many benefits that make it one of the best win-real-money bingo games in 2023. Here are some of them:
-
-
You can win real cash and prizes by playing a fun and easy game. You don't need any special skills or knowledge to play bingo, just a good eye and a fast finger. You can win anything from $1 to $1000 in cash, or from gift cards to gadgets in prizes.
-
You can test your skills and compete with other players from around the world. You can get matched only with other players within your skill level, so you can have a fair and exciting competition. You can also check the leaderboard and see how you rank among other players globally or locally.
-
You can enjoy a variety of themes and bonus games that add more excitement to the game. You can play in different bingo rooms, each with its own theme, such as Halloween, Christmas, or Tropical. You can also play in bonus games, such as scratch cards, spin the wheel, or pick a card, to win extra coins and boosts.
-
You can join a friendly and supportive community of bingo lovers on social media. You can follow the official Facebook, YouTube, and TikTok pages for updates, events, and special offers. You can also chat with other players in the game, send them gifts, and make new friends.
-
-
What are the Tips and Tricks for Playing Bingo for Cash - Real Money APK?
-
If you want to improve your chances of winning in Bingo for Cash - Real Money APK, here are some tips and tricks that you should know:
-
-
Play in multiple rooms to increase your chances of winning. You can play in up to four rooms at the same time, as long as you have enough coins and boosts. This way, you can have more opportunities to get bingos and win prizes.
-
Use the auto-daub feature to avoid missing any numbers. The auto-daub feature will mark the numbers on your cards automatically as they are called. This way, you won't miss any numbers or bingos due to distraction or delay.
-
Check the leaderboard and see how you rank among other players. The leaderboard will show you your current rank globally or locally based on your total score. This way, you can see how well you are doing compared to other players and aim for higher ranks.
-
Follow the official Facebook, YouTube, and TikTok pages for updates, events, and special offers. The official social media pages will keep you updated on the latest news, features, and promotions of Bingo for Cash - Real Money APK. You can also participate in events, such as tournaments, challenges, and giveaways, to win more coins and prizes. You can also get special offers, such as discounts, freebies, and coupons, to save money and get more value.
-
-
Conclusion
-
Bingo for Cash - Real Money APK is a great way to have fun and win rewards by playing bingo online. You can play a skill-based bingo game that matches you with other players of your level, and win real cash and prizes by scoring among the top three. You can also enjoy a variety of themes and bonus games that add more excitement to the game. You can also join a friendly and supportive community of bingo lovers on social media. Download the app today and start playing with other bingo enthusiasts from around the world.
-
bingo cash win real money hint apk
-bingo for cash app win real cash games
-bingo for cash real money by winner studio
-bingo clash win real jackpots and big prizes apk
-bingo blackout play free bingo games and win real money
-bingo cash free skill game where you can win real prizes
-bingo cash the new free skill game with lots of bonuses and coins
-bingo cash play live bingo games online with other players
-bingo cash start a fun and free story to play bingo games
-bingo cash play this wonderful and fun multiplayer game with your friends
-bingo cash win real money the new no-skill game where you can win real jackpots
-bingo cash win real money the best bingo game with lots of bingo bonuses
-bingo cash win real money play free bingo games and have fun all day long
-bingo cash win real money one of the most addictive games out there
-bingo cash win real money compete against a qualified family of platforms
-bingo cash win real money disable bingo wins on android to win money
-bingo cash win real money download the app and start playing now
-bingo cash win real money the new free board game that allows you to invite your friends
-bingo cash win real money start your free bingo tour and build your souvenir collection
-bingo cash win real money walk around this bingo ball and enjoy the best free bingo games
-bingo cash win real money the ‘holy grail’ fusion experiment to create a mini sun
-bingo cash win real money achieve temperatures nearly seven times hotter than the core of the sun
-bingo cash win real money achieve a net energy gain when carrying out a nuclear fusion experiment
-bingo cash win real money exciting multiplayer game with friends
-bingo cash win real money guide app that helps users to learn how to use the application properly
-make money playing games with bingo for cash app
-games that pay real money instantly with bingo for cash app
-paypal games for real money with bingo for cash app
-try your lucky with bingo for cash app and win real cash prizes
-test your skills with bingo for cash app and earn money in cash games
-love bingo? good at playing it? now you can play it for real money with bingo for cash app
-totally free game to download and you can earn money in cash games with bingo for cash app
-no-skill game where you can win real jackpots and big prizes with bingo for cash app
-play live bingo games online with other players from anywhere in the world with bingo for cash app
-start a fun and free story to play bingo games with bingo for cash app
-play this wonderful and fun multiplayer game with your friends with bingo for cash app
-one of the best win-real-money bingo games in 2023 with bingo for cash app
-lots of bonuses and coins with bingo for cash app
-fun free coin bingo cash games with bingo for cash app
-addictive games that offer real money prizes with bingo for cash app
-download the app and start playing now with bingo for cash app
-invite your friends to play live bingo games with you with bingo for cash app
-build your souvenir collection with your free bingo tour with bingo for cash app
-enjoy the best free bingo games with this wonderful and fun multiplayer game with your friends
-
FAQs
-
Is Bingo for Cash - Real Money APK legal?
-
Yes, Bingo for Cash - Real Money APK is legal in most countries where online gaming is allowed. However, you should check the laws and regulations of your country before playing, as some countries may have restrictions or prohibitions on online gaming.
-
How do I withdraw my winnings from Bingo for Cash - Real Money APK?
-
You can withdraw your winnings from Bingo for Cash - Real Money APK through PayPal. You need to have a minimum balance of $10 in your account to request a withdrawal. The withdrawal process may take up to 7 business days to complete.
-
How do I contact customer support for Bingo for Cash - Real Money APK?
-
You can contact customer support for Bingo for Cash - Real Money APK by sending an email to support@bingoforcash.com. You can also visit the help center in the app or on the website for more information and FAQs.
-
What are the minimum requirements for playing Bingo for Cash - Real Money APK?
-
The minimum requirements for playing Bingo for Cash - Real Money APK are as follows:
-
-
An Android device with version 5.0 or higher
-
An internet connection (Wi-Fi or mobile data)
-
A valid email or Facebook account
-
A PayPal account (for cash withdrawals)
-
-
How do I get more free coins and boosts in Bingo for Cash - Real Money APK?
-
You can get more free coins and boosts in Bingo for Cash - Real Money APK by doing the following:
-
-
Daily login: You can get a daily bonus of coins and boosts by logging in every day.
-
Daily tasks: You can get more coins and boosts by completing daily tasks, such as playing a certain number of games, inviting friends, or rating the app.
-
Referral program: You can get 500 coins and 5 boosts for every friend you invite to play the game. Your friend will also get the same amount of coins and boosts.
-
Social media: You can follow the official Facebook, YouTube, and TikTok pages for more free coins and boosts. You can also participate in events, contests, and giveaways to win more rewards.
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Idle Island Tycoon Mod and Enjoy the Ultimate Survival Game.md b/spaces/congsaPfin/Manga-OCR/logs/Download Idle Island Tycoon Mod and Enjoy the Ultimate Survival Game.md
deleted file mode 100644
index 33f2404dba43e7c4f40ea38d61153df67717a4bc..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download Idle Island Tycoon Mod and Enjoy the Ultimate Survival Game.md
+++ /dev/null
@@ -1,119 +0,0 @@
-
-
Download Idle Island Tycoon Mod: A Survival Game with Idle and Tycoon Elements
-
If you are looking for a game that combines survival, idle and tycoon genres, then you should check out Idle Island Tycoon. This game lets you manage a group of survivors on a tiny island after the end of the world. You will have to gather food and materials, build and upgrade buildings, craft tools, hunt animals, explore new lands and trade with merchants. You will also have to make your survivors happy and grow your population. In this article, we will tell you why you should download Idle Island Tycoon mod, how to do it, and some tips and tricks for playing the game.
-
What is Idle Island Tycoon?
-
Idle Island Tycoon is a simulation game developed by Atalana Games. It is available for Android devices on Google Play Store and for iOS devices on App Store. The game has over 500K downloads and a rating of 4.3 stars on Google Play Store. It has been praised for its graphics, gameplay, features and updates.
A game that combines survival, idle and tycoon genres
-
Idle Island Tycoon is not your typical idle game or tycoon game. It also has elements of survival games, such as crafting, hunting, exploring and managing resources. You will have to balance between producing materials and food, expanding your territory, researching new technologies and making your survivors happy. You will also have to deal with challenges such as oxide tools breaking down, animals attacking your camp or merchants offering you deals.
-
A game that lets you manage a group of survivors on a tiny island
-
Idle Island Tycoon starts with you being one of the few survivors after the world has ended because of global warming. You will have to find other survivors who are willing to join your group and work together to survive on a tiny island. You will have to assign them to different buildings and tasks according to their skills and preferences. You will also have to take care of their needs such as food, water, health and happiness.
-
A game that lets you explore new lands, craft tools, hunt animals and trade with merchants
-
Idle Island Tycoon is not a static game. You will be able to explore new lands and discover new resources, animals and secrets. You will have to craft tools and weapons to help you in your adventure. You will also have to hunt animals for food and materials, but be careful as some of them might fight back. You will also have the opportunity to trade with merchants who will visit your island from time to time. You can sell your excess materials and food, or buy rare items and skins.
-
Why download Idle Island Tycoon mod?
-
Idle Island Tycoon is a free game, but it also has some limitations and drawbacks. For example, you will have to watch ads or spend real money to get more diamonds, which are the premium currency of the game. You will also have to wait for a long time to unlock new islands and features, or to upgrade your buildings and research new technologies. You will also have to deal with the oxide tools breaking down frequently, which can slow down your progress. If you want to enjoy the game without these annoyances, you should download Idle Island Tycoon mod.
-
To get unlimited materials, diamonds and skins
-
Idle Island Tycoon mod is a modified version of the original game that gives you unlimited access to all the resources and currencies of the game. You will not have to worry about running out of materials, food or diamonds. You will be able to upgrade your buildings and research new technologies as much as you want. You will also be able to buy and use all the skins and items that are available in the game.
-
To unlock all islands and features
-
Idle Island Tycoon mod also unlocks all the islands and features that are normally locked behind levels or payments. You will be able to explore all the lands that the game has to offer, from the tropical island to the desert island, from the forest island to the snow island. You will also be able to access all the features that the game has, such as the roulette, the merchant, the museum, the laboratory and more.
-
download idle island tycoon mod apk
-how to download idle island tycoon mod for android
-idle island tycoon mod unlimited money and diamonds
-idle island tycoon mod apk latest version
-idle island tycoon mod apk free download
-download idle island tycoon mod for ios
-idle island tycoon mod apk offline
-idle island tycoon mod apk no ads
-download idle island tycoon mod for pc
-idle island tycoon mod apk 2023
-idle island tycoon mod apk revdl
-download idle island tycoon mod from modyolo.com[^1^]
-idle island tycoon mod apk happymod
-idle island tycoon mod apk rexdl
-download idle island tycoon mod with obb file
-idle island tycoon mod apk unlimited everything
-idle island tycoon mod apk android 1
-download idle island tycoon mod for windows 10
-idle island tycoon mod apk pure
-idle island tycoon mod apk 2.8.1
-download idle island tycoon mod from apkpure.com
-idle island tycoon mod apk platinmods
-idle island tycoon mod apk an1.com
-download idle island tycoon mod from apkmody.io
-idle island tycoon mod apk vip unlocked
-download idle island tycoon hack version
-idle island tycoon cheats and tips
-download idle island tycoon simulator game
-idle island tycoon guide and walkthrough
-download idle island city builder game with mods
-best mods for idle island tycoon game
-download idle island survival game with mods
-how to play idle island tycoon with mods
-download idle island builder game with mods
-how to install idle island tycoon mod on android device
-download idle island adventure game with mods
-how to update idle island tycoon mod to latest version
-download idle island empire game with mods
-how to uninstall idle island tycoon mod from android device
-download idle island paradise game with mods
-
To enjoy the game without ads or in-app purchases
-
Idle Island Tycoon mod also removes all the ads and in-app purchases that are present in the original game. You will not have to watch any ads or spend any real money to play the game. You will be able to enjoy the game without any interruptions or distractions.
-
How to download Idle Island Tycoon mod?
-
If you are convinced that Idle Island Tycoon mod is worth downloading, you might be wondering how to do it. Here are the steps that you need to follow:
-
Find a reliable source of modded APK files
-
The first thing that you need to do is find a reliable source of modded APK files. These are files that contain the modified version of the game that you want to download. There are many websites that offer these files, but not all of them are safe or trustworthy. Some of them might contain viruses, malware or spyware that can harm your device or steal your personal information. To avoid these risks, you should only download modded APK files from reputable sources that have positive reviews and feedback from other users.
-
Download and install the modded APK file on your device
-
The next thing that you need to do is download and install the modded APK file on your device. To do this, you need to follow these steps:
-
-
Go to the website where you found the modded APK file and click on the download button.
-
Wait for the file to be downloaded on your device.
-
Locate the file on your device using a file manager app.
-
Tap on the file and select install.
-
Wait for the installation process to finish.
-
You can now open and play Idle Island Tycoon mod.
-
-
Allow unknown sources and permissions if needed
-
Sometimes, you might encounter issues when trying to install the modded APK file on your device. For example, you might see a message that says "Installation blocked" or "For security reasons, your phone is not allowed to install unknown apps from this source". This means that your device settings are preventing you from installing apps from sources other than the Google Play Store. To fix this, you need to allow unknown sources and grant the necessary permissions on your device. To do this, follow these steps:
-
-
Go to your device settings, find the option that says "Unknown sources" or "Install unknown apps", and enable it.
-
You might also have to allow some permissions for the app, such as storage, network, location, etc.
-
Go back to the file manager app and try to install the modded APK file again.
-
You should be able to install and play Idle Island Tycoon mod without any problems.
-
-
Tips and tricks for playing Idle Island Tycoon
-
Now that you have downloaded and installed Idle Island Tycoon mod, you might want to know some tips and tricks for playing the game. Here are some of them:
-
Upgrade your buildings and research new technologies
-
One of the main goals of the game is to upgrade your buildings and research new technologies. This will help you produce more materials and food, increase your population, unlock new features and improve your camp. You should always try to upgrade your buildings and research new technologies as soon as you can. You can use the unlimited diamonds that you get from the modded version of the game to speed up the process.
-
Assign your survivors to the best tasks and buildings
-
Another important aspect of the game is to assign your survivors to the best tasks and buildings according to their skills and preferences. Each survivor has a different personality, skill level and happiness level. You should try to match them with the tasks and buildings that suit them best. This will increase their productivity, happiness and loyalty. You can also use the skins that you get from the modded version of the game to customize your survivors and make them look more unique.
-
Play the roulette to win amazing rewards
-
A fun feature of the game is the roulette, which is a mini-game that you can play every day. The roulette gives you a chance to win amazing rewards, such as materials, food, diamonds, skins, items and more. You can use the unlimited diamonds that you get from the modded version of the game to spin the roulette as many times as you want. You can also use the items that you get from the roulette to boost your camp or trade with merchants.
-
Conclusion
-
Idle Island Tycoon is a fun and addictive game that mixes survival, idle and tycoon elements. You will have to manage a group of survivors on a tiny island after the end of the world. You will have to gather food and materials, build and upgrade buildings, craft tools, hunt animals, explore new lands and trade with merchants. You will also have to make your survivors happy and grow your population.
-
Downloading the modded version of the game can enhance your gaming experience and make you a camping tycoon faster. You will get unlimited materials, diamonds and skins. You will also unlock all islands and features. You will also enjoy the game without ads or in-app purchases.
-
Follow the steps above to download and install the modded APK file safely and easily. Then, use the tips and tricks above to play the game like a pro.
-
FAQs
-
Is Idle Island Tycoon mod safe to use?
-
Yes, Idle Island Tycoon mod is safe to use if you download it from a reliable source of modded APK files. However, you should always be careful when downloading and installing apps from unknown sources, as they might contain viruses or malware that can harm your device or steal your personal information. You should also backup your data before installing any modded app, in case something goes wrong.
-
What are the benefits of using Idle Island Tycoon mod?
-
The benefits of using Idle Island Tycoon mod are:
-
-
You will get unlimited materials, diamonds and skins.
-
You will unlock all islands and features.
-
You will enjoy the game without ads or in-app purchases.
-
-
How can I update Idle Island Tycoon mod?
-
To update Idle Island Tycoon mod, you need to follow these steps:
-
-
Go to the website where you downloaded the modded APK file and check if there is a newer version available.
-
If there is, download it on your device.
-
Delete or uninstall the old version of Idle Island Tycoon mod on your device.
-
Install the new version of Idle Island Tycoon mod on your device following the same steps as before.
-
You can now play Idle Island Tycoon mod with the latest updates.
-
-
Can I play Idle Island Tycoon mod offline?
-
Yes, you can play Idle Island Tycoon mod offline without any problems. The game does not require an internet connection to run, except for some features such as the roulette or the merchant. You can play the game offline and enjoy the survival, idle and tycoon aspects of the game.
-
Can I play Idle Island Tycoon mod with friends?
-
Unfortunately, no. Idle Island Tycoon mod does not support multiplayer or social features. The game is a single-player game that focuses on your own camp and island. You cannot interact with other players or visit their camps or islands. However, you can still share your progress and achievements with your friends through screenshots or videos.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Growtopia Hack Download How to Access the Source Code and Customize Your Own Hacks.md b/spaces/congsaPfin/Manga-OCR/logs/Growtopia Hack Download How to Access the Source Code and Customize Your Own Hacks.md
deleted file mode 100644
index 84e6a76c4c2e377bbdc352d23d358364d8a2908a..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Growtopia Hack Download How to Access the Source Code and Customize Your Own Hacks.md
+++ /dev/null
@@ -1,136 +0,0 @@
-
-
Growtopia Hack Download: What You Need to Know
-
If you are a fan of Growtopia, the free-to-play 2D sandbox MMO game with a massive community of millions of players worldwide, you might have heard of or even used Growtopia hacks. These are unofficial tools or programs that can modify or enhance your gameplay in various ways, such as giving you unlimited gems, items, or access to other players' worlds. But before you download and use any Growtopia hack, there are some things you need to know. In this article, we will explain what Growtopia is, why it is popular, what are the types, benefits, and risks of Growtopia hacks, how to download and use them safely and legally, and what are the alternatives to Growtopia hacks.
-
What is Growtopia and why is it popular?
-
Growtopia is a game that was released in 2013 by Robinson Technologies and later acquired by Ubisoft in 2017. It is available for Microsoft Windows, iOS, Android, macOS, PlayStation 4, Xbox One, and Nintendo Switch platforms. The game has been praised for its creativity, social interaction, and user-generated content.
Growtopia is a game where you can create your own unique character and build anything you want using blocks, seeds, and items. You can also explore and visit the worlds of other players, chat with them, trade with them, or play mini-games with them. There are thousands of mini-games in Growtopia, including parkour, races, PVP battles, ghost hunting, casino games, quizzes, and more. You can also create your own mini-games using the Adventure pack or other tools.
-
The game has a currency system based on gems, which can be obtained by breaking blocks, harvesting trees, completing quests, watching ads, or buying them with real money. Gems can be used to buy items from the store or other players. Some items are rare or limited edition, which makes them more valuable and desirable. You can also craft your own items by combining different seeds or materials.
-
Growtopia community and events
-
What are Growtopia hacks and why do people use them?
-
Growtopia hacks are unofficial tools or programs that can modify or enhance your gameplay in various ways, such as giving you unlimited gems, items, or access to other players' worlds. Some people use Growtopia hacks to gain an advantage over other players, to save time and effort, to have more fun, or to satisfy their curiosity.
-
Types of Growtopia hacks and their functions
-
There are many types of Growtopia hacks, each with different functions and effects. Some of the most common types of Growtopia hacks are:
-
-
Gem hacks: These hacks can give you unlimited gems or increase the amount of gems you get from breaking blocks, harvesting trees, completing quests, watching ads, or buying them with real money.
-
Item hacks: These hacks can give you unlimited items or any item you want, including rare or limited edition items. You can also use these hacks to duplicate, trade, or sell items.
-
World hacks: These hacks can give you access to any world you want, including locked or banned worlds. You can also use these hacks to create, edit, or delete worlds.
-
Account hacks: These hacks can give you access to any account you want, including admin or moderator accounts. You can also use these hacks to change your username, password, email, or profile.
-
Other hacks: These hacks can give you other features or abilities that are not normally available in the game, such as flying, teleporting, invisibility, speed, auto-farming, auto-breaking, auto-harvesting, auto-trading, auto-selling, auto-buying, auto-building, auto-playing mini-games, and more.
-
-
Benefits and risks of using Growtopia hacks
-
Using Growtopia hacks can have some benefits and risks for you and your gameplay. Some of the benefits are:
-
-
You can get more gems, items, worlds, and accounts without spending real money or wasting time and effort.
-
You can have more fun and enjoyment by exploring new possibilities and features in the game.
-
You can impress or surprise other players with your achievements and skills.
-
-
Some of the risks are:
-
growtopia trainer download
-growtopia cheat engine download
-growtopia hack tool download
-growtopia unban hack download
-growtopia autofarm hack download
-growtopia multiboxing hack download
-growtopia spammer hack download
-growtopia sethmumu hack download
-growtopia gabb hack download
-growtopia decoder hack download
-growtopia casino hack download
-growtopia gems hack download
-growtopia fly hack download
-growtopia speed hack download
-growtopia ghost hack download
-growtopia noclip hack download
-growtopia mod menu download
-growtopia aimbot hack download
-growtopia wallhack download
-growtopia rayman hack download
-growtopia bfg hack download
-growtopia dupe hack download
-growtopia proxy hack download
-growtopia bypass hack download
-growtopia account hack download
-growtopia item hack download
-growtopia world lock hack download
-growtopia anti ban hack download
-growtopia auto clicker download
-growtopia auto builder download
-growtopia auto breaker download
-growtopia auto puncher download
-growtopia auto plant download
-growtopia auto collect download
-growtopia auto wrench download
-growtopia auto fisher download
-growtopia auto surgery download
-growtopia auto startopia download
-growtopia auto crime wave download
-growtopia auto quiz solver download
-growtopia auto captcha solver download
-growtopia free hacks download
-growtopia safe hacks download
-growtopia working hacks download
-growtopia latest hacks download
-growtopia best hacks download
-growtopia easy hacks download
-growtopia online hacks download
-
-
You can get banned or suspended from the game by the developers or moderators if they detect your hack or receive reports from other players.
-
You can lose your account or items if the hack is malicious or faulty and damages your data or steals your information.
-
You can ruin your gameplay experience or reputation by cheating or abusing the game mechanics and rules.
-
You can harm your device or system if the hack contains viruses or malware that infect your files or programs.
-
-
How to download and use Growtopia hacks safely and legally?
-
If you still want to download and use Growtopia hacks despite the risks involved, there are some ways to do it safely and legally. However, we do not recommend or endorse any Growtopia hack and we advise you to use them at your own risk and discretion.
-
Sources and methods of downloading Growtopia hacks
-
The first step to download and use Growtopia hacks is to find a reliable source that offers them. There are many websites, forums, blogs, videos, social media pages, groups, or channels that claim to provide Growtopia hacks for free or for a fee. However, not all of them are trustworthy or legitimate. Some of them may be scams that trick you into giving them your personal information or money. Some of them may be fake that do not work or deliver what they promise. Some of them may be dangerous that contain viruses or malware that harm your device or system.
-
To avoid these sources and find a reliable one, you should do some research and check some factors before downloading any Growtopia hack. Some of these factors are:
-
-
The reputation and credibility of the source: You should look for reviews, ratings, feedback, comments, testimonials, or recommendations from other users who have used the source before. You should also check the history, background, contact information, and credentials of the source.
-
The compatibility and security of the hack: You should look for the version, platform, device, system, and network requirements of the hack. You should also look for the encryption, protection, verification, and anti-ban features of the hack.
-
The availability and accessibility of the hack: You should look for the download link, file size, format, and type of the hack. You should also look for the instructions, guides, tutorials, or support of the hack.
-
After finding a reliable source and a suitable hack, you should follow the steps and methods provided by the source to download and install the hack on your device or system. You should also scan the hack with an antivirus or anti-malware program before running it to ensure that it is safe and clean.
-
Tips and precautions for using Growtopia hacks
-
The second step to download and use Growtopia hacks is to use them wisely and responsibly. There are some tips and precautions that you should follow to avoid getting banned or suspended from the game or harming your device or system. Some of these tips and precautions are:
-
-
Use Growtopia hacks only for personal and non-commercial purposes. Do not use them to cheat, scam, harass, or harm other players or the game developers or moderators.
-
Use Growtopia hacks only in moderation and discretion. Do not use them excessively or unnecessarily. Do not use them in public or crowded worlds or events. Do not use them to gain an unfair advantage over other players or to ruin their gameplay experience or reputation.
-
Use Growtopia hacks only with your own account or items. Do not use them to access or modify other players' accounts or items without their permission or consent.
-
Use Growtopia hacks only with a backup or alternative account or device. Do not use them with your main or primary account or device. Do not use them with an account or device that contains sensitive or valuable information or data.
-
Use Growtopia hacks only with a VPN or proxy service. Do not use them with your real IP address or location. Do not use them with a service that is blocked or restricted by the game developers or moderators.
-
-
What are the alternatives to Growtopia hacks?
-
If you do not want to download and use Growtopia hacks for any reason, there are some alternatives that you can try to improve your gameplay in different ways. Some of these alternatives are:
-
Official Growtopia tools and resources
-
The game developers and moderators provide some official tools and resources that can help you with your gameplay. Some of these tools and resources are:
-
-
Growtopia Wiki: This is an online encyclopedia that contains information about everything related to Growtopia, such as items, worlds, events, recipes, guides, tips, tricks, secrets, and more.
-
Growtopia Forums: This is an online community where you can interact with other players, ask questions, share ideas, give feedback, report bugs, suggest features, join contests, and more.
-
Growtopia Support: This is an online service where you can contact the game developers or moderators for any issues, problems, complaints, inquiries, requests, or suggestions related to Growtopia.
-
Growtopia YouTube Channel: This is an online platform where you can watch videos about Growtopia, such as trailers, updates, tutorials, showcases, livestreams, interviews, and more.
-
Growtopia Social Media Pages: These are online platforms where you can follow the game developers or moderators for any news, announcements, events, giveaways, or promotions related to Growtopia. You can also interact with them and other players through comments, likes, shares, or messages. Some of these platforms are Facebook, Twitter, Instagram, Discord, Reddit, and more.
-
Other ways to enhance your Growtopia experience
-
Besides using the official tools and resources, there are some other ways that you can try to enhance your Growtopia experience without using any hacks. Some of these ways are:
-
-
Practice and learn: The best way to improve your gameplay is to practice and learn from your own experience or from other players. You can try new things, experiment with different combinations, explore different worlds, play different mini-games, and more. You can also watch videos, read guides, join forums, or ask questions to learn more about the game mechanics, features, tips, tricks, secrets, and more.
-
Earn and save: Another way to improve your gameplay is to earn and save gems, items, worlds, and accounts legitimately. You can do this by breaking blocks, harvesting trees, completing quests, watching ads, or buying them with real money. You can also trade or sell items with other players or use the store or vending machines. You should also save your gems, items, worlds, and accounts wisely and securely. You should not waste them on unnecessary things or give them to strangers or scammers.
-
Have fun and be respectful: The most important way to improve your gameplay is to have fun and be respectful of yourself and others. You should enjoy the game for what it is and not take it too seriously or personally. You should also respect the game rules and regulations and not cheat or abuse the game or other players. You should also respect the game developers and moderators and not harass or harm them or their work.
-
-
Conclusion and FAQs
-
In conclusion, Growtopia is a fun and creative game that allows you to create your own character and world and interact with other players. However, some people use Growtopia hacks to modify or enhance their gameplay in various ways. Growtopia hacks can have some benefits and risks for you and your gameplay. If you want to download and use Growtopia hacks safely and legally, you should find a reliable source and a suitable hack and follow some tips and precautions. If you do not want to use Growtopia hacks for any reason, you can try some alternatives such as using the official tools and resources or other ways to enhance your Growtopia experience.
-
Here are some FAQs that you might have about Growtopia hacks:
-
-
Q: Are Growtopia hacks legal?
-
A: Growtopia hacks are not legal according to the game terms of service and privacy policy. Using Growtopia hacks can violate the intellectual property rights of the game developers or moderators or the personal rights of other players. Using Growtopia hacks can also result in legal actions or penalties from the game developers or moderators or other parties.
-
Q: Are Growtopia hacks safe?
-
A: Growtopia hacks are not safe according to the game security measures and standards. Using Growtopia hacks can expose your device or system to viruses or malware that can harm your files or programs. Using Growtopia hacks can also expose your account or items to hackers or scammers that can steal your information or data.
-
Q: How can I avoid getting banned or suspended from Growtopia for using hacks?
-
A: The best way to avoid getting banned or suspended from Growtopia for using hacks is to not use them at all. However, if you still want to use them, you should use them wisely and responsibly. You should use them only for personal and non-commercial purposes. You should use them only in moderation and discretion. You should use them only with your own account or items. You should use them only with a backup or alternative account or device. You should use them only with a VPN or proxy service.
-
Q: How can I report someone who is using Growtopia hacks?
-
A: If you encounter someone who is using Growtopia hacks in the game, you can report them by using the report button in the chat menu or by contacting the game support team through email or social media platforms. You should provide evidence such as screenshots, videos, or chat logs that show the hack or its effects. You should also provide the username, world name, or account ID of the hacker if possible. You should not confront or provoke the hacker or reveal your report to them or others.
-
Q: Where can I find more information about Growtopia hacks?
-
A: If you want to find more information about Growtopia hacks, you can use the search engine of your choice and type in keywords such as "Growtopia hack download", "Growtopia hack 2023", "Growtopia hack apk", "Growtopia hack pc", "Growtopia hack ios", "Growtopia hack android", "Growtopia hack gems", "Growtopia hack items", "Growtopia hack world", "Growtopia hack account", or "Growtopia hack tool". However, you should be careful and cautious when visiting any website or link that offers Growtopia hacks and follow the tips and precautions that we have mentioned earlier.
-
-
I hope this article has helped you understand what Growtopia hacks are, why people use them, how to download and use them safely and legally, and what are the alternatives to them. If you have any questions or comments, feel free to leave them below. Thank you for reading and have a great day!
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Pixel Car Racer MOD APK 2023 The Best Retro Racing Game with Sandbox RPG.md b/spaces/congsaPfin/Manga-OCR/logs/Pixel Car Racer MOD APK 2023 The Best Retro Racing Game with Sandbox RPG.md
deleted file mode 100644
index 5ec15415abf81a26218d0c751b690b1f1501be9b..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Pixel Car Racer MOD APK 2023 The Best Retro Racing Game with Sandbox RPG.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
Pixel Car Racer 2023 Mod Apk: A Retro Racing Game with Unlimited Customization
-
If you are a fan of racing games, you might have heard of Pixel Car Racer, a popular game that combines retro style graphics with sandbox and RPG elements. In this game, you can build your dream garage, customize your cars, and race against other players online or offline. But what if you want to enjoy the game without any limitations or restrictions? That's where Pixel Car Racer 2023 Mod Apk comes in handy. In this article, we will tell you everything you need to know about this mod apk, including its features, benefits, and how to download and install it on your device.
-
What is Pixel Car Racer?
-
Pixel Car Racer is a racing game that was released in 2016 by Studio Furukawa, an indie game developer based in Canada. The game is inspired by the classic arcade racing games from the 80s and 90s, such as Out Run, Gran Turismo, and Need for Speed. The game features pixelated graphics, retro sound effects, and a simple yet addictive gameplay.
Pixel Car Racer has many features that make it stand out from other racing games. Here are some of them:
-
- Retro graphics and sound effects
-
The game has a nostalgic feel to it, thanks to its pixel art style and chiptune music. The game also has realistic physics and engine sounds that add to the immersion.
-
- Sandbox mode and RPG elements
-
The game has a sandbox mode where you can freely explore the open world map, drive around, and interact with other cars. You can also upgrade your skills and level up your character as you play.
-
pixel car racer unlimited money mod apk 2023
-pixel car racer hack apk download 2023
-pixel car racer mod apk free supercars 2023
-pixel car racer apk mod dinero infinito 2023
-pixel car racer mod apk latest version 2023
-pixel car racer mod apk unlocked cars 2023
-pixel car racer mod apk android 1 2023
-pixel car racer mod apk ios download 2023
-pixel car racer mod apk no root 2023
-pixel car racer mod apk offline 2023
-pixel car racer mod apk unlimited diamonds 2023
-pixel car racer mod apk unlimited crates 2023
-pixel car racer mod apk unlimited parts 2023
-pixel car racer mod apk unlimited rp 2023
-pixel car racer mod apk all cars unlocked 2023
-pixel car racer mod apk revdl 2023
-pixel car racer mod apk rexdl 2023
-pixel car racer mod apk happymod 2023
-pixel car racer mod apk an1 2023
-pixel car racer mod apk apkpure 2023
-pixel car racer mod apk aptoide 2023
-pixel car racer mod apk android republic 2023
-pixel car racer mod apk blackmod 2023
-pixel car racer mod apk by androidoyun club 2023
-pixel car racer mod apk by lenov.ru 2023
-pixel car racer mod apk cheat menu 2023
-pixel car racer mod apk clubapk.com 2023
-pixel car racer mod apk cracked.com 2023
-pixel car racer mod apk download for pc 2023
-pixel car racer mod apk download latest version 2023
-pixel car racer mod apk download uptodown 2023
-pixel car racer mod apk download for android phoneky.com 2023
-pixel car racer mod apk everything unlocked 2023
-pixel car racer mod apk esp hack 2023
-pixel car racer mod apk easy download link.com 2023
-pixel car racer mod apk free download for android mobile9.com 2023
-pixel car racer mod apk free shopping 2023
-pixel car racer mod apk free fire hack version.com 2023
-pixel car racer mod apk gamestechy.com 2023
-pixel car racer mod apk god mode 2023
-
- Over 1000 cars and parts to customize
-
The game has a huge collection of cars and parts that you can use to create your own unique vehicles. You can choose from different brands, models, colors, wheels, spoilers, engines, turbos, exhausts, transmissions, and more. You can also apply stickers, decals, and paint jobs to your cars.
-
- Drag and street racing modes
-
The game has two main racing modes: drag and street. In drag mode, you have to compete in a straight line race against another car. You have to shift gears at the right time and use nitrous oxide to boost your speed. In street mode, you have to race on different tracks with curves, turns, and obstacles. You have to avoid crashing into other cars or objects while trying to reach the finish line first.
-
- Online multiplayer and leaderboards
-
The game has an online multiplayer mode where you can race against other players from around the world. You can join or create rooms with different settings and rules. You can also chat with other players and make friends or rivals. The game also has leaderboards where you can see your rank and stats compared to other players.
What is Pixel Car Racer 2023 Mod Apk?
-
Pixel Car Racer 2023 Mod Apk is a modified version of the original Pixel Car Racer game that gives you access to unlimited resources and features that are not available in the official game. With this mod apk, you can enjoy the game without any limitations or restrictions. You can unlock all the cars and parts, get unlimited money and diamonds, and remove all the ads from the game. You can also play the game without rooting your device, which means you don't have to worry about any security risks or compatibility issues.
-
Benefits of Pixel Car Racer 2023 Mod Apk
-
Pixel Car Racer 2023 Mod Apk has many benefits that make it worth downloading and installing on your device. Here are some of them:
-
- Unlimited money and diamonds
-
Money and diamonds are the main currencies in Pixel Car Racer. You need them to buy cars, parts, upgrades, and other items in the game. However, earning them can be time-consuming and tedious, especially if you want to get the best cars and parts. With Pixel Car Racer 2023 Mod Apk, you don't have to worry about that. You will get unlimited money and diamonds in your account as soon as you start the game. You can use them to buy anything you want without any restrictions.
-
- Free supercars and premium parts
-
Pixel Car Racer has a huge collection of cars and parts that you can use to customize your vehicles. However, some of them are locked behind a paywall or require a lot of grinding to unlock. For example, some of the supercars like Bugatti Veyron, Lamborghini Huracan, and Ferrari LaFerrari are only available through in-app purchases or rare crates. With Pixel Car Racer 2023 Mod Apk, you don't have to spend any real money or waste any time to get these supercars and premium parts. You can unlock them for free with the mod apk and use them in your garage.
-
- No ads and no root required
-
Pixel Car Racer is a free-to-play game, which means it has ads that can interrupt your gameplay and annoy you. These ads can also consume your data and battery life. With Pixel Car Racer 2023 Mod Apk, you can get rid of all the ads from the game and enjoy a smooth and uninterrupted gaming experience. You can also play the game without rooting your device, which means you don't have to modify your system settings or risk damaging your device. The mod apk is compatible with most Android devices and works flawlessly.
How to download and install Pixel Car Racer 2023 Mod Apk?
-
If you are interested in downloading and installing Pixel Car Racer 2023 Mod Apk on your device, you can follow these simple steps:
-
Steps to download and install Pixel Car Racer 2023 Mod Apk
-
- Step 1: Enable unknown sources on your device
-
Before you can install any mod apk file on your device, you need to enable unknown sources in your security settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-
- Step 2: Download the mod apk file from a trusted source
-
Next, you need to download the mod apk file from a reliable and safe source. You can use the link below to download the latest version of Pixel Car Racer 2023 Mod Apk. The file size is about 67 MB and it is virus-free and malware-free.
- Step 3: Locate and install the mod apk file on your device
-
After you have downloaded the mod apk file, you need to locate it on your device and install it. You can use any file manager app to find the file in your downloads folder. Tap on the file and follow the instructions to install it on your device.
-
- Step 4: Launch the game and enjoy the mod features
-
Finally, you can launch the game and enjoy the mod features. You will see that you have unlimited money and diamonds in your account, as well as all the cars and parts unlocked. You can also play the game without any ads or root requirement. Have fun with Pixel Car Racer 2023 Mod Apk!
-
Conclusion
-
Pixel Car Racer is a fun and addictive racing game that lets you customize your cars and race against other players online or offline. However, if you want to enjoy the game without any limitations or restrictions, you should try Pixel Car Racer 2023 Mod Apk. This mod apk gives you unlimited money and diamonds, free supercars and premium parts, no ads and no root requirement, and more. You can download and install Pixel Car Racer 2023 Mod Apk on your device by following the steps above. We hope this article was helpful for you. If you have any questions or feedback, feel free to leave a comment below.
-
FAQs
-
Here are some frequently asked questions about Pixel Car Racer 2023 Mod Apk:
-
-
Is Pixel Car Racer 2023 Mod Apk safe to use?
-
Yes, Pixel Car Racer 2023 Mod Apk is safe to use. It does not contain any viruses or malware that can harm your device or data. It also does not require root access or any permissions that can compromise your privacy or security.
-
Will Pixel Car Racer 2023 Mod Apk work on my device?
-
Pixel Car Racer 2023 Mod Apk is compatible with most Android devices that run on Android 4.0.3 or higher. However, some devices may not support the mod apk due to different specifications or configurations. If you encounter any problems with the mod apk, you can try uninstalling and reinstalling it, or contact the developer for assistance.
-
Can I update Pixel Car Racer 2023 Mod Apk?
-
No, you cannot update Pixel Car Racer 2023 Mod Apk from the Google Play Store or any other source. If you do so, you will lose all the mod features and revert back to the original game. To get the latest version of Pixel Car Racer 2023 Mod Apk, you need to download it from the link provided above.
-
Can I play online with Pixel Car Racer 2023 Mod Apk?
-
Yes, you can play online with Pixel Car Racer 2023 Mod Apk. However, you may face some issues with connecting to other players or servers due to the mod features. You may also get banned from the game if you abuse the mod features or cheat in online races. Therefore, we advise you to use the mod apk at your own risk and discretion.
-
Can I request more features for Pixel Car Racer 2023 Mod Apk?
-
Yes, you can request more features for Pixel Car Racer 2023 Mod Apk. However, we cannot guarantee that your requests will be fulfilled or implemented by the developer. You can contact the developer through their official website or social media accounts and share your feedback and suggestions.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/The ultimate guide to Call of Duty Mobile - Garena APK for Android users.md b/spaces/congsaPfin/Manga-OCR/logs/The ultimate guide to Call of Duty Mobile - Garena APK for Android users.md
deleted file mode 100644
index 56bce95c7bb043eb2b169c2b71758797045f99b7..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/The ultimate guide to Call of Duty Mobile - Garena APK for Android users.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
Call of Duty Mobile Asia APK: How to Download and Play the Popular FPS Game on Your Android Device
- If you are a fan of first-person shooter (FPS) games, you must have heard of Call of Duty Mobile, one of the most successful and popular mobile games in the world. But did you know that there is a special version of the game for Asian players? In this article, we will tell you everything you need to know about Call of Duty Mobile Asia APK, how to download and play it on your Android device, and why you should give it a try.
What is Call of Duty Mobile Asia APK?
-
A brief introduction to the game and its features
- Call of Duty Mobile is a free-to-play FPS game that brings the classic Call of Duty experience to your mobile device. You can play solo or with your friends in various game modes, such as Team Deathmatch, Domination, Search and Destroy, Battle Royale, and more. You can also customize your loadout, unlock new weapons, skins, perks, and operators, and rank up in the leaderboards. Call of Duty Mobile Asia APK is a special version of the game that is developed by Garena, a leading online game platform in Southeast Asia. It is designed to cater to the preferences and needs of Asian players, such as language support, server stability, regional events, and exclusive content.
The difference between the global and the Asian versions
- The main difference between Call of Duty Mobile Asia APK and the global version is that they are operated by different publishers. The global version is published by Activision, while the Asian version is published by Garena. This means that they have different servers, updates, events, and features. Some of the notable differences are:
- The Asian version has more language options, such as Chinese, Thai, Indonesian, Vietnamese, Malay, Filipino, etc.
- The Asian version has faster updates and bug fixes than the global version.
- The Asian version has more regional events and tournaments that offer exclusive rewards and prizes.
- The Asian version has some unique content that is not available in the global version, such as skins, weapons, operators, etc.
How to Download Call of Duty Mobile Asia APK?
-
The official sources to get the APK file
- If you want to download Call of Duty Mobile Asia APK on your Android device, you have two options:
- You can download it from Google Play Store if it is available in your country. Just search for "Call of Duty Mobile Garena" and install it.
- You can download it from the official website of Garena. Just go to codm.garena.com and click on "Download Now". You will get an APK file that you can transfer to your device.
The steps to install and run the game
- After you have downloaded the APK file, you need to follow these steps to install and run the game:
- Enable "Unknown Sources" on your device settings. This will allow you to install apps from sources other than Google Play Store.
- Locate the APK file on your device and tap on it. You will see a prompt asking for permission to install the app. Tap on "Install".
- Wait for the installation process to finish. You will see a shortcut icon on your home screen or app drawer.
- Tap on the icon to launch the game. You will need an internet connection to play the game.
- Log in with your Garena account or create a new one if you don't have one. You can also use your Facebook, Google, or Apple account to log in.
- Choose your preferred language and region. You can change them later in the settings.
- Enjoy the game!
How to Play Call of Duty Mobile Asia APK?
-
The game modes and maps available
- Call of Duty Mobile Asia APK offers a variety of game modes and maps for you to choose from. You can play:
- Multiplayer: This is the classic mode where you can compete with other players in different modes, such as Team Deathmatch, Domination, Search and Destroy, etc. You can also play ranked matches to earn points and rewards.
- Battle Royale: This is the mode where you can join a 100-player match and fight for survival. You can choose your class, loadout, vehicle, and landing spot. You can also play solo, duo, or squad mode.
- Zombies: This is the mode where you can team up with other players and fight against hordes of zombies. You can also play different maps and modes, such as Survival, Raid, Hardcore, etc.
- Seasonal Events: These are the special events that are available for a limited time. They offer new challenges, missions, rewards, and content.
- The game also features a variety of maps from the Call of Duty franchise, such as Nuketown, Crash, Crossfire, Hijacked, Standoff, etc. You can also play some exclusive maps that are only available in the Asian version, such as Cage, Saloon, Rust, etc.
The tips and tricks to improve your skills and performance
- If you want to become a better player in Call of Duty Mobile Asia APK, here are some tips and tricks that you can follow:
- Adjust your sensitivity and controls according to your preference. You can also use the gyroscope feature to aim better.
- Use the right weapons and attachments for different situations. You can also upgrade your weapons with camos and skins to enhance their stats.
- Use the perks and operators that suit your playstyle. You can also unlock new ones by completing missions and challenges.
- Communicate with your teammates using the voice chat or text chat feature. You can also use the quick chat feature to send messages and commands.
- Practice your skills in the training mode or the private match mode. You can also watch replays and tutorials to learn from other players.
Why Should You Play Call of Duty Mobile Asia APK?
-
The benefits of playing the Asian version
- There are many reasons why you should play Call of Duty Mobile Asia APK instead of the global version. Some of them are:
- You will have a better gaming experience with less lag and more stability. The Asian version has more servers and regions to choose from.
- You will have more opportunities to participate in regional events and tournaments that are exclusive to the Asian version. You can also win amazing prizes and rewards that are not available in the global version.
- You will have more access to unique content that is only available in the Asian version. You can enjoy new skins, weapons, operators, modes, maps, etc. that are not found in the global version.
The challenges and rewards you can expect
- Playing Call of Duty Mobile Asia APK is not only fun but also challenging. You will face many obstacles and difficulties that will test your skills and abilities. Some of them are:
- You will compete with millions of players from different countries and regions. You will encounter different playstyles, strategies, and tactics that will challenge you.
- You will have to adapt to different game modes and maps that require different skills and knowledge. You will have to learn how to use different weapons, perks, operators, vehicles, etc.
- You will have to complete various missions and challenges that will require you to perform certain tasks or achieve certain goals. You will have to be creative and resourceful.
- However, playing Call of Duty Mobile Asia APK is also rewarding. You will gain many benefits and advantages that will make you happy and satisfied. Some of them are:
- You will improve your skills and performance as a player. You will learn new things and master new techniques that will make you a better player.
- You will earn various rewards and achievements that will show your progress and accomplishments. You will unlock new items and content that will enhance your gameplay.
- You will have fun and enjoyment as a gamer. You will experience the thrill and excitement of playing one of the best FPS games on mobile.
Conclusion
- Call of Duty Mobile Asia APK is a great option for FPS fans who want to enjoy the Call of Duty experience on their mobile devices. It offers a lot of features and content that are tailored for Asian players. It is easy to download and play on any Android device. It is also challenging and rewarding for any skill level. If you are looking for a new and exciting way to play Call of Duty Mobile, you should definitely try Call of Duty Mobile Asia APK. It will give you a different and better gaming experience than the global version. You will not regret it.
FAQs
- Here are some frequently asked questions about Call of Duty Mobile Asia APK:
- Q: Is Call of Duty Mobile Asia APK safe and legal to download and play?
- A: Yes, Call of Duty Mobile Asia APK is safe and legal to download and play. It is developed and published by Garena, a reputable and licensed online game platform. It does not contain any viruses, malware, or illegal content.
- Q: Can I play Call of Duty Mobile Asia APK with players from the global version?
- A: No, you cannot play Call of Duty Mobile Asia APK with players from the global version. They are separate versions that have different servers, updates, events, and features. You can only play with players from the Asian version.
- Q: Do I need a VPN to play Call of Duty Mobile Asia APK?
- A: No, you do not need a VPN to play Call of Duty Mobile Asia APK. The game is available in most Asian countries and regions. However, if you are in a country or region where the game is not available, you may need a VPN to access it.
- Q: How much storage space do I need to download and install Call of Duty Mobile Asia APK?
- A: You need at least 2 GB of free storage space to download and install Call of Duty Mobile Asia APK. The game size may vary depending on the updates and content.
- Q: How can I contact the customer service or support team of Call of Duty Mobile Asia APK?
- A: You can contact the customer service or support team of Call of Duty Mobile Asia APK by using the following methods:
- You can use the in-game feedback feature to report any issues or suggestions.
- You can visit the official website of Garena at garena.com and click on "Support".
- You can follow the official social media accounts of Garena at Facebook, Instagram, Twitter, YouTube, etc.
-
-
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Daem Chess Studio Full [PORTABLE] 76.md b/spaces/diacanFperku/AutoGPT/Daem Chess Studio Full [PORTABLE] 76.md
deleted file mode 100644
index 31f5a8092190b9f30e9da3baac41e12799a29a4f..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Daem Chess Studio Full [PORTABLE] 76.md
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
Daem Chess Studio Full 76: A Powerful Windows Desktop Application for Chess Lovers
-
-
Chess is a game of strategy, logic, and skill that can challenge and entertain anyone who plays it. Whether you are a beginner or an expert, a casual or a competitive player, a hobbyist or a professional, you can always improve your chess skills and enjoy the game more with the help of good chess software.
-
-
One of the best chess programs that you can find online is Daem Chess Studio Full 76. This is a Windows desktop application that allows you to play, analyze, and learn chess with ease and convenience. You can use it to play against the computer or online opponents, to study chess openings, tactics, and endgames, to practice chess puzzles and problems, to watch and replay chess games and tournaments, and to create and edit your own chess databases and books.
Daem Chess Studio Full 76 is not free software. It costs $49 for a single license. However, if you want to save some money and get the software for free, you might have come across websites that offer a keygen or a crack for Daem Chess Studio Full 76.
-
-
A keygen is a program that generates serial numbers or activation codes for software products. A crack is a program that modifies or bypasses the security features of a software product. Both of these methods are illegal and risky, as they can expose your computer to malware, viruses, or other threats.
-
-
In this article, we will show you how to download and activate Daem Chess Studio Full 76 with a keygen for 49. We will also explain why this is not a good idea, and what are the alternatives to get the software legally and safely.
-
-
How to Download and Activate Daem Chess Studio Full 76 with a Keygen for 49
-
-
There are many websites that claim to offer keygens or cracks for Daem Chess Studio Full 76. However, most of them are fake or malicious, and can harm your computer or steal your personal information. Therefore, we do not recommend using any of them.
-
-
However, for the sake of demonstration, we will use one of the websites that we found online. We do not endorse or promote this website in any way, and we advise you to avoid visiting it or downloading anything from it.
-
-
The website is called soundcloud.com/elcomsurpma1977/daem-chess-studio-full-hot-76, and it claims to offer an audio file of Daem Chess Studio Full 76 Keygen For 49. Here are the steps to follow:
-
-
-
Go to https://soundcloud.com/elcomsurpma1977/daem-chess-studio-full-hot-76 and click on the play button at the top of the page.
-
You will hear a voice saying "Welcome to Daem Chess Studio Full 76 Keygen For 49". The voice will then give you instructions on how to download and activate the software.
-
You will have to visit another website called uploadship.com/file/0c0f9c8a8a9f4b6d/Daem_Chess_Studio_Full_76_Keygen_For_49.zip where you will have to wait for 15 seconds before you can download the file.
-
After 15 seconds, click on the download button and save the file on your computer. The file name is Daem_Chess_Studio_Full_76_Keygen_For_49.zip.
-
Extract the file using WinZip or any other program that can handle ZIP files. You will get two files: one called setup.exe and one called keygen.exe.
-
Open the file called setup.exe and follow the instructions to install Daem Chess Studio Full 76 on your computer.
-
Do not launch the software yet. Open the file called keygen.exe and click on Generate. You will get a serial number or an activation code for Daem Chess Studio Full 76.
-
Launch Daem Chess Studio Full 76 and enter the serial number or the activation code when prompted. You should be able to use the software for free.
-
-
-
Why You Should Not Use a Keygen or a Crack for Daem Chess Studio Full 76
-
-
While using a keygen or a crack might seem like an easy way to get Daem Chess Studio Full 76 for free, it is actually a very bad idea. Here are some of the reasons why:
-
-
-
You are breaking the law and violating the terms of service of Daem Chess Studio. This can result in legal consequences, such as fines or lawsuits.
-
You are risking your computer's security and performance. The keygen or crack might contain malware, viruses, spyware, or other harmful programs that can damage your system, steal your data, or compromise your privacy.
-
You are missing out on updates and support from Daem Chess Studio. The keygen or crack might prevent you from updating your software or accessing online features. You will also not be able to get technical support or customer service from Daem Chess Studio if you encounter any problems with your software.
-
You are hurting the developers of Daem Chess Studio. By using a keygen or crack, you are depriving them of their rightful income and discouraging them from creating more quality products in the future.
-
-
-
How to Get Daem Chess Studio Full 76 Legally and Safely
-
-
If you want to use Daem Chess Studio Full 76 without breaking the law or risking your computer's security, you should buy it from the official website of Daem Chess Studio at https://www.daemonsoft.com/. Here are some of the benefits of doing so:
-
-
-
-
You will get a genuine license and activation code that will work for your software.
-
You will get access to updates and support from Daem Chess Studio. You will be able to download the latest versions of your software and enjoy new features and improvements. You will also be able to contact Daem Chess Studio's customer service if you need any help with your software.
-
You will support the developers of Daem Chess Studio. By paying for their product, you will show them your appreciation and encourage them to keep making more quality products in the future.
-
-
-
The price of Daem Chess Studio Full 76 is $49 for a single license. However, there are some ways to save some money and get discounts on your purchase:
-
-
-
You can use coupons or promo codes that are available online. You can search for them on websites like RetailMeNot or CouponChief.
-
You can buy multiple licenses at once if you need them for your friends or family members. You can get volume discounts based on how many licenses you buy.
-
-
-
How to Use Daem Chess Studio Full 76 for Playing, Analyzing, and Learning Chess
-
-
Once you have installed and activated Daem Chess Studio Full 76, you can start using it to play, analyze, and learn chess with ease and convenience. Here are some of the features and functions that you can use:
-
-
-
You can play against the computer or online opponents using various modes and options. You can choose from different levels of difficulty, time controls, board styles, piece sets, sound effects, etc. You can also undo moves, save games, load games, and more.
-
You can analyze your games or positions using various tools and options. You can use an engine analysis window that shows you the best moves and evaluations according to different engines. You can also use an annotation window that shows you comments and symbols according to different criteria. You can also use a notation window that shows you moves in algebraic notation.
-
You can learn chess openings, tactics, endgames using various tools and options. You can use an opening explorer window that shows you statistics and information about different openings according to different databases. You can also use a tactics trainer window that shows you puzzles and problems according to different themes and levels. You can also use an endgame trainer window that shows you endgame positions according to different types and principles.
-
You can watch and replay chess games and tournaments using various tools and options. You can use a game viewer window that shows you games in PGN format with moves, annotations, diagrams etc. You can also use a tournament viewer window that shows you tournaments in TRN format with standings tables etc.
-
You can create and edit your own chess databases and books using various tools and options. You can use a database manager window that lets you create, open, save, close, and delete databases, and a book manager window that does the same for books. You can also add, delete, edit, import, and export games and positions in your databases and books, search, filter, and sort them according to different criteria, and merge or split databases and books.
-
-
-
Daem Chess Studio Full 76 is a software that has a lot to offer for anyone who wants to play, analyze, and learn chess. By using these features and functions, you can improve your chess skills and enjoy the game more.
-
-
-
How to Learn More About Daem Chess Studio Full 76 and Its Features
-
-
If you want to learn more about Daem Chess Studio Full 76 and its features, you can use various resources and materials that are available online or offline. Here are some of them:
-
-
-
You can visit the official website of Daem Chess Studio at https://www.daemonsoft.com/. You can find information about the software, its pricing, its features, its solutions, its support, and its community. You can also download a free trial version of Daem Chess Studio Full 76 or buy a license from the website.
-
You can read the user guides and manuals that are provided with Daem Chess Studio Full 76. You can find them in the Help menu of the software or in the installation folder of Daem Chess Studio Full 76. You can also download them from https://www.daemonsoft.com/support/documentation.html. You can learn about the basics, the advanced topics, and the best practices of using Daem Chess Studio Full 76.
-
You can watch the video tutorials and webinars that are available on Daem Chess Studio's YouTube channel at https://www.youtube.com/channel/UCw8QZQzZ6gXl9f0qy7n3YwA. You can find videos about various topics, such as getting started, designing layouts, managing data, sharing apps, developing apps, and more. You can also subscribe to the channel to get notified of new videos.
-
You can join the online forums and groups that are dedicated to Daem Chess Studio users and developers at https://www.daemonsoft.com/forum/. You can ask questions, share tips, exchange ideas, get feedback, and network with other Daem Chess Studio enthusiasts. You can also participate in challenges, events, and contests that are organized by Daem Chess Studio or its partners.
-
You can enroll in online courses or live training sessions that are offered by Daem Chess Studio or its authorized trainers at https://www.daemonsoft.com/learning/. You can find courses for different levels, from beginners to experts. You can also get certified as a Daem Chess Studio developer by taking an exam at https://www.daemonsoft.com/learning/certification/.
-
-
-
Daem Chess Studio Full 76 is a software that has a lot to offer for anyone who wants to play, analyze, and learn chess. By using these resources and materials, you can learn more about Daem Chess Studio Full 76 and its features, and improve your skills and knowledge as a Daem Chess Studio user or developer.
-
Conclusion
-
-
Daem Chess Studio Full 76 is a great software for playing, analyzing, and learning chess. It has many features and functions that can help you with your chess needs. However, it is not worth using a keygen or a crack to get it for free, as it can cause legal troubles, security risks, and ethical issues.
-
-
The best way to get Daem Chess Studio Full 76 is to buy it from the official website of Daem Chess Studio at https://www.daemonsoft.com/. You can get a genuine license and activation code that will work for your software. You can also get access to updates and support from Daem Chess Studio. You can also save some money by using coupons or promo codes, or by buying multiple licenses at once.
-
-
If you want to learn more about Daem Chess Studio Full 76 and its features, you can use various resources and materials that are available online or offline. You can visit the official website of Daem Chess Studio, read the user guides and manuals, watch the video tutorials and webinars, join the online forums and groups, or enroll in online courses or live training sessions. You can also get certified as a Daem Chess Studio developer by taking an exam.
-
-
We hope this article has helped you understand how to download and activate Daem Chess Studio Full 76 with a keygen for 49, why you should not do it, what are the alternatives to do it legally and safely, how to use Daem Chess Studio Full 76 for playing, analyzing, and learning chess, and how to learn more about Daem Chess Studio Full 76 and its features.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Evermotion Archinteriors Vol 44.md b/spaces/diacanFperku/AutoGPT/Evermotion Archinteriors Vol 44.md
deleted file mode 100644
index 0ca87a305556154e969efc8d7ff804326c005d0c..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Evermotion Archinteriors Vol 44.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Find films and movies featuring J. Or a better way to deal with your tension is ... I'll go to sleep; everyone was singing songs and making a racket at 3:00 at night ... Dard Bhari Shayari (sad shayari) with Dard Shayari Images HD Photos Wallpapers ... 1fdad05405
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Kernel For Sql Database Recovery Keygen PORTABLE.md b/spaces/diacanFperku/AutoGPT/Kernel For Sql Database Recovery Keygen PORTABLE.md
deleted file mode 100644
index 87ac1dd2037cc2e381f10e649fbbeff90b101ba1..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Kernel For Sql Database Recovery Keygen PORTABLE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
the smart recovery wizard can scan your existing sql database and allows you to extract the database using its intuitive wizard. you do not need to install anything or boot into your windows database machine; the recovery wizard scans your database for bad sectors, bad clusters, and damaged parts. as it scans for bad sectors in your databases, the tool produces a log of the scan and captures all the data found in the database.
-
backupit is a tool that works on the same principle as conventional database backup. however, it works on cloud files. users have the ability to back up multiple server files to a local location to take advantage of the file systems local file storage. they can also backup files directly to the cloud.
so, you are a mysql administrator and you need to recover the database tables which your boss has accidentally deleted. you need sql server backup, sql server recovery, and other similar tools.
-
at times, you may have to restore the database from a backup copy. in case you do not have the backup copy, you must use the sql backup for sql server. you can also connect your pc to the sql server machine over a network, and then boot your computer.
-
sql recovery tool is an advanced tool that can recover files and databases from corrupt files. with this tool you can restore deleted tables, triggers, procedures, views, functions, sequences, and any other object of the sql server database. you can export data from the sql server database to other database files such as access or excel, and to sql server files such as .mdb, .accdb, .sql, .bak, and .ldf.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/diffle/license/app.py b/spaces/diffle/license/app.py
deleted file mode 100644
index f6f318530f0aeb268c9f9389e556065beef2ac9e..0000000000000000000000000000000000000000
--- a/spaces/diffle/license/app.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import streamlit as st
-
-txt_link = "https://huggingface.co/spaces/CompVis/stable-diffusion-license/raw/main/license.txt"
-html_link = "https://huggingface.co/spaces/CompVis/stable-diffusion-license/raw/main/license.html"
-
-st.sidebar.title("Stable Diffusion")
-st.sidebar.markdown("## Stable Diffusion RAIL License v1.0")
-st.sidebar.markdown(f"This is the home of the Stable Diffusion RAIL License v1.0.\
-If you would like to download the license you can get it as [.txt]({txt_link}), or [.html]({html_link}) file.")
-
-with open("license.txt", "r") as f:
- license_html = f.read()
-
-st.markdown(license_html, unsafe_allow_html=True)
diff --git a/spaces/difinative/AIBuddy/README.md b/spaces/difinative/AIBuddy/README.md
deleted file mode 100644
index 22ce617416761d5d7be0e2372c7fa4f40db49449..0000000000000000000000000000000000000000
--- a/spaces/difinative/AIBuddy/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AIBuddy
-emoji: 👁
-colorFrom: pink
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/cleaner.py b/spaces/digitalxingtong/Nanami-Bert-VITS2/text/cleaner.py
deleted file mode 100644
index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/cleaner.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from text import chinese, cleaned_text_to_sequence
-
-
-language_module_map = {
- 'ZH': chinese
-}
-
-
-def clean_text(text, language):
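-    # normalize the raw text, then convert it to phonemes, tones and word-to-phoneme counts for the given language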
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- return norm_text, phones, tones, word2ph
-
-def clean_text_bert(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- bert = language_module.get_bert_feature(norm_text, word2ph)
- return phones, tones, bert
-
-def text_to_sequence(text, language):
- norm_text, phones, tones, word2ph = clean_text(text, language)
- return cleaned_text_to_sequence(phones, tones, language)
-
-if __name__ == '__main__':
- pass
diff --git a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/preprocess_text.py b/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/preprocess_text.py
deleted file mode 100644
index 44c35fecd9b7f21016e80e9597d6055254cba3f7..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/preprocess_text.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import json
-from random import shuffle
-
-import tqdm
-from text.cleaner import clean_text
-from collections import defaultdict
-import shutil
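-# stages: 1 = clean the raw transcription list, 2 = build per-speaker train/val splits, 3 = write speaker count and ids into the config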
-stage = [1,2,3]
-
-transcription_path = 'filelists/short_character_anno.list'
-train_path = 'filelists/train.list'
-val_path = 'filelists/val.list'
-config_path = "configs/config.json"
-val_per_spk = 4
-max_val_total = 8
-
-if 1 in stage:
- with open( transcription_path+'.cleaned', 'w', encoding='utf-8') as f:
- for line in tqdm.tqdm(open(transcription_path, encoding='utf-8').readlines()):
- try:
- utt, spk, language, text = line.strip().split('|')
- #language = "ZH"
- norm_text, phones, tones, word2ph = clean_text(text, language)
- f.write('{}|{}|{}|{}|{}|{}|{}\n'.format(utt, spk, language, norm_text, ' '.join(phones),
- " ".join([str(i) for i in tones]),
- " ".join([str(i) for i in word2ph])))
- except:
- print("err!", utt)
-
-if 2 in stage:
- spk_utt_map = defaultdict(list)
- spk_id_map = {}
- current_sid = 0
-
- with open( transcription_path+'.cleaned', encoding='utf-8') as f:
- for line in f.readlines():
- utt, spk, language, text, phones, tones, word2ph = line.strip().split('|')
- spk_utt_map[spk].append(line)
- if spk not in spk_id_map.keys():
- spk_id_map[spk] = current_sid
- current_sid += 1
- train_list = []
- val_list = []
- for spk, utts in spk_utt_map.items():
- shuffle(utts)
- val_list+=utts[:val_per_spk]
- train_list+=utts[val_per_spk:]
- if len(val_list) > max_val_total:
- train_list+=val_list[max_val_total:]
- val_list = val_list[:max_val_total]
-
- with open( train_path,"w", encoding='utf-8') as f:
- for line in train_list:
- f.write(line)
-
- file_path = transcription_path+'.cleaned'
- shutil.copy(file_path,'./filelists/train.list')
-
- with open(val_path, "w", encoding='utf-8') as f:
- for line in val_list:
- f.write(line)
-
-if 3 in stage:
- assert 2 in stage
- config = json.load(open(config_path))
- config['data']["n_speakers"] = current_sid #
- config["data"]['spk2id'] = spk_id_map
- with open(config_path, 'w', encoding='utf-8') as f:
- json.dump(config, f, indent=2, ensure_ascii=False)
diff --git a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/README.md b/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/README.md
deleted file mode 100644
index 9767dfa6d71c9216b8d864a840432c66828002e6..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AI星瞳 长文本专用(冬牧场 版本)
-emoji: 🌟
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.36.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/transcribe_genshin.py b/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/transcribe_genshin.py
deleted file mode 100644
index acc98814af6189d129ab85946525bec55419a33f..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/transcribe_genshin.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# coding=gbk
-import os
-import argparse
-import librosa
-import numpy as np
-from multiprocessing import Pool, cpu_count
-
-import soundfile
-from scipy.io import wavfile
-from tqdm import tqdm
-
-global speaker_annos
-speaker_annos = []
-
-def process(item):
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- if os.path.exists(wav_path) and '.wav' in wav_path:
- os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True)
- wav, sr = librosa.load(wav_path, sr=args.sr)
- soundfile.write(
- os.path.join(args.out_dir, speaker, wav_name),
- wav,
- sr
- )
-
-def process_text(item):
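-    # read the matching .lab transcript, strip the {NICKNAME}/{M#}{F#} placeholders, and queue a "path|speaker|ZH|text" line for the filelist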
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- global speaker_annos
- tr_name = wav_name.replace('.wav', '')
- with open(args.out_dir+'/'+speaker+'/'+tr_name+'.lab', "r", encoding="utf-8") as file:
- text = file.read()
- text = text.replace("{NICKNAME}",'')
- text = text.replace("{M#}{F#}",'')
- text = text.replace("{M#}{F#}",'')
- substring = "{M#}{F#}"
- if substring in text:
- if tr_name.endswith("a"):
- text = text.replace("{M#}{F#}",'')
- if tr_name.endswith("b"):
- text = text.replace("{M#}{F#}",'')
- text = text.replace("#",'')
- text = "ZH|" + text + "\n" #
- speaker_annos.append(args.out_dir+'/'+speaker+'/'+wav_name+ "|" + speaker + "|" + text)
-
-
-
-if __name__ == "__main__":
- parent_dir = "./genshin_dataset/"
- speaker_names = list(os.walk(parent_dir))[0][1]
- parser = argparse.ArgumentParser()
- parser.add_argument("--sr", type=int, default=44100, help="sampling rate")
- parser.add_argument("--in_dir", type=str, default="./genshin_dataset", help="path to source dir")
- parser.add_argument("--out_dir", type=str, default="./genshin_dataset", help="path to target dir")
- args = parser.parse_args()
- # processs = 8
- processs = cpu_count()-2 if cpu_count() >4 else 1
- pool = Pool(processes=processs)
-
- for speaker in os.listdir(args.in_dir):
- spk_dir = os.path.join(args.in_dir, speaker)
- if os.path.isdir(spk_dir):
- print(spk_dir)
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
- pass
- for i in os.listdir(spk_dir):
- if i.endswith("wav"):
- pro=(spk_dir, i, args)
- process_text(pro)
- if len(speaker_annos) == 0:
- print("transcribe error!!!")
- with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f:
- for line in speaker_annos:
- f.write(line)
- print("transcript file finished.")
diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/bert_gen.py b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/bert_gen.py
deleted file mode 100644
index 467655b2c4171608ad690fe7dec350db85f84f1b..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/bert_gen.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import torch
-from torch.utils.data import DataLoader
-from multiprocessing import Pool
-import commons
-import utils
-from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate
-from tqdm import tqdm
-import warnings
-
-from text import cleaned_text_to_sequence, get_bert
-
-config_path = 'configs/config.json'
-hps = utils.get_hparams_from_file(config_path)
-
-def process_line(line):
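-    # compute (or load a cached copy of) the BERT features for one filelist line and store them next to the wav as .bert.pt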
- _id, spk, language_str, text, phones, tone, word2ph = line.strip().split("|")
- phone = phones.split(" ")
- tone = [int(i) for i in tone.split(" ")]
- word2ph = [int(i) for i in word2ph.split(" ")]
- w2pho = [i for i in word2ph]
- word2ph = [i for i in word2ph]
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
- if hps.data.add_blank:
- phone = commons.intersperse(phone, 0)
- tone = commons.intersperse(tone, 0)
- language = commons.intersperse(language, 0)
- for i in range(len(word2ph)):
- word2ph[i] = word2ph[i] * 2
- word2ph[0] += 1
- wav_path = f'{_id}'
-
- bert_path = wav_path.replace(".wav", ".bert.pt")
- try:
- bert = torch.load(bert_path)
- assert bert.shape[-1] == len(phone)
- except:
- bert = get_bert(text, word2ph, language_str)
- assert bert.shape[-1] == len(phone)
- torch.save(bert, bert_path)
-
-
-if __name__ == '__main__':
- lines = []
- with open(hps.data.training_files, encoding='utf-8' ) as f:
- lines.extend(f.readlines())
-
- # with open(hps.data.validation_files, encoding='utf-8' ) as f:
- # lines.extend(f.readlines())
-
- with Pool(processes=2) as pool: #A100 40GB suitable config,if coom,please decrease the processess number.
- for _ in tqdm(pool.imap_unordered(process_line, lines)):
- pass
diff --git a/spaces/doevent/vc/app.py b/spaces/doevent/vc/app.py
deleted file mode 100644
index 249dc99a91dc19d5cc54676ecb45078197e19a67..0000000000000000000000000000000000000000
--- a/spaces/doevent/vc/app.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import gradio as gr
-import logging
-import os
-from TTS.api import TTS
-import time
-
-os.system("pip show TTS")
-tts = TTS(model_name="voice_conversion_models/multilingual/vctk/freevc24", progress_bar=False, gpu=False)
-logging.basicConfig(level=logging.INFO)
-count = 0
-
-def main():
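-    # crude rate limiting: after 150 calls, pause, clear /tmp and reset the counter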
- global count
- count += 1
- if count > 150:
- time.sleep(30)
- os.system("rm -R /tmp/*")
- print(f"Reset count: {count}")
- count = 0
- gr.Error("Reset counter")
-
- with gr.Blocks() as demo:
-
- with gr.Row():
- with gr.Column(variant="panel"):
- src_audio_mic = gr.Audio(source="microphone", label="Record your voice")
- src_audio_file = gr.Audio(
- source="upload", type="filepath", label="Or upload audio to convert"
- )
-
- with gr.Column(variant="panel"):
- tgt_audio_file = gr.Audio(
- source="upload", type="filepath", label="Select audio with target voice"
- )
-
- with gr.Row():
- convert_btn = gr.Button("Convert")
- with gr.Row():
- result_audio = gr.Audio()
-
-
- def voice_conversion(src_from_mic_, src_from_file_, tgt_from_file_):
- """
- helper function which checks where source come from
- """
- src_ = None
- if src_from_mic_:
- src_ = src_from_mic_
- elif src_from_file_:
- src_ = src_from_file_
- tgt_ = tgt_from_file_
- if not src_ or not tgt_:
- logging.warning("source or target are not provided")
- return
-
- print(src_)
- print(tgt_)
- tts.voice_conversion_to_file(source_wav=src_, target_wav=tgt_, file_path="output.wav")
- return "output.wav"
-
- convert_btn.click(
- voice_conversion,
- inputs=[src_audio_mic, src_audio_file, tgt_audio_file],
- outputs=result_audio,
- )
-
- demo.queue(concurrency_count=1).launch(show_api=False, show_error=True)
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/dorkai/text-generation-webui-main/css/chat_style-TheEncrypted777.css b/spaces/dorkai/text-generation-webui-main/css/chat_style-TheEncrypted777.css
deleted file mode 100644
index cac8015f505413b041df36552283c294caa94392..0000000000000000000000000000000000000000
--- a/spaces/dorkai/text-generation-webui-main/css/chat_style-TheEncrypted777.css
+++ /dev/null
@@ -1,137 +0,0 @@
-/* All credits to TheEncrypted777: https://www.reddit.com/r/Oobabooga/comments/12xe6vq/updated_css_styling_with_color_customization_for/ */
-
-.chat {
- margin-left: auto;
- margin-right: auto;
- max-width: 800px;
- height: calc(100vh - 300px);
- overflow-y: auto;
- padding-right: 20px;
- display: flex;
- flex-direction: column-reverse;
- word-break: break-word;
- overflow-wrap: anywhere;
-}
-
-.message {
- display: grid;
- grid-template-columns: 60px minmax(0, 1fr);
- padding-bottom: 28px;
- font-size: 18px;
- /*Change 'Quicksand' to a font you like or leave it*/
- font-family: Quicksand, Arial, sans-serif;
- line-height: 1.428571429;
-}
-
-.circle-you {
- background-color: gray;
- border-radius: 1rem;
- /*Change color to any you like to be the border of your image*/
- border: 2px solid white;
-}
-
-.circle-bot {
- background-color: gray;
- border-radius: 1rem;
- /*Change color to any you like to be the border of the bot's image*/
- border: 2px solid white;
-}
-
-.circle-bot img,
-.circle-you img {
- border-radius: 10%;
- width: 100%;
- height: 100%;
- object-fit: cover;
-}
-
-.circle-you, .circle-bot {
- /*You can set the size of the profile images here, but if you do, you have to also adjust the .text{padding-left: 90px} to a different number according to the width of the image which is right below here*/
- width: 135px;
- height: 175px;
-}
-
-.text {
- /*Change this to move the message box further left or right depending on the size of your profile pic*/
- padding-left: 90px;
- text-shadow: 2px 2px 2px rgb(0, 0, 0);
-}
-
-.text p {
- margin-top: 2px;
-}
-
-.username {
- padding-left: 10px;
- font-size: 22px;
- font-weight: bold;
- border-top: 1px solid rgb(51, 64, 90);
- padding: 3px;
-}
-
-.message-body {
- position: relative;
- border-radius: 1rem;
- border: 1px solid rgba(255, 255, 255, 0.459);
- border-radius: 10px;
- padding: 10px;
- padding-top: 5px;
- /*Message gradient background color - remove the line bellow if you don't want a background color or gradient*/
- background: linear-gradient(to bottom, #171730, #1b263f);
- }
-
- /*Adds 2 extra lines at the top and bottom of the message*/
- .message-body:before,
- .message-body:after {
- content: "";
- position: absolute;
- left: 10px;
- right: 10px;
- height: 1px;
- background-color: rgba(255, 255, 255, 0.13);
- }
-
- .message-body:before {
- top: 6px;
- }
-
- .message-body:after {
- bottom: 6px;
- }
-
-
-.message-body img {
- max-width: 300px;
- max-height: 300px;
- border-radius: 20px;
-}
-
-.message-body p {
- margin-bottom: 0 !important;
- font-size: 18px !important;
- line-height: 1.428571429 !important;
-}
-
-.message-body li {
- margin-top: 0.5em !important;
- margin-bottom: 0.5em !important;
-}
-
-.message-body li > p {
- display: inline !important;
-}
-
-.message-body code {
- overflow-x: auto;
-}
-.message-body :not(pre) > code {
- white-space: normal !important;
-}
-
-.dark .message-body p em {
- color: rgb(138, 138, 138) !important;
-}
-
-.message-body p em {
- color: rgb(110, 110, 110) !important;
-}
diff --git a/spaces/dorkai/text-generation-webui-main/modules/shared.py b/spaces/dorkai/text-generation-webui-main/modules/shared.py
deleted file mode 100644
index 7f945366cf7aca78b8a2a87b749964d038107f21..0000000000000000000000000000000000000000
--- a/spaces/dorkai/text-generation-webui-main/modules/shared.py
+++ /dev/null
@@ -1,230 +0,0 @@
-import argparse
-import logging
-from collections import OrderedDict
-from pathlib import Path
-
-import yaml
-
-model = None
-tokenizer = None
-model_name = "None"
-model_type = None
-lora_names = []
-soft_prompt_tensor = None
-soft_prompt = False
-
-# Chat variables
-history = {'internal': [], 'visible': []}
-character = 'None'
-stop_everything = False
-processing_message = '*Is typing...*'
-
-# UI elements (buttons, sliders, HTML, etc)
-gradio = {}
-
-# For keeping the values of UI elements on page reload
-persistent_interface_state = {}
-
-input_params = [] # Generation input parameters
-reload_inputs = [] # Parameters for reloading the chat interface
-
-# For restarting the interface
-need_restart = False
-
-settings = {
- 'autoload_model': True,
- 'max_new_tokens': 200,
- 'max_new_tokens_min': 1,
- 'max_new_tokens_max': 2000,
- 'seed': -1,
- 'character': 'None',
- 'name1': 'You',
- 'name2': 'Assistant',
- 'context': 'This is a conversation with your Assistant. It is a computer program designed to help you with various tasks such as answering questions, providing recommendations, and helping with decision making. You can ask it anything you want and it will do its best to give you accurate and relevant information.',
- 'greeting': '',
- 'turn_template': '',
- 'custom_stopping_strings': '',
- 'stop_at_newline': False,
- 'add_bos_token': True,
- 'ban_eos_token': False,
- 'skip_special_tokens': True,
- 'truncation_length': 2048,
- 'truncation_length_min': 0,
- 'truncation_length_max': 8192,
- 'mode': 'chat',
- 'chat_style': 'cai-chat',
- 'instruction_template': 'None',
- 'chat-instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
- 'chat_prompt_size': 2048,
- 'chat_prompt_size_min': 0,
- 'chat_prompt_size_max': 2048,
- 'chat_generation_attempts': 1,
- 'chat_generation_attempts_min': 1,
- 'chat_generation_attempts_max': 10,
- 'default_extensions': [],
- 'chat_default_extensions': ["gallery"],
- 'presets': {
- 'default': 'Default',
- '.*(alpaca|llama|llava)': "LLaMA-Precise",
- '.*pygmalion': 'NovelAI-Storywriter',
- '.*RWKV': 'Naive',
- '.*moss': 'MOSS',
- },
- 'prompts': {
- 'default': 'QA',
- '.*(gpt4chan|gpt-4chan|4chan)': 'GPT-4chan',
- }
-}
-
-
-def str2bool(v):
- if isinstance(v, bool):
- return v
- if v.lower() in ('yes', 'true', 't', 'y', '1'):
- return True
- elif v.lower() in ('no', 'false', 'f', 'n', '0'):
- return False
- else:
- raise argparse.ArgumentTypeError('Boolean value expected.')
-
-
-parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=54))
-
-# Basic settings
-parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
-parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode with a style similar to the Character.AI website.')
-parser.add_argument('--character', type=str, help='The name of the character to load in chat mode by default.')
-parser.add_argument('--model', type=str, help='Name of the model to load by default.')
-parser.add_argument('--lora', type=str, nargs="+", help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.')
-parser.add_argument("--model-dir", type=str, default='models/', help="Path to directory with all the models")
-parser.add_argument("--lora-dir", type=str, default='loras/', help="Path to directory with all the loras")
-parser.add_argument('--model-menu', action='store_true', help='Show a model menu in the terminal when the web UI is first launched.')
-parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time.')
-parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example. If you create a file called settings.json, this file will be loaded by default without the need to use the --settings flag.')
-parser.add_argument('--extensions', type=str, nargs="+", help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
-parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
-
-# Accelerate/transformers
-parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text. Warning: Training on CPU is extremely slow.')
-parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
-parser.add_argument('--gpu-memory', type=str, nargs="+", help='Maximum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs. You can also set values in MiB like --gpu-memory 3500MiB.')
-parser.add_argument('--cpu-memory', type=str, help='Maximum CPU memory in GiB to allocate for offloaded weights. Same as above.')
-parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
-parser.add_argument('--disk-cache-dir', type=str, default="cache", help='Directory to save the disk cache to. Defaults to "cache".')
-parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
-parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
-parser.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces the VRAM usage a bit at a performance cost.')
-parser.add_argument('--xformers', action='store_true', help="Use xformer's memory efficient attention. This should increase your tokens/s.")
-parser.add_argument('--sdp-attention', action='store_true', help="Use torch 2.0's sdp attention.")
-parser.add_argument('--trust-remote-code', action='store_true', help="Set trust_remote_code=True while loading a model. Necessary for ChatGLM.")
-
-# llama.cpp
-parser.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
-parser.add_argument('--n_batch', type=int, default=512, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
-parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
-parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
-parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
-parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
-
-# GPTQ
-parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
-parser.add_argument('--model_type', type=str, help='Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
-parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
-parser.add_argument('--pre_layer', type=int, nargs="+", help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. For multi-gpu, write the numbers separated by spaces, eg --pre_layer 30 60.')
-parser.add_argument('--checkpoint', type=str, help='The path to the quantized checkpoint file. If not specified, it will be automatically detected.')
-parser.add_argument('--monkey-patch', action='store_true', help='Apply the monkey patch for using LoRAs with quantized models.')
-parser.add_argument('--quant_attn', action='store_true', help='(triton) Enable quant attention.')
-parser.add_argument('--warmup_autotune', action='store_true', help='(triton) Enable warmup autotune.')
-parser.add_argument('--fused_mlp', action='store_true', help='(triton) Enable fused mlp.')
-
-# AutoGPTQ
-parser.add_argument('--autogptq', action='store_true', help='Use AutoGPTQ for loading quantized models instead of the internal GPTQ loader.')
-parser.add_argument('--triton', action='store_true', help='Use triton.')
-
-# FlexGen
-parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')
-parser.add_argument('--percent', type=int, nargs="+", default=[0, 100, 100, 0, 100, 0], help='FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0).')
-parser.add_argument("--compress-weight", action="store_true", help="FlexGen: activate weight compression.")
-parser.add_argument("--pin-weight", type=str2bool, nargs="?", const=True, default=True, help="FlexGen: whether to pin weights (setting this to False reduces CPU memory by 20%%).")
-
-# DeepSpeed
-parser.add_argument('--deepspeed', action='store_true', help='Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration.')
-parser.add_argument('--nvme-offload-dir', type=str, help='DeepSpeed: Directory to use for ZeRO-3 NVME offloading.')
-parser.add_argument('--local_rank', type=int, default=0, help='DeepSpeed: Optional argument for distributed setups.')
-
-# RWKV
-parser.add_argument('--rwkv-strategy', type=str, default=None, help='RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8".')
-parser.add_argument('--rwkv-cuda-on', action='store_true', help='RWKV: Compile the CUDA kernel for better performance.')
-
-# Gradio
-parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
-parser.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
-parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
-parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
-parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
-parser.add_argument("--gradio-auth-path", type=str, help='Set the gradio authentication file path. The file should contain one or more user:password pairs in this format: "u1:p1,u2:p2,u3:p3"', default=None)
-
-# API
-parser.add_argument('--api', action='store_true', help='Enable the API extension.')
-parser.add_argument('--api-blocking-port', type=int, default=5000, help='The listening port for the blocking API.')
-parser.add_argument('--api-streaming-port', type=int, default=5005, help='The listening port for the streaming API.')
-parser.add_argument('--public-api', action='store_true', help='Create a public URL for the API using Cloudfare.')
-
-# Multimodal
-parser.add_argument('--multimodal-pipeline', type=str, default=None, help='The multimodal pipeline to use. Examples: llava-7b, llava-13b.')
-
-args = parser.parse_args()
-args_defaults = parser.parse_args([])
-
-# Deprecation warnings for parameters that have been renamed
-deprecated_dict = {}
-for k in deprecated_dict:
- if getattr(args, k) != deprecated_dict[k][1]:
- logging.warning(f"--{k} is deprecated and will be removed. Use --{deprecated_dict[k][0]} instead.")
- setattr(args, deprecated_dict[k][0], getattr(args, k))
-
-# Security warnings
-if args.trust_remote_code:
- logging.warning("trust_remote_code is enabled. This is dangerous.")
-if args.share:
- logging.warning("The gradio \"share link\" feature downloads a proprietary and unaudited blob to create a reverse tunnel. This is potentially dangerous.")
-
-
-def add_extension(name):
- if args.extensions is None:
- args.extensions = [name]
-    elif name not in args.extensions:
- args.extensions.append(name)
-
-
-# Activating the API extension
-if args.api or args.public_api:
- add_extension('api')
-
-# Activating the multimodal extension
-if args.multimodal_pipeline is not None:
- add_extension('multimodal')
-
-
-def is_chat():
- return args.chat
-
-
-# Loading model-specific settings
-with Path(f'{args.model_dir}/config.yaml') as p:
- if p.exists():
- model_config = yaml.safe_load(open(p, 'r').read())
- else:
- model_config = {}
-
-# Applying user-defined model settings
-with Path(f'{args.model_dir}/config-user.yaml') as p:
- if p.exists():
- user_config = yaml.safe_load(open(p, 'r').read())
- for k in user_config:
- if k in model_config:
- model_config[k].update(user_config[k])
- else:
- model_config[k] = user_config[k]
-
-model_config = OrderedDict(model_config)
diff --git a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/css/html_bubble_chat_style.css b/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/css/html_bubble_chat_style.css
deleted file mode 100644
index a54a10734c0c14a1abe3ecd7fdb89602bc362dec..0000000000000000000000000000000000000000
--- a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/css/html_bubble_chat_style.css
+++ /dev/null
@@ -1,86 +0,0 @@
-.chat {
- margin-left: auto;
- margin-right: auto;
- max-width: 800px;
- height: calc(100vh - 306px);
- overflow-y: auto;
- padding-right: 20px;
- display: flex;
- flex-direction: column-reverse;
- word-break: break-word;
- overflow-wrap: anywhere;
-}
-
-.message {
- padding-bottom: 25px;
- font-size: 15px;
- font-family: Helvetica, Arial, sans-serif;
- line-height: 1.428571429;
-}
-
-.text-you {
- background-color: #d9fdd3;
- border-radius: 15px;
- padding: 10px;
- padding-top: 5px;
- float: right;
-}
-
-.text-bot {
- background-color: #f2f2f2;
- border-radius: 15px;
- padding: 10px;
- padding-top: 5px;
-}
-
-.dark .text-you {
- background-color: #005c4b;
- color: #111b21;
-}
-
-.dark .text-bot {
- background-color: #1f2937;
- color: #111b21;
-}
-
-.text-bot p, .text-you p {
- margin-top: 5px;
-}
-
-.message-body {}
-
-.message-body img {
- max-width: 300px;
- max-height: 300px;
- border-radius: 20px;
-}
-
-.message-body p {
- margin-bottom: 0 !important;
- font-size: 15px !important;
- line-height: 1.428571429 !important;
-}
-
-.message-body li {
- margin-top: 0.5em !important;
- margin-bottom: 0.5em !important;
-}
-
-.message-body li > p {
- display: inline !important;
-}
-
-.message-body code {
- overflow-x: auto;
-}
-.message-body :not(pre) > code {
- white-space: normal !important;
-}
-
-.dark .message-body p em {
- color: rgb(138, 138, 138) !important;
-}
-
-.message-body p em {
- color: rgb(110, 110, 110) !important;
-}
\ No newline at end of file
diff --git a/spaces/dpe1/beat_manipulator/beat_manipulator/parse.py b/spaces/dpe1/beat_manipulator/beat_manipulator/parse.py
deleted file mode 100644
index 81537a43b6312462e385af51fb106fd7748b6698..0000000000000000000000000000000000000000
--- a/spaces/dpe1/beat_manipulator/beat_manipulator/parse.py
+++ /dev/null
@@ -1,251 +0,0 @@
-from .utils import C_SLICE, C_JOIN, C_MISC, C_MATH
-import numpy as np
-from . import io, utils, main
-def _getnum(pattern, cur, symbols = '+-*/'):
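-    # collect a run of digits/operators starting at `cur`; returns the string and the index of its last character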
- number = ''
- while pattern[cur].isdecimal() or pattern[cur] in symbols:
- number+=pattern[cur]
- cur+=1
- return number, cur-1
-
-def parse(pattern:str, samples:dict, pattern_length:int = None,
- c_slice:str = C_SLICE,
- c_join:str = C_JOIN,
- c_misc:str = C_MISC,
- log = True,
- simple_mode = False):
- """Returns (beats, operators, pattern_length, c_slice, c_misc, c_join)"""
- if log is True: print(f'Beatswapping with `{pattern}`')
-
- #load samples:
- if isinstance(samples, str): samples = (samples,)
- if not isinstance(samples, dict):
- samples = {str(i+1):samples[i] for i in range(len(samples))}
-
- #preprocess pattern
- separator = c_join[0]
- #forgot separator
- if simple_mode is True:
- if c_join[0] not in pattern and c_join[1] not in pattern and c_join[2] not in pattern and c_join[3] not in pattern: pattern = pattern.replace(' ', separator)
- if ' ' not in c_join: pattern = pattern.replace(' ', '') # ignore spaces
- for i in c_join:
- while i+i in pattern: pattern = pattern.replace(i+i, i) #double separator
- while pattern.startswith(i): pattern = pattern[1:]
- while pattern.endswith(i): pattern = pattern[:-1]
-
- # Creates a list of beat strings so that I can later see if there is a `!` in the string
- separated = pattern
- for i in c_join:
- separated = separated.replace(i, c_join[0])
- separated = separated.split(c_join[0])
- pattern = pattern.replace(c_misc[6], '')
-
- # parsing
- length = 0
- num = ''
- cur = 0
- beats = []
- operators = [separator]
- shuffle_beats = []
- shuffle_groups = []
- current_beat = 0
- effect = None
- pattern += ' '
- sample_toadd = None
-
- # Loops over all characters
- while cur < len(pattern):
- char = pattern[cur]
- #print(f'char = {char}, cur = {cur}, num = {num}, current_beat = {current_beat}, effect = {effect}, len(beats) = {len(beats)}, length = {length}')
- if char == c_misc[3]: char = str(current_beat+1) # Replaces `i` with current number
-
- # If character is `", ', `, or [`: searches for closing quote and gets the sample rate,
- # moves cursor to the character after last quote/bracket, creates a sample_toadd variable with the sample.
- elif char in c_misc[0:3]+c_misc[10:12]:
- quote = char
- if quote == c_misc[10]: quote = c_misc[11] # `[` is replaced with `]`
- cur += 1
- sample = ''
-
- # Gets sample name between quote characters, moves cursor to the ending quote.
- while pattern[cur] != quote:
- sample += pattern[cur]
- cur += 1
- assert sample in samples, f"No sample named `{sample}` found in samples. Available samples: {samples.keys()}"
-
- # If sample is a song, it will be converted to a song if needed, and beatmap will be generated
- if quote == c_misc[11]:
- if not isinstance(samples[sample], main.song): samples[sample] = main.song(samples[sample])
- if samples[sample].beatmap is None:
- samples[sample].beatmap_generate()
- samples[sample].beatmap_adjust()
-
- # Else sample is a sound file
- elif not isinstance(samples[sample], np.ndarray): samples[sample] = io._load(samples[sample])[0]
-
- sample_toadd = [samples[sample], [], quote, None] # Creates the sample_toadd variable
- cur += 1
- char = pattern[cur]
-
- # If character is a math character, a slice character, or `@_?!%` - random, not count, skip, create variable -
- # - it gets added to `num`, and the loop repeats.
- # _safer_eval only takes part of the expression to the left of special characters (@%#), so it won't affect length calculation
- if char.isdecimal() or char in (C_MATH + c_slice + c_misc[4:8] + c_misc[9]):
- num += char
- #print(f'char = {char}, added it to num: num = {num}')
-
- # If character is `%` and beat hasn't been created yet, it takes the next character as well
- if char == c_misc[7] and len(beats) == current_beat:
- cur += 1
- char = pattern[cur]
- num += char
-
- # If character is a shuffle character `#` + math expression, beat number gets added to `shuffle_beats`,
- # beat shuffle group gets added to `shuffle_groups`, cursor is moved to the character after the math expression, and loop repeats.
- # That means operations after this will only execute once character is not a math character.
- elif char == c_misc[8]:
- cur+=1
- number, cur = _getnum(pattern, cur)
- char = pattern[cur]
- shuffle_beats.append(current_beat)
- shuffle_groups.append(number)
-
- # If character is not math/shuffle, that means math expression has ended. Now it tries to figure out where the expression belongs,
- # and parses the further characters
- else:
-
- # If the beat has not been added, it adds the beat. Also figures out pattern length.
- if len(beats) == current_beat and len(num) > 0:
- # Checks all slice characters in the beat expression. If slice character is found, splits the slice and breaks.
- for c in c_slice:
- if c in num:
- num = num.split(c)[:2] + [c]
- #print(f'slice: split num by `{c}`, num = {num}, whole beat is {separated[current_beat]}')
- if pattern_length is None and c_misc[6] not in separated[current_beat]:
- num0, num1 = utils._safer_eval(num[0]), utils._safer_eval(num[1])
- if c == c_slice[0]: length = max(num0, num1, length)
- if c == c_slice[1]: length = max(num0-1, num0+num1-1, length)
- if c == c_slice[2]: length = max(num0-num1, num0, length)
- break
- # If it didn't break, the expression is not a slice, so it pattern length is just compared with the beat number.
- else:
- #print(f'single beat: {num}. Whole beat is {separated[current_beat]}')
- if c_misc[6] not in separated[current_beat]: length = max(utils._safer_eval(num), length)
-
- # If there no sample saved in `sample_toadd`, adds the beat to list of beats.
- if sample_toadd is None: beats.append([num, []])
- # If `sample_toadd` is not None, beat is a sample/song. Adds the beat and sets sample_toadd to None
- else:
- sample_toadd[3] = num
- beats.append(sample_toadd)
- sample_toadd = None
- #print(f'char = {char}, got num = {num}, appended beat {len(beats)}')
-
- # Sample might not have a `num` with a slice, this adds the sample without a slice
- elif len(beats) == current_beat and len(num) == 0 and sample_toadd is not None:
- beats.append(sample_toadd)
- sample_toadd = None
-
- # If beat has been added, it now parses beats.
- if len(beats) == current_beat+1:
- #print(f'char = {char}, parsing effects:')
-
- # If there is an effect and current character is not a math character, effect and value are added to current beat, and effect is set to None
- if effect is not None:
- #print(f'char = {char}, adding effect: type = {effect}, value = {num}')
- beats[current_beat][1].append([effect, num if num!='' else None])
- effect = None
-
- # If current character is a letter, it sets that letter to `effect` variable.
- # Since loop repeats after that, that while current character is a math character, it gets added to `num`.
- if char.isalpha() and effect is None:
- #print(f'char = {char}, effect type is {effect}')
- effect = char
-
- # If character is a beat separator, it starts parsing the next beat in the next loop.
- if char in c_join and len(beats) == current_beat + 1:
- #print(f'char = {char}, parsing next beat')
- current_beat += 1
- effect = None
- operators.append(char)
-
- num = '' # `num` is set to empty string. btw `num` is only used in this loop so it needs to be here
-
- cur += 1 # cursor goes to the next character
-
-
- #for i in beats: print(i)
- import math
- if pattern_length is None: pattern_length = int(math.ceil(length))
-
- return beats, operators, pattern_length, shuffle_groups, shuffle_beats, c_slice, c_misc, c_join
-
-# I can't be bothered to annotate this one. It just works, okay?
-def _random(beat:str, length:int, rchar = C_MISC[4], schar = C_MISC[5]) -> str:
- """Takes a string and replaces stuff like `@1_4_0.5` with randomly generated number where 1 - start, 4 - stop, 0.5 - step. Returns string."""
- import random
- beat+=' '
- while rchar in beat:
- rand_index = beat.find(rchar)+1
- char = beat[rand_index]
- number = ''
- while char.isdecimal() or char in '.+-*/':
- number += char
- rand_index+=1
- char = beat[rand_index]
- if number != '': start = utils._safer_eval(number)
- else: start = 0
- if char == schar:
- rand_index+=1
- char = beat[rand_index]
- number = ''
- while char.isdecimal() or char in '.+-*/':
- number += char
- rand_index+=1
- char = beat[rand_index]
- if number != '': stop = utils._safer_eval(number)
- else: stop = length
- if char == schar:
- rand_index+=1
- char = beat[rand_index]
- number = ''
- while char.isdecimal() or char in '.+-*/':
- number += char
- rand_index+=1
- char = beat[rand_index]
- if number != '': step = utils._safer_eval(number)
- else: step = length
- choices = []
- while start <= stop:
- choices.append(start)
- start+=step
- beat = list(beat)
- beat[beat.index(rchar):rand_index] = list(str(random.choice(choices)))
- beat = ''.join(beat)
- return beat
-
-def _shuffle(pattern: list, shuffle_beats: list, shuffle_groups: list) -> list:
- """Shuffles pattern according to shuffle_beats and shuffle_groups"""
- import random
- done = []
- result = pattern.copy()
- for group in shuffle_groups:
- if group not in done:
- shuffled = [i for n, i in enumerate(shuffle_beats) if shuffle_groups[n] == group]
- unshuffled = shuffled.copy()
- random.shuffle(shuffled)
- for i in range(len(shuffled)):
- result[unshuffled[i]] = pattern[shuffled[i]]
- done.append(group)
- return result
-
-def _metric_get(v, beat, metrics, c_misc7 = C_MISC[7]):
- assert v[v.find(c_misc7)+1] in metrics, f'`%{v[v.find(c_misc7)+1]}`: No metric called `{v[v.find(c_misc7)+1]}` found in metrics. Available metrics: {metrics.keys()}'
- metric = metrics[v[v.find(c_misc7)+1]](beat)
- return metric
-
-
-def _metric_replace(v, metric, c_misc7 = C_MISC[7]):
- for _ in range(v.count(c_misc7)):
- v= v[:v.find(c_misc7)] + str(metric) + v[v.find(c_misc7)+2:]
- return v
\ No newline at end of file
diff --git a/spaces/duycse1603/math2tex/ScanSSD/IOU_lib/IOUevaluater.py b/spaces/duycse1603/math2tex/ScanSSD/IOU_lib/IOUevaluater.py
deleted file mode 100644
index 16a1af36412ddd7c83397edec3e5e121fcefbecf..0000000000000000000000000000000000000000
--- a/spaces/duycse1603/math2tex/ScanSSD/IOU_lib/IOUevaluater.py
+++ /dev/null
@@ -1,433 +0,0 @@
-from zipfile import ZipFile
-import os
-from .Evaluator import *
-from utils import *
-import copy
-import argparse
-import sys
-import ntpath
-#import cStringIO
-from io import BytesIO
-import shutil
-
-
-def read_file(filename, bboxes, flag):
- '''
- Parses the input .csv file into map where key as page number and value as a list of bounding box objects
- corresponding to each math region in the file.
- :param filename: .csv file containing math regions
-    :param bboxes: map of page number -> list of BoundingBox objects
- :return:
- '''
- fh1 = open(filename, "r")
- prev_page = -1
- counter = 1
- for line in fh1:
- line = line.replace("\n", "")
- if line.replace(' ', '') == '':
- continue
- splitLine = line.split(",")
- idClass = float(splitLine[0])
- if prev_page == -1:
- prev_page = idClass
- else:
- if idClass != prev_page:
- counter = 1
- prev_page = idClass
- x = float(splitLine[1])
- y = float(splitLine[2])
- x2 = float(splitLine[3])
- y2 = float(splitLine[4])
- bb = BoundingBox(
- flag+"_"+str(counter),
- 1,
- x,
- y,
- x2,
- y2,
- CoordinatesType.Absolute, (200, 200),
- BBType.GroundTruth,
- format=BBFormat.XYX2Y2)
- counter += 1
- #print(counter)
- if idClass not in bboxes:
- bboxes[idClass] = []
- bboxes[idClass].append(bb)
-
- fh1.close()
-
-
-def extract_zipfile(zip_filename, target_dir):
- '''
- Extract zip file into the target directory
- :param zip_filename: full-file-path of the zip-file
- :param target_dir: target-dir to extract contents of zip-file
- :return:
- '''
- with ZipFile(zip_filename, 'r') as zip:
- # extracting all the files
- print('Extracting all the files now...')
- zip.extractall(target_dir)
- print('Done!')
-
-
-def create_doc_bboxes_map(dir_path,flag):
- '''
- Reads all files recursively in directory path and and returns a map containing bboxes for each page in each math
- file in directory.
- :param dir_path: full directory path containing math files
-    :return: map of filename -> (map of page number -> list of BoundingBox objects)
- '''
- pdf_bboxes_map = {}
-
- for filename in os.listdir(dir_path):
- full_filepath = os.path.join(dir_path, filename)
- filename_key = os.path.splitext(os.path.basename(full_filepath))[0]
- #print(full_filepath)
- if (full_filepath.startswith(".")) or (not (full_filepath.endswith(".csv") or full_filepath.endswith(".math"))):
- continue
- bboxes_map = {}
-
- if os.path.isdir(full_filepath):
- continue
-
- try:
- read_file(full_filepath, bboxes_map,flag)
- except Exception as e:
- print('exception occurred in reading file',full_filepath, str(e))
-
- #if len(bboxes_map)==0:
- # raise ValueError("Empty ground truths file or not in valid format")
- pdf_bboxes_map[filename_key] = copy.deepcopy(bboxes_map)
-
- return pdf_bboxes_map
-
-def unique_values(input_dict):
- #return ground truth boxes that have same det boxes
- pred_list=[]
- repair_keys=[]
- for value in input_dict.values():
- if value[1] in pred_list: #preds.append(value)
- gts=[k for k,v in input_dict.items() if v[1] == value[1]]
- #print('pair length',len(gts))
- repair_keys.append(gts)
- pred_list.append(value[1])
-
- return repair_keys
-
-def generate_validpairs(pairs):
- newpairs=[]
- for pair in pairs:
- if len(pair)>2:
- for i in range(len(pair)-1):
- newpair=(pair[i],pair[i+1])
- if newpair not in newpairs:newpairs.append(newpair)
-
- elif pair not in newpairs: newpairs.append(pair)
- return newpairs
-
-def fix_preds(input_dict,keyPairs,thre):
-
- validPairs=generate_validpairs(keyPairs)
-
- for pair in validPairs:
- #check if both pair exists"
- if pair[0] not in list(input_dict.keys()) or pair[1] not in list(input_dict.keys()):
- continue
- val0=input_dict[pair[0]][0]
- val1=input_dict[pair[1]][0]
- if val0>=val1: #change prediction for second pair
- values=input_dict[pair[1]]
- newprob=values[2][1]
- if newprobval0: #change prediction for first pair
- values=input_dict[pair[0]]
- newprob=values[2][1]
- if newprob> for ground truth bboxes
- :param det_page_bboxes_map: Map> for detection bboxes
- :return:
- '''
- evaluator = Evaluator()
-
- correct_pred_coarse=0
- correct_pred_fine=0
-
- pdf_gt_boxes=0
- pdf_det_boxes=0
-
- coarse_keys = {}
- fine_keys = {}
-
- for page_num in gt_page_bboxes_map:
- if page_num not in det_page_bboxes_map:
- print('Detections not found for page', str(page_num + 1), ' in', pdf_name)
- continue
- gt_boxes = gt_page_bboxes_map[page_num]
- det_boxes = det_page_bboxes_map[page_num]
-
- pdf_gt_boxes+=len(gt_boxes)
- pdf_det_boxes+=len(det_boxes)
-
- pred_dict={}
- for gt_box in gt_boxes:
- ious = evaluator._getAllIOUs(gt_box, det_boxes)
- preds=[]
- labels=[]
- for i in range(len(ious)):
- preds.append(round(ious[i][0],2))
- labels.append(ious[i][2].getImageName())
-
- pred_dict[gt_box.getImageName()]=preds[0],labels[0],preds,labels
-
- coarse,coarse_dict=count_true_box(copy.deepcopy(pred_dict),0.5)
- fine,fine_dict=count_true_box(copy.deepcopy(pred_dict),0.75)
-
- coarse_keys[page_num] = coarse_dict.keys()
- fine_keys[page_num] = fine_dict.keys()
-
- #count correct preds for coarse 0.5 and fine 0.75 in one page
- correct_pred_coarse= correct_pred_coarse+coarse
- correct_pred_fine= correct_pred_fine+fine
- #write iou per page
- if outdir:
- out_file = open(os.path.join(outdir,pdf_name.split(".csv")[0]+"_"+str(page_num)+"_eval.txt"), "w")
- out_file.write('#page num '+str(page_num)+", gt_box:"+str(len(gt_boxes))+
- ", pred_box:"+str(len(det_boxes))+"\n")
- out_file.write('\n')
- out_file.write('#COARSE DETECTION (iou>0.5):\n#number of correct prediction:'+ str(coarse)+ '\n#correctly detected:'+
- str(list(coarse_dict.keys()))+'\n')
- out_file.write('\n')
- out_file.write('#FINE DETECTION (iou>0.75):\n#number of correct prediction:'+ str(fine)+ '\n#correctly detected:'+
- str(list(fine_dict.keys()))+'\n')
- out_file.write('\n')
- out_file.write('#Sorted IOU scores for each GT box:\n')
- for gt_box in gt_boxes:
- ious = evaluator._getAllIOUs(gt_box, det_boxes)
- out_file.write(gt_box.getImageName()+",")
- for i in range(len(ious)-1):
- out_file.write("("+str(round(ious[i][0],2))+" "+ str(ious[i][2].getImageName())+"),")
- out_file.write( "("+str(round(ious[-1][0],2))+" "+ str(ious[-1][2].getImageName())+")\n" )
- out_file.close()
-
- return correct_pred_coarse, correct_pred_fine, pdf_gt_boxes, pdf_det_boxes, coarse_keys, fine_keys
-
-def count_box(input_dict):
- count=0
- for pdf in input_dict.values():
- for page in pdf.values():
- count+=len(page)
-
- return count
-
-# Zip every uploading files
-def archive_iou_txt(username, task_id, sub_id,userpath):
-
- inputdir=os.path.join(userpath,'iouEval_stats')
-
- if not os.path.exists(inputdir):
- print('No txt file is generated for IOU evaluation')
- pass
-
- dest_uploader = 'IOU_stats_archive'
- dest_uploader = os.path.join(userpath, dest_uploader)
-
- if not os.path.exists(dest_uploader):
- os.makedirs(dest_uploader)
-
- zip_file_name = '/' + task_id + '_' + sub_id
- shutil.make_archive(dest_uploader + zip_file_name, 'zip', inputdir)
-
- # return '/media/' + dest_uploader
-
-def write_html(gtFile,resultsFile,info,scores,destFile):
-
- destFile.write('')
- destFile.write('')
- destFile.write('')
- #writeCSS(destFile)
- destFile.write ("
CROHME 2019
Formula Detection Results ( TASK 3 )
")
- destFile.write("Submitted Files
Output: "+ ntpath.basename(resultsFile) +"
")
- destFile.write ("
Ground-truth: " + ntpath.basename(gtFile) + "
")
- if info['allGTbox'] == 0:
- sys.stderr.write("Error : no sample in this GT list !\n")
- exit(-1)
- #all detection and gt boxes
- destFile.write ("
Number of ground truth bounding boxes: " + str(info['allGTbox']) + " Number of detected bounding boxes: " + str(info['allDet']))
- destFile.write ("
-
-Wondershare TidyMyMusic for Mac helps you fix the mislabeled songs, find album arts, lyrics and remove duplicated songs. 4d29de3e1b
-
-
-
diff --git a/spaces/fatiXbelha/sd/CarX Street APK A Realistic and Immersive Racing Game for Android.md b/spaces/fatiXbelha/sd/CarX Street APK A Realistic and Immersive Racing Game for Android.md
deleted file mode 100644
index 39be9376f3502b8dafecfe2ad1f1b25c178d60f8..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/CarX Street APK A Realistic and Immersive Racing Game for Android.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-
How to Install CarX Street APK on Your Android Device
-
If you are a fan of street racing games, you might want to try out CarX Street APK, a free racing game from CarX Technology for Android devices. This game offers you a dynamic open world of street racing, where you can challenge other racers, join clubs, customize your cars, and become the legend of Sunset City. In this article, we will show you what CarX Street APK is, what features it has, how to download and install it on your Android device, and some tips and tricks for playing it.
-
What is CarX Street APK?
-
CarX Street APK is a game that lets you embrace the freedom of being a street racer in a realistic open world. You can choose from over 40 different varieties of cars, each with its own unique performance and behavior. You can also tune your cars with various parts and upgrades, as well as customize their appearance with different colors, stickers, rims, and more. You can race on highways and city streets, as well as drift on corners and perform stunts. You can also join clubs, compete with other players online, and take part in various game modes, such as career, sprint, drift, drag, time attack, and more.
CarX Street APK has many features that make it an exciting and enjoyable game for street racing enthusiasts. Here are some of them:
-
Diverse selection of cars and tracks
-
One of the most interesting features of CarX Street APK is its diverse selection of cars and tracks. With over 40 different cars available, you can choose whichever design best suits your taste. From sunny racing tracks to dark alleys, there are a number of regions to explore.
-
Realistic driving physics and mechanics
-
CarX Street APK uses the CarX Technology engine, which provides realistic driving physics and mechanics. You can feel the difference between front-wheel drive, rear-wheel drive, and all-wheel drive cars. You can also experience the effects of traction control, ABS, ESP, and other systems. You can also adjust the steering sensitivity, brake force, suspension stiffness, tire pressure, and other parameters to suit your driving style.
-
Customization option for vehicles
-
CarX Street APK allows you to customize your vehicles in various ways. You can change the color, stickers, rims, mirrors, headlights, lights, skirt, bumper, and much more. You can also swap parts and trick out your car for a specific race. You can upgrade the engine, transmission, body, suspension, tires, and more. You can even swap the engine of your unique car.
-
Range of game modes
-
CarX Street APK offers a range of game modes for you to enjoy. In career mode, you can drive at top speed or drift through turns, join clubs, defeat bosses, and prove to everyone that you're the best street racer in Sunset City. There is also a sprint mode, where you race against other players online or offline; a drift mode, where you show off your drifting skills and earn points; a drag mode, which tests your reaction time and acceleration; and a time attack mode, where you can beat your own records or challenge other players.
-
Thrilling multiplayer mode
-
CarX Street APK also has a thrilling multiplayer mode, where you can race with other players from around the world. You can join or create a club, chat with other members, and participate in club events. You can also challenge other players to duels, sprints, drifts, or time attacks. You can also join tournaments and compete for prizes and glory.
-
-
Regular updates and new content releases
-
CarX Street APK is constantly updated and improved by the developers. They regularly release new content, such as cars, tracks, parts, modes, events, and more. They also fix bugs and glitches, and optimize the game performance. You can always expect something new and exciting in CarX Street APK.
-
Stunning graphics and sound effects
-
CarX Street APK has stunning graphics and sound effects that make the game more immersive and realistic. You can see the details of the cars, the environments, the weather effects, the lighting effects, and more. You can also hear the engine sounds, the tire screeches, the collisions, the music, and more. You can adjust the graphics quality and sound volume to suit your preferences.
-
How to Download and Install CarX Street APK?
-
If you want to download and install CarX Street APK on your Android device, you need to follow these simple steps:
-
Step 1: Download the APK file from a trusted source
-
The first step is to download the APK file of CarX Street APK from a trusted source. You can use this link to download the latest version of CarX Street APK for free. Make sure you have enough storage space on your device before downloading the file.
-
Step 2: Enable Unknown Sources on your Android device
-
The next step is to enable Unknown Sources on your Android device. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message, but you can ignore it and proceed.
-
Step 3: Locate and tap on the downloaded APK file to start the installation process
-
The third step is to locate and tap on the downloaded APK file to start the installation process. You can use a file manager app or your device's default file explorer to find the file. Once you find it, tap on it and you will see a pop-up window asking for your permission to install the app. Tap on Install and wait for the installation to complete.
-
Step 4: Follow the on-screen instructions and wait for the installation to complete
-
The fourth step is to follow the on-screen instructions and wait for the installation to complete. The app will ask for some permissions, such as access to your storage, location, camera, microphone, etc. Grant them as needed and continue with the installation. Once the installation is done, you will see a confirmation message saying that CarX Street APK has been installed successfully.
-
Step 5: Launch the game and enjoy the open world street racing experience
-
The final step is to launch the game and enjoy the open world street racing experience. You can find the app icon on your home screen or app drawer. Tap on it and you will see the game's main menu. From there, you can choose your preferred game mode, select your car, customize it, and start racing.
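As a side note, if you have a computer handy, the same APK can be sideloaded over USB instead of tapping through the phone's file manager. A minimal sketch, assuming adb is installed, USB debugging is enabled, and the file name below is only an example:

```python
# Minimal sketch: sideload a downloaded APK over USB with adb.
# Assumes adb is on your PATH and USB debugging is enabled on the phone.
import subprocess

apk_path = "carx_street.apk"  # example file name; use the name of your downloaded file
subprocess.run(["adb", "install", "-r", apk_path], check=True)  # -r replaces the app if it is already installed
```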
-
Tips and Tricks for Playing CarX Street APK
-
If you want to get better at playing CarX Street APK, here are some tips and tricks that you can use:
-
Follow the tutorial
-
When you start playing CarX Street APK for the first time, you will be guided by a tutorial that will teach you the basics of the game, such as how to control your car, how to drift, how to race, and more. It is advisable to follow the tutorial and learn the ropes before you jump into the action. The tutorial will also reward you with some coins and gems that you can use to buy and upgrade your cars.
-
Roam through the city for more rewards
-
One of the fun things about CarX Street APK is that you can roam through the city and explore its different areas. You can also find various rewards and bonuses hidden around the city, such as coins, gems, crates, parts, and more. You can also encounter random events and challenges that will test your skills and give you more rewards. Roaming through the city is a great way to earn more resources and discover new places.
-
Take part in sprints
-
Sprints are short races that you can join anytime in CarX Street APK. They are usually marked by green icons on the map. Sprints are a good way to earn some quick cash and reputation points, as well as improve your driving skills. You can also challenge other players to sprints and see who is faster. Sprints are a fun and easy way to enjoy the game without committing to a long career mode.
-
Participate in clubs
-
Clubs are groups of players that you can join or create in CarX Street APK. Clubs allow you to chat with other members, share tips and tricks, and participate in club events. Club events are special races that require you to cooperate with your club members and compete with other clubs for prizes and rankings. Clubs are a great way to make friends, learn from others, and have more fun in the game.
-
Go for the best cars
-
CarX Street APK has a lot of cars to choose from, but not all of them are equal. Some cars are better than others in terms of speed, acceleration, handling, durability, and more. You should always aim for the best cars that suit your needs and preferences. You can check the stats and ratings of each car before you buy it or upgrade it. You can also compare different cars and see which one is better for you. You should also try out different cars and see how they perform on different tracks and modes.
-
Visit the tuning shop
-
The tuning shop is where you can customize your cars in CarX Street APK. You can change the appearance of your cars with different colors, stickers, rims, lights, and more. You can also tune your cars with different parts and upgrades that will improve their performance and behavior. You can adjust the engine, transmission, body, suspension, tires, brakes, and more. You can also swap the engine of your unique car with another one. The tuning shop is where you can unleash your creativity and make your cars stand out.
-
Conclusion
-
CarX Street APK is a free racing game that offers you a dynamic open world of street racing. You can choose from over 40 different varieties of cars, each with its own unique performance and behavior. You can also tune your cars with various parts and upgrades, as well as customize their appearance with different colors, stickers, rims, and more. You can race on highways and city streets, as well as drift on corners and perform stunts. You can also join clubs, compete with other players online, and take part in various game modes, such as career, sprint, drift, drag, time attack, and more. CarX Street APK is a game that will keep you entertained for hours with its realistic driving physics and mechanics, stunning graphics and sound effects, regular updates and new content releases, thrilling multiplayer mode, range of game modes, diverse selection of cars and tracks, and customization option for vehicles.
-
FAQs
-
Here are some frequently asked questions about CarX Street APK:
-
-
Is CarX Street APK safe to download?
-
Yes, CarX Street APK is safe to download from a trusted source like this link. It does not contain any viruses or malware that will harm your device or data.
-
Is CarX Street APK free to play?
-
Yes, CarX Street APK is free to play for everyone. However, it does have some optional in-app purchases that you can use to buy more coins, gems, crates, parts, or premium cars.
-
How do I update CarX Street APK?
-
You can update CarX Street APK by downloading the latest version from this link. You can also check for updates from within the game by going to Settings > About > Check for updates.
-
How do I contact the developers of CarX Street APK?
-
You can contact the developers of CarX Street APK by sending them an email at support@carx-tech.com. You can also visit their website at https://carx-tech.com/ or follow them on Facebook, Instagram, YouTube, or Discord.
-
Can I play CarX Street APK on PC?
-
Yes, you can play CarX Street APK on PC by using an Android emulator. An Android emulator is a software that allows you to run Android apps on your PC. Some of the popular Android emulators are BlueStacks, NoxPlayer, LDPlayer, and MEmu. You can download any of these emulators from their official websites and install them on your PC. Then, you can download CarX Street APK from this link and install it on the emulator. After that, you can launch the game and play it on your PC.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download 2 Chainz Songs The Ultimate Collection of Hits and Albums.md b/spaces/fatiXbelha/sd/Download 2 Chainz Songs The Ultimate Collection of Hits and Albums.md
deleted file mode 100644
index aa26975b279c5839b76a31ee3210239c6cd3810f..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download 2 Chainz Songs The Ultimate Collection of Hits and Albums.md
+++ /dev/null
@@ -1,144 +0,0 @@
-
-
How to Download 2 Chainz Songs Online
-
If you are a fan of rap music, you might have heard of 2 Chainz, one of the most popular and successful rappers in the industry. He has released several hit songs and albums that have earned him millions of fans and accolades. But how can you download his songs online and enjoy them anytime and anywhere? In this article, we will show you how to download 2 Chainz songs online from various platforms, as well as the benefits of doing so.
2 Chainz, whose real name is Tauheed Epps, is an American rapper who was born in College Park, Georgia. He started his musical career as one half of the duo Playaz Circle, along with his friend Earl "Dolla Boy" Conyers. They gained recognition for their debut single "Duffle Bag Boy" featuring Lil Wayne in 2007. Later, 2 Chainz signed a solo deal with Def Jam Recordings and released his first studio album Based on a T.R.U. Story in 2012. The album was a huge success, spawning three hit singles: "No Lie" featuring Drake, "Birthday Song" featuring Kanye West, and "I'm Different". Since then, he has released six more studio albums, including B.O.A.T.S. II: Me Time (2013), ColleGrove (2016), Pretty Girls Like Trap Music (2017), Rap or Go to the League (2019), So Help Me God! (2020), and Dope Don't Sell Itself (2022). He has also collaborated with many other artists, such as Ariana Grande, Eminem, Kendrick Lamar, Nicki Minaj, and Wiz Khalifa. He has won four Grammy Awards, five BET Awards, and two MTV Video Music Awards. He is widely regarded as one of the most influential and versatile rappers of his generation.
-
What are some of his best songs and albums
-
2 Chainz has a vast and diverse discography that showcases his lyrical skills, musical creativity, and artistic vision. Some of his best songs and albums are:
-
-
-| Song | Album | Year |
-| --- | --- | --- |
-| No Lie (feat. Drake) | Based on a T.R.U. Story | 2012 |
-| Birthday Song (feat. Kanye West) | Based on a T.R.U. Story | 2012 |
-| I'm Different | Based on a T.R.U. Story | 2012 |
-| Feds Watching (feat. Pharrell Williams) | B.O.A.T.S. II: Me Time | 2013 |
-| Watch Out | Trap-A-Velli Tre | 2015 |
-| Bounce (feat. Lil Wayne) | ColleGrove | 2016 |
-| Good Drank (feat. Gucci Mane and Quavo) | Pretty Girls Like Trap Music | 2017 |
-| It's a Vibe (feat. Ty Dolla Sign, Trey Songz, and Jhené Aiko) | Pretty Girls Like Trap Music | 2017 |
-| Rap Saved Me (with Metro Boomin, Offset, and 21 Savage) | Without Warning | 2017 |
-| RULE THE WORLD (feat. Ariana Grande) | Rap or Go to the League | 2019 |
-| MONEY MAKER (feat. Lil Wayne) | So Help Me God! | 2020 |
-| LAMBO WRIST (feat. Young Thug) | Dope Don't Sell Itself | 2022 |
-
Why you might want to download his songs online
-
If you are a fan of 2 Chainz or rap music in general, you might want to download his songs online for various reasons. For instance, you might want to:
- - Listen to his songs offline without any interruptions or ads
- - Create your own playlists and mixtapes with his songs
- - Share his songs with your friends and family via Bluetooth or other methods
- - Support him as an artist by buying his songs legally and ethically
Benefits of Downloading 2 Chainz Songs Online
-
Downloading 2 Chainz songs online can offer you many benefits, such as:
-
Convenience and accessibility
-
By downloading 2 Chainz songs online, you can access them anytime and anywhere, regardless of your internet connection or device. You can also save your storage space by deleting the songs you don't want to listen to anymore.
-
Cost-effectiveness and variety
-
Downloading 2 Chainz songs online can also save you money and time, as you don't have to buy physical CDs or DVDs that might get damaged or lost. You can also choose from a wide range of platforms and services that offer different prices and features for downloading 2 Chainz songs online.
-
-
Quality and customization
-
Downloading 2 Chainz songs online can also ensure that you get the best quality and format of his songs, as you can choose the bitrate, sample rate, and file type that suit your preferences and needs. You can also customize your downloads by adding tags, cover art, lyrics, and other metadata to your 2 Chainz songs.
-
Platforms to Download 2 Chainz Songs Online
-
There are many platforms and services that allow you to download 2 Chainz songs online, but here are some of the most popular and reliable ones:
-
Apple Music
-
Features and benefits of Apple Music
-
Apple Music is a music streaming and downloading service with a large catalog that includes 2 Chainz's latest releases. Another option is Wynk Music, which offers over 6 million songs; you can enjoy the following features and benefits of Wynk Music:
- - Access to a wide range of genres and languages, such as Hindi, English, Punjabi, Tamil, Telugu, and more
- - Ability to download unlimited songs for offline listening on up to five devices
- - Option to create your own playlists and share them with your friends
- - Integration with Airtel Xstream and other Airtel services, such as Airtel Thanks and Airtel Payments Bank
- - Personalized recommendations and curated playlists based on your preferences and listening history
- - Ad-free music experience with Wynk Premium subscription
How to download 2 Chainz songs on Wynk Music
-
To download 2 Chainz songs on Wynk Music, you need to have an Android or iOS device and a Wynk Music account. You can sign up for a free trial or choose from different plans starting from Rs. 49 per month. Once you have an account, you can follow these steps to download 2 Chainz songs on Wynk Music:
- - Open the Wynk Music app on your device and search for 2 Chainz in the search bar
- - Tap on his name and browse his albums and songs
- - Tap on the download icon (a downward arrow) next to the album or song you want to download
- - Wait for the download to complete and enjoy listening to 2 Chainz songs offline
JioSaavn
-
Features and benefits of JioSaavn
-
[JioSaavn] is a music streaming and downloading service that offers over 55 million songs, including 2 Chainz's entire discography. You can enjoy the following features and benefits of JioSaavn:
- - Access to exclusive content, such as podcasts, originals, live shows, and videos by 2 Chainz and other artists
- - Ability to download up to 10,000 songs for offline listening on up to five devices
- - Option to sync your music library across your devices with JioSaavn Pro
- - Integration with Jio network and other Jio services, such as JioCinema, JioTV, JioChat, and JioMoney
- - Personalized recommendations and curated playlists based on your preferences and listening history
- - High-quality music experience with JioSaavn Pro subscription
How to download 2 Chainz songs on JioSaavn
-
To download 2 Chainz songs on JioSaavn, you need to have an Android or iOS device and a JioSaavn account. You can sign up for a free trial or choose from different plans starting from Rs. 99 per month. Once you have an account, you can follow these steps to download 2 Chainz songs on JioSaavn:
- - Open the JioSaavn app on your device and search for 2 Chainz in the search bar
- - Tap on his name and browse his albums and songs
- - Tap on the download icon (a cloud with a downward arrow) next to the album or song you want to download
- - Wait for the download to complete and enjoy listening to 2 Chainz songs offline
Conclusion
-
In this article, we have shown you how to download 2 Chainz songs online from various platforms, as well as the benefits of doing so. Downloading 2 Chainz songs online can offer you convenience, cost-effectiveness, quality, and customization. You can choose from different platforms and services that suit your needs and preferences, such as Apple Music, Wynk Music, and JioSaavn. By downloading 2 Chainz songs online, you can enjoy his music anytime and anywhere, as well as support him as an artist.
-
If you are a fan of 2 Chainz or rap music in general, we hope this article has helped you find the best way to download his songs online. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
FAQs
-
Q1: Where can I find more information about 2 Chainz and his discography?
-
A1: You can visit his official website or his Wikipedia page for more information about his biography, awards, and discography.
-
Q2: What are some of the genres and influences of 2 Chainz's music?
-
A2: 2 Chainz's music is mainly influenced by hip hop, dirty south, and trap genres. He has also collaborated with artists from different genres, such as pop, R&B, rock, and EDM.
-
-Q3: How can I stream 2 Chainz songs online without downloading them?
-
A3: You can stream 2 Chainz songs online on various platforms, such as Spotify, YouTube Music, Amazon Music, Pandora, SoundCloud, and Tidal.
-
Q4: How can I support 2 Chainz as a fan?
-
A4: You can support 2 Chainz by buying his albums and merchandise, attending his concerts and events, following him on social media, and joining his fan club.
-
Q5: What are some of the latest releases by 2 Chainz?
-
A5: Some of the latest releases by 2 Chainz are Dope Don't Sell Itself (2022), So Help Me God! (2020), No Face No Case (2020), Rap or Go to the League (2019), and Pretty Girls Like Trap Music (2017).
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download Standoff 2 Case Simulator MOD APK for Free and Enjoy Unlimited Fun.md b/spaces/fatiXbelha/sd/Download Standoff 2 Case Simulator MOD APK for Free and Enjoy Unlimited Fun.md
deleted file mode 100644
index 7848e99ea3b6bffa909e14be2ef24722aca48c15..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download Standoff 2 Case Simulator MOD APK for Free and Enjoy Unlimited Fun.md
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
Standoff 2 Case Simulator Mod Apk: Everything You Need to Know
-
If you are a fan of the popular first-person shooter game Standoff 2, you might have heard of Standoff 2 Case Simulator. This is an app that allows you to open cases and collect skins and weapons from the game without spending real money. Sounds tempting, right? But before you download it, you should know what it is, how it works, and what are the pros and cons of using it. In this article, we will tell you everything you need to know about Standoff 2 Case Simulator mod apk, including its features, benefits, drawbacks, and alternatives.
Standoff 2 Case Simulator is an app that simulates the case opening system from the original Standoff 2 game. In case you don't know, Standoff 2 is a multiplayer online shooter game that features various modes, maps, weapons, and skins. You can customize your character and your weapons with different skins that you can get from opening cases. However, opening cases in the game requires gold or coins, which are not easy to earn or buy. That's why some players use Standoff 2 Case Simulator to get the skins they want without spending any money.
-
Features of Standoff 2 Case Simulator
-
Standoff 2 Case Simulator has many features that make it appealing to players who want to collect skins and weapons from the game. Some of these features are:
-
-
How to play Standoff 2 Case Simulator
-
The app is very easy to use. All you have to do is select a case from the list and tap on it to open it. You will see a spinning wheel with different items on it. When the wheel stops, you will get one of the items randomly. You can then check your inventory to see what you have got. You can also sell or exchange your items for more cases or coins.
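To illustrate the mechanic described above, here is a toy sketch (not the app's actual code) of how a case roll can be modeled as a weighted random draw; the item names and drop weights are invented for the example:

```python
# Toy model of a case-opening wheel: each item has a drop weight,
# and opening a case is just a weighted random choice.
import random

CASE = {
    "Common skin": 70,   # invented weights, higher = more likely
    "Rare skin": 25,
    "Knife": 5,
}

def open_case(case):
    items = list(case)
    weights = list(case.values())
    return random.choices(items, weights=weights, k=1)[0]

print(open_case(CASE))  # usually "Common skin", occasionally "Knife"
```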
-
How to get Standoff 2 Case Simulator mod apk
-
The app is not available on the official Google Play Store or App Store, so you have to download it from a third-party source. There are many websites that offer the modded version of the app, which gives you unlimited money and cases, as well as all the skins and weapons unlocked. However, you should be careful when downloading such files, as they may contain malware or viruses that can harm your device or steal your personal information.
-
-
Benefits of Standoff 2 Case Simulator mod apk
-
Using the modded version of Standoff 2 Case Simulator has some advantages that may tempt you to try it out. Some of these advantages are:
-
-
Unlimited money and cases
-
With the mod apk, you don't have to worry about running out of money or cases to open. You can open as many cases as you want and get as many skins and weapons as you want. You can also sell or exchange your items for more money or cases.
-
-
All skins and weapons unlocked
-
With the mod apk, you don't have to wait for luck or chance to get the skins and weapons you want. You can access all the items from the game without any restrictions. You can customize your character and your weapons with any skin you like and impress your friends and opponents.
-
No ads and no root required
-
With the mod apk, you don't have to deal with annoying ads that interrupt your gameplay or ask for permissions that you don't want to give. You also don't have to root your device to install the app, which can void your warranty or damage your device.
-
-
Drawbacks of Standoff 2 Case Simulator mod apk
-
However, using the modded version of Standoff 2 Case Simulator also has some disadvantages that you should be aware of. Some of these disadvantages are:
-
-
Risk of malware and viruses
-
As mentioned earlier, the app is not available on the official app stores, so you have to download it from a third-party source. This means that you are exposing your device and your personal information to potential threats from malware and viruses that may be hidden in the file. These malicious programs can harm your device, steal your data, or compromise your security.
-
Risk of ban and account suspension
-
Using the mod apk is also against the terms and conditions of the original Standoff 2 game. This means that you are violating the rules and cheating the system. If the game developers detect that you are using the mod apk, they may ban your account or suspend your access to the game. This can result in losing all your progress and achievements, as well as being unable to play with other players online.
-
Risk of losing game progress and data
-
Another risk of using the mod apk is that it may not be compatible with the latest updates or versions of the original Standoff 2 game. This can cause errors, glitches, or crashes that can affect your gameplay or even delete your game data. You may also lose your game progress and data if you uninstall the app or switch to another device.
-
-
Alternatives to Standoff 2 Case Simulator mod apk
-
If you are looking for other ways to enjoy Standoff 2 without using the mod apk, you have some options. Some of these options are:
-
-
Official Standoff 2 game
-
The best way to play Standoff 2 is to download the official game from the Google Play Store or App Store. This way, you can enjoy the game without any risks or drawbacks. You can also support the game developers and appreciate their hard work. You can still get skins and weapons from opening cases, but you have to earn or buy them with real money. You can also participate in events, tournaments, and challenges that can reward you with more cases, coins, or gold.
-
Other case simulator apps
-
If you still want to try a case simulator app, you can look for other apps that are similar to Standoff 2 Case Simulator but are more reliable and safe. For example, you can try Case Simulator for Standoff 2 by Smoked Studios, which has over 1 million downloads and a 4.5-star rating on Google Play Store. This app has many features such as online trading, jackpot, contracts, achievements, leaderboards, and more. You can also open cases from other games such as CS:GO, PUBG, and Fortnite.
-
-
Conclusion
-
Standoff 2 Case Simulator mod apk is an app that simulates the case opening system from the original Standoff 2 game. It allows you to open cases and collect skins and weapons from the game without spending real money. However, it also has some drawbacks such as risk of malware and viruses, risk of ban and account suspension, and risk of losing game progress and data. Therefore, we recommend that you avoid using this app and instead download the official Standoff 2 game or other case simulator apps that are more trustworthy and secure.
-
FAQs
-
-
Q: Is Standoff 2 Case Simulator mod apk safe?
-
A: No, it is not safe. It may contain malware or viruses that can harm your device or steal your personal information. It may also get you banned or suspended from the original Standoff 2 game.
-
Q: How do I download Standoff 2 Case Simulator mod apk?
-
A: You have to download it from a third-party source, as it is not available on the official app stores. However, we do not recommend doing this, as it is risky and illegal.
-
Q: What are the benefits of Standoff 2 Case Simulator mod apk?
-
A: The benefits of Standoff 2 Case Simulator mod apk are unlimited money and cases, all skins and weapons unlocked, and no ads and no root required.
-
Q: What are the drawbacks of Standoff 2 Case Simulator mod apk?
-
A: The drawbacks of Standoff 2 Case Simulator mod apk are risk of malware and viruses, risk of ban and account suspension, and risk of losing game progress and data.
-
Q: What are the alternatives to Standoff 2 Case Simulator mod apk?
-
A: The alternatives to Standoff 2 Case Simulator mod apk are the official Standoff 2 game or other case simulator apps that are more reliable and safe.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py" "b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py"
deleted file mode 100644
index fe71a46326cf0b8188f8d81fed99d89753b43f94..0000000000000000000000000000000000000000
--- "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\350\231\232\347\251\272\347\273\210\347\253\257.py"
+++ /dev/null
@@ -1,131 +0,0 @@
-from toolbox import CatchException, update_ui, gen_time_str
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from .crazy_utils import input_clipping
-
-
-prompt = """
-I have to achieve some functionalities by calling one of the functions below.
-Your job is to find the correct function to use to satisfy my requirement,
-and then write python code to call this function with correct parameters.
-
-These are functions you are allowed to choose from:
-1.
- 功能描述: 总结音视频内容
- 调用函数: ConcludeAudioContent(txt, llm_kwargs)
- 参数说明:
- txt: 音频文件的路径
- llm_kwargs: 模型参数, 永远给定None
-2.
- 功能描述: 将每次对话记录写入Markdown格式的文件中
- 调用函数: WriteMarkdown()
-3.
- 功能描述: 将指定目录下的PDF文件从英文翻译成中文
- 调用函数: BatchTranslatePDFDocuments_MultiThreaded(txt, llm_kwargs)
- 参数说明:
- txt: PDF文件所在的路径
- llm_kwargs: 模型参数, 永远给定None
-4.
- 功能描述: 根据文本使用GPT模型生成相应的图像
- 调用函数: ImageGeneration(txt, llm_kwargs)
- 参数说明:
- txt: 图像生成所用到的提示文本
- llm_kwargs: 模型参数, 永远给定None
-5.
- 功能描述: 对输入的word文档进行摘要生成
- 调用函数: SummarizingWordDocuments(input_path, output_path)
- 参数说明:
- input_path: 待处理的word文档路径
- output_path: 摘要生成后的文档路径
-
-
-You should always answer with the following format:
-----------------
-Code:
-```
-class AutoAcademic(object):
- def __init__(self):
- self.selected_function = "FILL_CORRECT_FUNCTION_HERE" # e.g., "GenerateImage"
- self.txt = "FILL_MAIN_PARAMETER_HERE" # e.g., "荷叶上的蜻蜓"
- self.llm_kwargs = None
-```
-Explanation:
-只有GenerateImage和生成图像相关, 因此选择GenerateImage函数。
-----------------
-
-Now, this is my requirement:
-
-"""
-def get_fn_lib():
- return {
- "BatchTranslatePDFDocuments_MultiThreaded": ("crazy_functions.批量翻译PDF文档_多线程", "批量翻译PDF文档"),
- "SummarizingWordDocuments": ("crazy_functions.总结word文档", "总结word文档"),
- "ImageGeneration": ("crazy_functions.图片生成", "图片生成"),
- "TranslateMarkdownFromEnglishToChinese": ("crazy_functions.批量Markdown翻译", "Markdown中译英"),
- "SummaryAudioVideo": ("crazy_functions.总结音视频", "总结音视频"),
- }
-
-def inspect_dependency(chatbot, history):
- return True
-
-def eval_code(code, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- import subprocess, sys, os, shutil, importlib
-
-    os.makedirs('gpt_log', exist_ok=True)  # make sure the log directory exists before writing the generated code
-    with open('gpt_log/void_terminal_runtime.py', 'w', encoding='utf8') as f:
-        f.write(code)
-
- try:
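-        # Import the module just written under gpt_log/ and fetch the AutoAcademic class defined by the generated code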
- AutoAcademic = getattr(importlib.import_module('gpt_log.void_terminal_runtime', 'AutoAcademic'), 'AutoAcademic')
- # importlib.reload(AutoAcademic)
- auto_dict = AutoAcademic()
- selected_function = auto_dict.selected_function
- txt = auto_dict.txt
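-        # Map the selected function name to its (module path, plugin function name) and dispatch to that plugin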
- fp, fn = get_fn_lib()[selected_function]
- fn_plugin = getattr(importlib.import_module(fp, fn), fn)
- yield from fn_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
- except:
- from toolbox import trimmed_format_exc
- chatbot.append(["执行错误", f"\n```\n{trimmed_format_exc()}\n```\n"])
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-def get_code_block(reply):
- import re
- pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
- matches = re.findall(pattern, reply) # find all code blocks in text
- if len(matches) != 1:
- raise RuntimeError("GPT is not generating proper code.")
-    code = matches[0]
-    if code.startswith('python'):
-        code = code[len('python'):]  # drop the language tag (str.strip('python') would treat it as a character set)
-    return code
-
-@CatchException
-def 终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- """
-    txt             text typed by the user in the input box, e.g. a passage to translate or a path containing files to process
-    llm_kwargs      GPT model parameters such as temperature and top_p, usually passed through unchanged
-    plugin_kwargs   parameters for the plugin, currently unused
-    chatbot         handle of the chat display box, used to show output to the user
-    history         chat history, i.e. the context so far
-    system_prompt   silent system prompt given to GPT
-    web_port        port on which the software is currently running
- """
-    # Clear the history to avoid input overflow
-    history = []
-
-    # Basic info: what the plugin does and who contributed it
-    chatbot.append(["函数插件功能?", "根据自然语言执行插件命令, 作者: binary-husky, 插件初始化中 ..."])
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    # # Try to import dependencies; if any are missing, suggest how to install them
-    # dep_ok = yield from inspect_dependency(chatbot=chatbot, history=history) # refresh the UI
-    # if not dep_ok: return
-
-    # Input
-    i_say = prompt + txt
-    # Start
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
- inputs=i_say, inputs_show_user=txt,
- llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
- sys_prompt=""
- )
-
-    # Extract the generated code and dispatch it to the selected plugin
- code = get_code_block(gpt_say)
- yield from eval_code(code, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
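As a usage note (not part of the original file), a hypothetical smoke test for `get_code_block` above, showing the reply format the prompt asks the model to produce and what gets extracted from it:

```python
# Hypothetical example: build a reply in the "Code:" + fenced-block format
# requested by the prompt and extract the code body with get_code_block().
fence = "```"
sample_reply = (
    "Code:\n"
    + fence + "python\n"
    + "class AutoAcademic(object):\n"
    + "    def __init__(self):\n"
    + "        self.selected_function = 'ImageGeneration'\n"
    + "        self.txt = 'a dragonfly on a lotus leaf'\n"
    + "        self.llm_kwargs = None\n"
    + fence + "\n"
    + "Explanation: only ImageGeneration is related to generating images.\n"
)
print(get_code_block(sample_reply))  # prints the class definition with the fences and language tag removed
```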
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Xmodgames APK 2021 The Ultimate Game Hacker for Android and iOS.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Xmodgames APK 2021 The Ultimate Game Hacker for Android and iOS.md
deleted file mode 100644
index af203f16314e141bbaea77fda467efe203b103fa..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Xmodgames APK 2021 The Ultimate Game Hacker for Android and iOS.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-
Xmodgames APK Download 2021: How to Get the Best Modding App for Android Games
-
If you are a fan of Android games and you want to enhance your gaming experience, then you should definitely try Xmodgames APK. This is a powerful app that allows you to mod various games on your Android device, such as Clash of Clans, Minecraft, Subway Surfers, and more. With Xmodgames APK, you can unlock unlimited resources, get unlimited lives, bypass levels, and enjoy many other features that will make your games more fun and exciting.
In this article, we will show you what Xmodgames APK is, what are its features and benefits, how to download and install it on your Android device, how to use it to mod your favorite games, and some of the most popular games that you can mod with it. So, without further ado, let's get started!
-
What is Xmodgames APK?
-
Xmodgames APK is an app that allows you to modify or hack various games on your Android device. It works by injecting code into the game files, which enables you to change or add different features and functions to the game. For example, you can get unlimited coins, gems, elixir, gold, etc., in Clash of Clans; you can get unlimited blocks, items, skins, etc., in Minecraft; you can get unlimited keys, coins, hoverboards, etc., in Subway Surfers; and so on.
-
Xmodgames APK has a user-friendly interface that lets you browse and download mods for hundreds of games. You can also create your own mods using the built-in script editor. Moreover, Xmodgames APK has a social platform where you can chat with other gamers, share your mods, join forums, and get updates on the latest mods and games.
-
-
Features of Xmodgames APK
-
Some of the main features of Xmodgames APK are:
-
-
It supports a wide range of games across different genres and categories.
-
It provides various mods for each game that can enhance your gameplay and give you an edge over other players.
-
It allows you to create your own mods using the script editor.
-
It has a simple and intuitive interface that makes it easy to use.
-
It has a social platform where you can interact with other gamers and modders.
-
It is updated regularly with new mods and games.
-
It is free to download and use.
-
-
Benefits of using Xmodgames APK
-
Some of the benefits of using Xmodgames APK are:
-
-
You can enjoy your games more by unlocking new features and functions.
-
You can save time and money by getting unlimited resources and skipping levels.
-
You can customize your games according to your preferences and style.
-
You can challenge yourself by trying different mods and settings.
-
You can learn new skills and techniques by creating your own mods.
-
You can join a community of gamers and modders who share your passion and interests.
-
-
How to download and install Xmodgames APK on your Android device
-
If you want to download and install Xmodgames APK on your Android device, you need to follow these steps:
-
Requirements for installing Xmodgames APK
Before you can install Xmodgames APK on your Android device, you need to meet some requirements. These are:
-
-
Your device must be running Android 4.0 or higher.
-
Your device must have at least 50 MB of free storage space.
-
Your device must be rooted. Rooting is the process of gaining full access to your device's system, which is necessary for Xmodgames APK to work. If you don't know how to root your device, you can check out one of the many rooting guides available online. However, be aware that rooting may void your warranty, brick your device, or expose it to security risks. Proceed at your own risk and follow the instructions carefully.
-
-
Steps to download and install Xmodgames APK
-
Once you have met the requirements, you can follow these steps to download and install Xmodgames APK on your Android device:
-
-
Download the latest version of Xmodgames APK from the official website or a trusted source. You can use this link to download the APK file.
-
Go to your device's settings and enable the installation of apps from unknown sources. This will allow you to install Xmodgames APK without any issues.
-
Locate the downloaded APK file on your device and tap on it to start the installation process.
-
Follow the on-screen instructions and grant the necessary permissions to Xmodgames APK.
-
Wait for the installation to finish and then launch Xmodgames APK from your app drawer or home screen.
-
-
Congratulations! You have successfully installed Xmodgames APK on your Android device. Now you can start modding your games and enjoy them like never before.
How to use Xmodgames APK to mod your favorite Android games
-
Now that you have installed Xmodgames APK on your Android device, you can start using it to mod your favorite games. Here are the steps to follow:
-
How to launch Xmodgames APK and grant root access
-
The first thing you need to do is to launch Xmodgames APK from your app drawer or home screen. You will see a list of games that are supported by Xmodgames APK. If you don't see your game, you can tap on the "+" icon at the bottom right corner and search for it.
-
When you launch Xmodgames APK for the first time, it will ask you to grant root access. This is necessary for Xmodgames APK to work properly. Tap on "Grant" or "Allow" when prompted by your root manager app. If you don't have a root manager app, install one first.
-
How to browse and download mods from Xmodgames APK
-
Once you have granted root access, you can browse and download mods from Xmodgames APK. To do this, tap on the game that you want to mod and then tap on "Mods" at the bottom of the screen. You will see a list of available mods for that game, along with their descriptions, ratings, and screenshots.
-
To download a mod, simply tap on it and then tap on "Install". The mod will be downloaded and installed automatically. You can also update or uninstall mods from the same screen.
-
How to apply mods to your games using Xmodgames APK
-
After downloading and installing mods, you can apply them to your games using Xmodgames APK. To do this, tap on the game that you want to mod and then tap on "Launch" at the bottom of the screen. The game will be launched with the mod applied.
-
To access the mod menu, tap on the floating Xmod icon on the screen. You will see a list of options that you can toggle on or off, depending on the mod. For example, you can enable unlimited resources, auto-aim, speed hack, etc. You can also adjust the settings of the mod according to your preference.
-
To disable the mod, simply exit the game and launch it normally without using Xmodgames APK.
-
Some of the most popular games that you can mod with Xmodgames APK
-
Xmodgames APK supports hundreds of games across different genres and categories. However, some of the most popular games that you can mod with Xmodgames APK are:
-
Clash of Clans
-
Clash of Clans is one of the most popular strategy games on Android. It involves building your own village, training your troops, and battling other players online. With Xmodgames APK, you can mod Clash of Clans to get unlimited resources, such as gold, elixir, dark elixir, gems, etc. You can also use features like sandbox attack, search loot, keep active, etc., to improve your gameplay and win more battles.
-
Minecraft
-
Minecraft is one of the most creative games on Android. It allows you to create and explore a world made of blocks, where you can build anything you can imagine. With Xmodgames APK, you can mod Minecraft to get unlimited blocks, items, skins, etc. You can also use features like teleportation, x-ray vision, night vision, etc., to enhance your experience and discover new possibilities.
-
Subway Surfers
-
Subway Surfers is one of the most addictive games on Android. It involves running away from the police on a subway track, dodging obstacles, collecting coins and power-ups, and unlocking new characters and boards. With Xmodgames APK, you can mod Subway Surfers to get unlimited keys, coins, hoverboards, etc. You can also use features like speed hack, score multiplier, etc., to boost your performance and beat your high score.
-
Conclusion
-
Xmodgames APK is a great app for Android gamers who want to mod their games and enjoy them more. It supports a wide range of games across different genres and categories. It provides various mods for each game that can enhance your gameplay and give you an edge over other players. It allows you to create your own mods using the script editor. It has a simple and intuitive interface that makes it easy to use. It has a social platform where you can interact with other gamers and modders.
-
To use Xmodgames APK, you need to download and install it on a rooted Android device, grant it root access, and then browse, download, and apply mods to your favorite games, such as Clash of Clans, Minecraft, and Subway Surfers.
FAQs
-
Here are some of the frequently asked questions about Xmodgames APK and their answers:
-
-
Is Xmodgames APK safe to use?
-
Xmodgames APK is generally safe to use, as long as you download it from a trusted source and scan it for malware before installing it. However, there are some risks involved in using Xmodgames APK, such as violating the terms of service of the games you mod, getting banned from online servers, or damaging your device if you use incompatible or faulty mods. Therefore, you should use Xmodgames APK at your own discretion and responsibility.
-
Does Xmodgames APK work on all Android devices?
-
Xmodgames APK works on most Android devices that are running Android 4.0 or higher and have root access. However, some devices may not be compatible with Xmodgames APK or some of its mods, depending on the device model, firmware version, and hardware specifications. If you encounter any issues while using Xmodgames APK on your device, you can try to update your device software, uninstall any conflicting apps, or contact the Xmodgames support team for assistance.
-
Does Xmodgames APK require an internet connection?
-
Xmodgames APK requires an internet connection to download and update mods from its online repository. However, once you have downloaded and installed the mods, you can use them offline without an internet connection, unless the game itself requires an internet connection to run.
-
Can I use Xmodgames APK to mod online games?
-
Xmodgames APK can be used to mod online games, but it is not recommended. Modding online games can give you an unfair advantage over other players, which is considered cheating and unethical. Moreover, modding online games can be detected by the game developers or administrators, who may ban your account or take legal action against you. Therefore, you should only use Xmodgames APK to mod offline games or online games that allow modding.
-
Can I uninstall Xmodgames APK after using it?
-
You can uninstall Xmodgames APK after using it, but you will lose all the mods and settings that you have applied to your games. To uninstall Xmodgames APK, you can go to your device's settings and find Xmodgames under the apps section. Then, tap on it and select "Uninstall". Alternatively, you can use a root uninstaller app to remove Xmodgames APK from your device.
-
-
I hope this article has helped you learn more about Xmodgames APK and how to use it to mod your Android games. If you have any questions or feedback, feel free to leave a comment below. Happy gaming!
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fffiloni/Music_Source_Separation/bytesep/models/__init__.py b/spaces/fffiloni/Music_Source_Separation/bytesep/models/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/finaspirant/SearchWithVoice/README.md b/spaces/finaspirant/SearchWithVoice/README.md
deleted file mode 100644
index 7a076a369b3a06b57424c875cb755ac83d4afdb7..0000000000000000000000000000000000000000
--- a/spaces/finaspirant/SearchWithVoice/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: SearchWithVoice
-emoji: 🌖
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 3.46.1
-app_file: app.py
-pinned: false
----
-This is a simple configuration of an LLM with Google Search using LangChain. You ask a question by voice and
-the answer is provided as audio and text.
-
-Unfortunately, the elevenlabs playback API does not work on Hugging Face (no error is raised, though). However,
-you can run the code locally to hear the audio. Either way, you will still see the text answer generated with Google Search via LangChain.
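For readers running it locally, a minimal sketch of the kind of pipeline described above (this is not the Space's actual app.py; it assumes the classic LangChain agent API, a SerpAPI key for the search tool, an OpenAI key, and the elevenlabs Python SDK):

```python
# Minimal sketch, not the Space's code: answer a question with an LLM + search tool,
# then speak the answer with elevenlabs when running locally.
from langchain.llms import OpenAI
from langchain.agents import initialize_agent, load_tools

llm = OpenAI(temperature=0)                      # assumes OPENAI_API_KEY is set
tools = load_tools(["serpapi"], llm=llm)         # assumes SERPAPI_API_KEY is set
agent = initialize_agent(tools, llm, agent="zero-shot-react-description")

answer = agent.run("What is the tallest mountain in Japan?")
print(answer)

# Audio playback works locally but not on the hosted Space, as noted above.
from elevenlabs import generate, play            # assumes ELEVEN_API_KEY is set
play(generate(text=answer))
```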
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/flax-community/SentenceSimplifier/About/gitrepo.md b/spaces/flax-community/SentenceSimplifier/About/gitrepo.md
deleted file mode 100644
index d7d3d0affbbe7e5ebcaee6fac09bc290ec353abb..0000000000000000000000000000000000000000
--- a/spaces/flax-community/SentenceSimplifier/About/gitrepo.md
+++ /dev/null
@@ -1,2 +0,0 @@
-## Github Repo
-* [t5-sentence-split](https://github.com/bhadreshpsavani/t5-sentence-split)
\ No newline at end of file
diff --git a/spaces/flax-community/koclip/image2text.py b/spaces/flax-community/koclip/image2text.py
deleted file mode 100644
index 39065ffbbf5846360dbc94753bb6eca9be5f1026..0000000000000000000000000000000000000000
--- a/spaces/flax-community/koclip/image2text.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import jax
-import jax.numpy as jnp
-import pandas as pd
-import requests
-import streamlit as st
-from PIL import Image
-
-from utils import load_model
-
-
-def app(model_name):
- model, processor = load_model(f"koclip/{model_name}")
-
- st.title("Zero-shot Image Classification")
- st.markdown(
- """
- This demo explores KoCLIP's zero-shot prediction capabilities. The model takes an image and a list of candidate captions from the user and predicts the most likely caption that best describes the given image.
-
- ---
- """
- )
-
- query1 = st.text_input(
- "Enter a URL to an image...",
- value="http://images.cocodataset.org/val2017/000000039769.jpg",
- )
- query2 = st.file_uploader("or upload an image...", type=["jpg", "jpeg", "png"])
-
- col1, col2 = st.beta_columns([3, 1])
-
- with col2:
- captions_count = st.selectbox("Number of labels", options=range(1, 6), index=2)
- normalize = st.checkbox("Apply Softmax")
- compute = st.button("Classify")
-
- with col1:
- captions = []
- defaults = ["귀여운 고양이", "멋있는 강아지", "포동포동한 햄스터"]
- for idx in range(captions_count):
- value = defaults[idx] if idx < len(defaults) else ""
- captions.append(st.text_input(f"Insert caption {idx+1}", value=value))
-
- if compute:
- if not any([query1, query2]):
- st.error("Please upload an image or paste an image URL.")
- else:
- st.markdown("""---""")
- with st.spinner("Computing..."):
- image_data = (
- query2
- if query2 is not None
- else requests.get(query1, stream=True).raw
- )
- image = Image.open(image_data)
-
- # captions = [caption.strip() for caption in captions.split(",")]
- captions = [f"이것은 {caption.strip()}이다." for caption in captions]
- inputs = processor(
- text=captions, images=image, return_tensors="jax", padding=True
- )
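-            # The processor returns channels-first (N, C, H, W) pixel values; transpose to channels-last (N, H, W, C) for the Flax model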
- inputs["pixel_values"] = jnp.transpose(
- inputs["pixel_values"], axes=[0, 2, 3, 1]
- )
- outputs = model(**inputs)
- if normalize:
- name = "normalized prob"
- probs = jax.nn.softmax(outputs.logits_per_image, axis=1)
- else:
- name = "cosine sim"
- probs = outputs.logits_per_image
- chart_data = pd.Series(probs[0], index=captions, name=name)
-
- col1, col2 = st.beta_columns(2)
- with col1:
- st.image(image)
- with col2:
- st.bar_chart(chart_data)
diff --git a/spaces/flf/8983/Dockerfile b/spaces/flf/8983/Dockerfile
deleted file mode 100644
index a0275a9dc71630016c09d6dd477a4a73b8c64b71..0000000000000000000000000000000000000000
--- a/spaces/flf/8983/Dockerfile
+++ /dev/null
@@ -1,32 +0,0 @@
-FROM openjdk:21-slim
-
-# Set the timezone to mainland China
-ENV TZ Asia/Shanghai
-
-# Switch to the working directory
-WORKDIR /home
-
-# Copy files into the working directory
-COPY ./ /home
-
-# Grant permissions recursively
-RUN chmod -R 777 /tmp
-RUN chmod -R 777 /home
-
-# Install the required tools
-RUN apt update
-RUN apt install -y jq
-RUN apt install -y curl
-RUN apt install -y procps
-
-# Expose the port
-EXPOSE 7860
-
-# Check what CPU Hugging Face gives us
-RUN lscpu
-
-# Check how much memory Hugging Face gives us
-RUN free -m
-
-# Launch!
-CMD bash app
\ No newline at end of file
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/checkpoint.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/checkpoint.py
deleted file mode 100644
index 6af3fae43ac4b35532641a81eb13557edfc7dfba..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/checkpoint.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-import warnings
-
-from annotator.uniformer.mmcv.fileio import FileClient
-from ..dist_utils import allreduce_params, master_only
-from .hook import HOOKS, Hook
-
-
-@HOOKS.register_module()
-class CheckpointHook(Hook):
- """Save checkpoints periodically.
-
- Args:
- interval (int): The saving period. If ``by_epoch=True``, interval
- indicates epochs, otherwise it indicates iterations.
- Default: -1, which means "never".
- by_epoch (bool): Saving checkpoints by epoch or by iteration.
- Default: True.
- save_optimizer (bool): Whether to save optimizer state_dict in the
- checkpoint. It is usually used for resuming experiments.
- Default: True.
- out_dir (str, optional): The root directory to save checkpoints. If not
- specified, ``runner.work_dir`` will be used by default. If
- specified, the ``out_dir`` will be the concatenation of ``out_dir``
- and the last level directory of ``runner.work_dir``.
- `Changed in version 1.3.16.`
- max_keep_ckpts (int, optional): The maximum checkpoints to keep.
- In some cases we want only the latest few checkpoints and would
- like to delete old ones to save disk space.
- Default: -1, which means unlimited.
- save_last (bool, optional): Whether to force the last checkpoint to be
- saved regardless of interval. Default: True.
- sync_buffer (bool, optional): Whether to synchronize buffers across
- different GPUs. Default: False.
- file_client_args (dict, optional): Arguments to instantiate a
- FileClient. See :class:`mmcv.fileio.FileClient` for details.
- Default: None.
- `New in version 1.3.16.`
-
- .. warning::
- Before v1.3.16, the ``out_dir`` argument indicates the path where the
- checkpoint is stored. However, since v1.3.16, ``out_dir`` indicates the
- root directory and the final path to save checkpoint is the
- concatenation of ``out_dir`` and the last level directory of
- ``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A"
- and the value of ``runner.work_dir`` is "/path/of/B", then the final
- path will be "/path/of/A/B".
- """
-
- def __init__(self,
- interval=-1,
- by_epoch=True,
- save_optimizer=True,
- out_dir=None,
- max_keep_ckpts=-1,
- save_last=True,
- sync_buffer=False,
- file_client_args=None,
- **kwargs):
- self.interval = interval
- self.by_epoch = by_epoch
- self.save_optimizer = save_optimizer
- self.out_dir = out_dir
- self.max_keep_ckpts = max_keep_ckpts
- self.save_last = save_last
- self.args = kwargs
- self.sync_buffer = sync_buffer
- self.file_client_args = file_client_args
-
- def before_run(self, runner):
- if not self.out_dir:
- self.out_dir = runner.work_dir
-
- self.file_client = FileClient.infer_client(self.file_client_args,
- self.out_dir)
-
- # if `self.out_dir` is not equal to `runner.work_dir`, it means that
- # `self.out_dir` is set so the final `self.out_dir` is the
- # concatenation of `self.out_dir` and the last level directory of
- # `runner.work_dir`
- if self.out_dir != runner.work_dir:
- basename = osp.basename(runner.work_dir.rstrip(osp.sep))
- self.out_dir = self.file_client.join_path(self.out_dir, basename)
-
- runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by '
- f'{self.file_client.name}.'))
-
- # disable the create_symlink option because some file backends do not
- # allow to create a symlink
- if 'create_symlink' in self.args:
- if self.args[
- 'create_symlink'] and not self.file_client.allow_symlink:
- self.args['create_symlink'] = False
- warnings.warn(
- ('create_symlink is set as True by the user but is changed '
- 'to be False because creating symbolic link is not '
- f'allowed in {self.file_client.name}'))
- else:
- self.args['create_symlink'] = self.file_client.allow_symlink
-
- def after_train_epoch(self, runner):
- if not self.by_epoch:
- return
-
- # save checkpoint for following cases:
- # 1. every ``self.interval`` epochs
- # 2. reach the last epoch of training
- if self.every_n_epochs(
- runner, self.interval) or (self.save_last
- and self.is_last_epoch(runner)):
- runner.logger.info(
- f'Saving checkpoint at {runner.epoch + 1} epochs')
- if self.sync_buffer:
- allreduce_params(runner.model.buffers())
- self._save_checkpoint(runner)
-
- @master_only
- def _save_checkpoint(self, runner):
- """Save the current checkpoint and delete unwanted checkpoint."""
- runner.save_checkpoint(
- self.out_dir, save_optimizer=self.save_optimizer, **self.args)
- if runner.meta is not None:
- if self.by_epoch:
- cur_ckpt_filename = self.args.get(
- 'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
- else:
- cur_ckpt_filename = self.args.get(
- 'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
- runner.meta.setdefault('hook_msgs', dict())
- runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path(
- self.out_dir, cur_ckpt_filename)
- # remove other checkpoints
- if self.max_keep_ckpts > 0:
- if self.by_epoch:
- name = 'epoch_{}.pth'
- current_ckpt = runner.epoch + 1
- else:
- name = 'iter_{}.pth'
- current_ckpt = runner.iter + 1
- redundant_ckpts = range(
- current_ckpt - self.max_keep_ckpts * self.interval, 0,
- -self.interval)
- filename_tmpl = self.args.get('filename_tmpl', name)
- for _step in redundant_ckpts:
- ckpt_path = self.file_client.join_path(
- self.out_dir, filename_tmpl.format(_step))
- if self.file_client.isfile(ckpt_path):
- self.file_client.remove(ckpt_path)
- else:
- break
-
- def after_train_iter(self, runner):
- if self.by_epoch:
- return
-
- # save checkpoint for following cases:
- # 1. every ``self.interval`` iterations
- # 2. reach the last iteration of training
- if self.every_n_iters(
- runner, self.interval) or (self.save_last
- and self.is_last_iter(runner)):
- runner.logger.info(
- f'Saving checkpoint at {runner.iter + 1} iterations')
- if self.sync_buffer:
- allreduce_params(runner.model.buffers())
- self._save_checkpoint(runner)
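For context, the hook above is normally driven through a runner rather than called directly. A minimal sketch of how it might be configured, assuming the vendored `annotator.uniformer.mmcv` runner API matches upstream mmcv (`register_hook` and the keyword names are taken from the docstring and `__init__` above; the paths are placeholders):

```python
from annotator.uniformer.mmcv.runner.hooks.checkpoint import CheckpointHook

ckpt_hook = CheckpointHook(
    interval=1,            # save every epoch (by_epoch=True is the default)
    max_keep_ckpts=3,      # keep only the three newest epoch_*.pth files
    out_dir="/path/of/A",  # checkpoints land in /path/of/A/<basename of runner.work_dir>
    save_optimizer=True,
)
# runner.register_hook(ckpt_hook)  # typical registration on an mmcv runner (assumed)
```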
diff --git a/spaces/globalmatt/catsanddogs/README.md b/spaces/globalmatt/catsanddogs/README.md
deleted file mode 100644
index 6121bef54acac63a959db20750a95afacf22b106..0000000000000000000000000000000000000000
--- a/spaces/globalmatt/catsanddogs/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Catsanddogs
-emoji: 🌖
-colorFrom: pink
-colorTo: purple
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
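Since the front matter declares `sdk: gradio` (3.24.1) with `app_file: app.py`, the Space's entry point is expected to be a Gradio app. A minimal placeholder sketch of such an entry point follows; the interface, labels, and the classifier stub are assumptions for illustration, not the Space's actual code:

```python
import gradio as gr

def classify(image):
    # Placeholder: the real Space presumably runs a cats-vs-dogs classifier here.
    return {"cat": 0.5, "dog": 0.5}

demo = gr.Interface(
    fn=classify,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(num_top_classes=2),
    title="Catsanddogs",
)

if __name__ == "__main__":
    demo.launch()
```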
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/90 Meter Smart Card Manager Software Download VERIFIED.md b/spaces/gotiQspiryo/whisper-ui/examples/90 Meter Smart Card Manager Software Download VERIFIED.md
deleted file mode 100644
index cea83e148f9a9da0d313be8f679cd3cd60d5d779..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/90 Meter Smart Card Manager Software Download VERIFIED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Velamma Comics Kickass Reading booktorrent my id. Velamma All Episodes In Hindi Pdf Free Download Full Pdf. Waptrick com Free E Books Waptrick E Book. 4d29de3e1b
-
-
-
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/MiniTool Partition Wizard 2020 Crack License Key Full Free Download A Powerful Tool for Disk Optimization and Data Recovery.md b/spaces/gotiQspiryo/whisper-ui/examples/MiniTool Partition Wizard 2020 Crack License Key Full Free Download A Powerful Tool for Disk Optimization and Data Recovery.md
deleted file mode 100644
index a36268aef39722bf050fadb1719b4d4254ae72be..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/MiniTool Partition Wizard 2020 Crack License Key Full Free Download A Powerful Tool for Disk Optimization and Data Recovery.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
MiniTool Partition Wizard Free is definitely among the best free partition manager software for Windows 10. This free program enables users to redistribute hard drive space and check hard drive health. Feel free to download it and have a try.
-
MiniTool Partition Wizard 2020 Crack License Key Full Free Download
i aint no audiphile, but i guess the source of the song plays a major role. a very good.. the dali cd - in admiration of music. at least 320kbps if mp3, ideally you should prefer any lossless format like flac or use a cd if that is convenient. it is also recommended to test at the same volume level.
-
the other members of keneally touring band bryan beller (dethklok, the aristocrats) on bass, joe travers (zappa plays zappa, duran duran) on drums, and rick musallam (ben taylor, the roots) on guitar join mike for cornbread crumb and popes, two relaxed, joyful celebrations of groove and texture. cornbread features perhaps keneallys best recorded lead guitar playing to date, and the slyly humorous popes is the albums lead video, with playful, colorful animation from bulgarian filmmaker dimitriya ( ).
- dali -.. the dali cd vol.1-4 (4cd). : ape, flac. the dali cd - jan harbeck quartet, too darn hot.flac39 mb; 16. grieg- in the hall of the mountain king.flac9,451 kb; the dali cd - volume 3.log18 kb. #3. i aint no audiphile, but i guess the source of the song plays a major role. a very good. the dali cd - in admiration of music. at least 320kbps if mp3, ideally you should prefer any lossless format like flac or use a cd if that is convenient. it is also recommended to test at the same volume level.
-
keneallys a startlingly versatile artist, having first come to prominence as a member of frank zappas 1988 band, and he can currently be found touring alternately as a member of [adult swim]-spawned metal act dethklok, and as both keyboardist and guitarist with joe satriani. but while his adaptability and wide-ranging musical interests make him an invaluable addition to these and many other musical environments, its as bandleader, producer and composer/performer/vocalist that his talents reach their highest fruition. you must be this tall provides an expansive platform for these talents; relatively compact (12 songs in 44 minutes) but covering a huge swath of stylistic ground.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Dll Tool License Key Crack ((BETTER)).md b/spaces/inplisQlawa/anything-midjourney-v4-1/Dll Tool License Key Crack ((BETTER)).md
deleted file mode 100644
index e42d833adb41c038abccdbcfb127eaf9909581e3..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Dll Tool License Key Crack ((BETTER)).md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Helicon Remote Android Crack Wifi: How to Use Helicon Remote on Your Android Device
-
Helicon Remote is a utility for tethered shooting and camera remote control compatible with all recent Nikon and Canon DSLR cameras. It allows you to control your camera settings, focus, exposure, and more from your Android device via Wi-Fi or USB connection. You can also use Helicon Remote to perform automated focus bracketing, focus stacking, time lapse shooting, video recording, and other advanced features.
If you want to use Helicon Remote on your Android device, you will need to download and install the APK file from APKCombo or Google Play Store. You will also need a device with USB OTG (host mode) support and a USB OTG adapter if your device doesn't have a full size USB socket. Alternatively, you can use a Wi-Fi connection if your camera has a built-in Wi-Fi unit or an external wireless adapter.
-
How to connect your camera to your Android device via USB
-
To connect your camera to your Android device via USB, you will need to follow these steps:
-
-
Make sure no other camera-related apps are running on your device.
-
Run Helicon Remote on your device.
-
Connect your camera to your device using a USB cable and a USB OTG adapter if needed.
-
Turn on your camera and set it to PTP mode (for Canon cameras) or MTP mode (for Nikon cameras).
-
Press "Select a camera" on Helicon Remote and choose your camera model from the list.
-
-
You should see a live view of your camera on your device screen. You can now use Helicon Remote to control your camera settings and functions.
-
How to connect your camera to your Android device via Wi-Fi
-
To connect your camera to your Android device via Wi-Fi, you will need to follow these steps:
-
-
-
Make sure no other camera-related apps are running on your device, especially Canon Connect or Nikon Wireless Mobile Utility. You may need to uninstall them if they interfere with Helicon Remote.
-
Run Helicon Remote on your device.
-
Activate Wi-Fi on your camera and choose the Smartphone mode (for Canon cameras) or Network connection: ON (for Nikon cameras).
-
Connect your device to the Wi-Fi network created by your camera.
-
Press "Select a camera" on Helicon Remote and choose "Auto-detected Canon camera" or "Auto-detected Nikon camera" from the list.
-
-
You should see a live view of your camera on your device screen. You can now use Helicon Remote to control your camera settings and functions.
-
What are the benefits of using Helicon Remote on your Android device?
-
Using Helicon Remote on your Android device has many benefits, such as:
-
-
You can control your camera settings and functions remotely from your device without touching the camera body.
-
You can preview the results of your shots on a larger screen and zoom in to check the focus and details.
-
You can perform automated focus bracketing, focus stacking, time lapse shooting, video recording, and other advanced features with ease.
-
You can save battery power and storage space by transferring only the images you need to your device.
-
You can share your images instantly with others via email, social media, or cloud services.
-
-
Conclusion
-
Helicon Remote is a utility for tethered shooting and camera remote control compatible with all recent Nikon and Canon DSLR cameras. It allows you to control your camera settings, focus, exposure, and more from your Android device via Wi-Fi or USB connection. You can also use Helicon Remote to perform automated focus bracketing, focus stacking, time lapse shooting, video recording, and other advanced features.
-
If you want to use Helicon Remote on your Android device, you will need to download and install the APK file from APKCombo or Google Play Store. You will also need a device with USB OTG (host mode) support and a USB OTG adapter if your device doesn't have a full size USB socket. Alternatively, you can use a Wi-Fi connection if your camera has a built-in Wi-Fi unit or an external wireless adapter.
-
Using Helicon Remote on your Android device has many benefits, such as controlling your camera remotely, previewing the results on a larger screen, performing advanced features with ease, saving battery power and storage space, and sharing your images instantly.
-
If you are looking for a heliconremoteandroidcrackwifi solution, then you should try Helicon Remote today and enjoy!
-
How to use Helicon Remote on your Android device
-
Once you have connected your camera to your Android device via Wi-Fi or USB, you can start using Helicon Remote to control your camera settings and functions. Here are some of the things you can do with Helicon Remote:
-
-
You can adjust the aperture, shutter speed, ISO, white balance, exposure compensation, and other settings on your camera from your device.
-
You can use the touch screen to focus on any point in the live view image. You can also use the focus peaking and focus areas highlighting features to check the focus accuracy.
-
You can take single shots or burst shots by pressing the shutter button on your device. You can also use the self-timer or intervalometer features to take delayed or timed shots.
-
You can perform automated focus bracketing by setting the number of shots, step size, and direction. You can also combine focus bracketing with exposure bracketing and time lapse shooting for more creative results.
-
You can preview the focus stacking result on your device by using the Helicon Remote server feature. You can also export the focus bracketed images to Helicon Focus for further processing.
-
You can record videos with your camera from your device. You can also adjust the video settings and start/stop recording from your device.
-
You can review the images and videos you have taken on your device. You can zoom in, delete, or share them via email, social media, or cloud services.
-
You can use the hyper focal distance and DOF calculator to determine the optimal focus distance and depth of field for your shots.
-
You can use the live histogram (grayscale/RGB) to check the exposure and tonal range of your shots.
-
-What are the advantages and disadvantages of using Helicon Remote on your Android device?
-
Using Helicon Remote on your Android device has some advantages and disadvantages that you should be aware of before using it. Here are some of them:
-
-
Advantages
Disadvantages
-
You can control your camera remotely from your device without touching the camera body.
You may experience some lag or delay in the live view image or camera response due to Wi-Fi or USB connection issues.
-
You can preview the results of your shots on a larger screen and zoom in to check the focus and details.
You may drain your battery power faster due to Wi-Fi or USB connection and live view usage.
-
You can perform automated focus bracketing, focus stacking, time lapse shooting, video recording, and other advanced features with ease.
You may need extra storage space on your device or camera to store the images and videos you take.
-
You can save battery power and storage space by transferring only the images you need to your device.
You may lose some image quality or metadata due to image compression or conversion when transferring images to your device.
-
You can share your images instantly with others via email, social media, or cloud services.
You may need a stable internet connection to upload or download images from your device.
-
-Conclusion
-
Helicon Remote is a utility for tethered shooting and camera remote control compatible with all recent Nikon and Canon DSLR cameras. It allows you to control your camera settings, focus, exposure, and more from your Android device via Wi-Fi or USB connection. You can also use Helicon Remote to perform automated focus bracketing, focus stacking, time lapse shooting, video recording, and other advanced features.
-
If you want to use Helicon Remote on your Android device, you will need to download and install the APK file from APKCombo or Google Play Store. You will also need a device with USB OTG (host mode) support and a USB OTG adapter if your device doesn't have a full size USB socket. Alternatively, you can use a Wi-Fi connection if your camera has a built-in Wi-Fi unit or an external wireless adapter.
-
Using Helicon Remote on your Android device has some advantages and disadvantages that you should be aware of before using it. Some of the advantages are controlling your camera remotely, previewing the results on a larger screen, performing advanced features with ease, saving battery power and storage space, and sharing your images instantly. Some of the disadvantages are experiencing some lag or delay in the live view image or camera response, draining your battery power faster, needing extra storage space on your device or camera, losing some image quality or metadata when transferring images to your device, and needing a stable internet connection to upload or download images from your device.
-
If you are looking for a heliconremoteandroidcrackwifi solution, then you should try Helicon Remote today and enjoy!
-How to perform automated focus bracketing with Helicon Remote on your Android device
-
One of the most powerful features of Helicon Remote is the ability to perform automated focus bracketing with your camera from your Android device. Focus bracketing is a technique that involves taking a series of shots with different focus points and then stacking them together to create an image with extended depth of field. This is especially useful for macro photography, where the depth of field is very shallow and it is impossible to get everything in focus in one shot.
-
To perform automated focus bracketing with Helicon Remote on your Android device, you will need to follow these steps:
-
-
Connect your camera to your Android device via Wi-Fi or USB as described above.
-
Switch to the Focus tab on Helicon Remote and tap on the Auto button.
-
Select the number of shots, step size, and direction for the focus bracketing. You can also adjust the exposure compensation and delay between shots if needed.
-
Tap on Start to begin the focus bracketing process. Helicon Remote will take a series of shots with different focus points and save them on your camera or device.
-
You can preview the focus stacking result on your device by tapping on the Stack button. Helicon Remote will send the images to the Helicon Remote server and display the stacked image on your device screen.
-
You can also export the focus bracketed images to Helicon Focus for further processing. Helicon Focus is a software that allows you to stack and edit focus bracketed images with advanced tools and algorithms.
-
-
By using Helicon Remote on your Android device, you can perform automated focus bracketing with ease and create stunning images with extended depth of field.
-How to crack Helicon Remote on your Android device
-
If you want to use Helicon Remote on your Android device without any limitations, you may be tempted to look for a heliconremoteandroidcrackwifi solution. However, we strongly advise you against doing so, as it may have serious consequences for your device and camera. Here are some of the reasons why you should not crack Helicon Remote on your Android device:
-
-
You may download a malicious file that contains viruses, malware, or spyware that can harm your device or steal your personal information.
-
You may violate the terms and conditions of Helicon Soft Ltd. and lose your right to use their products and services.
-
You may compromise the quality and functionality of Helicon Remote and experience bugs, errors, or crashes that can damage your device or camera.
-
You may miss out on the latest updates and features of Helicon Remote that can improve your user experience and performance.
-
You may lose access to the Helicon Remote server and support that can help you with any issues or questions you may have.
-
-
Instead of cracking Helicon Remote on your Android device, we recommend you to purchase a license from Helicon Soft Ltd. that will allow you to use Helicon Remote without any restrictions. By purchasing a license, you will also support the development and improvement of Helicon Remote and other products by Helicon Soft Ltd.
-
You can purchase a license for Helicon Remote from their website: https://www.heliconsoft.com/heliconsoft-products/helicon-remote/
-Conclusion
-
Helicon Remote is a utility for tethered shooting and camera remote control compatible with all recent Nikon and Canon DSLR cameras. It allows you to control your camera settings, focus, exposure, and more from your Android device via Wi-Fi or USB connection. You can also use Helicon Remote to perform automated focus bracketing, focus stacking, time lapse shooting, video recording, and other advanced features.
-
If you want to use Helicon Remote on your Android device, you will need to download and install the APK file from APKCombo or Google Play Store. You will also need a device with USB OTG (host mode) support and a USB OTG adapter if your device doesn't have a full size USB socket. Alternatively, you can use a Wi-Fi connection if your camera has a built-in Wi-Fi unit or an external wireless adapter.
-
Using Helicon Remote on your Android device has some advantages and disadvantages that you should be aware of before using it. Some of the advantages are controlling your camera remotely, previewing the results on a larger screen, performing advanced features with ease, saving battery power and storage space, and sharing your images instantly. Some of the disadvantages are experiencing some lag or delay in the live view image or camera response, draining your battery power faster, needing extra storage space on your device or camera, losing some image quality or metadata when transferring images to your device, and needing a stable internet connection to upload or download images from your device.
-
If you are looking for a heliconremoteandroidcrackwifi solution, then you should try Helicon Remote today and enjoy!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/MKV Converter Studio V2.0.1 InclSerial Tested - DeGun SCTV83 Utorrent.md b/spaces/inplisQlawa/anything-midjourney-v4-1/MKV Converter Studio V2.0.1 InclSerial Tested - DeGun SCTV83 Utorrent.md
deleted file mode 100644
index f39426c986a9b1553f791264b0a6610c45dfe391..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/MKV Converter Studio V2.0.1 InclSerial Tested - DeGun SCTV83 Utorrent.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
MKV Converter Studio v2.0.1 Incl;Serial Tested - DeGun SCTV83 utorrent
-
- d5da3c52bf
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Microsoft Windows Media Center Edition 2005 Activation HOT! Crack.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Microsoft Windows Media Center Edition 2005 Activation HOT! Crack.md
deleted file mode 100644
index 96b146cf1c5428f5a8774dcd4876dcbe5a3babe4..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Microsoft Windows Media Center Edition 2005 Activation HOT! Crack.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
The only big change with Windows 10 is that Windows Store is now your only choice if you want to run any of Microsoft's apps. Microsoft's online store lets you install Microsoft Edge, Microsoft Office, Microsoft security, Office 365, Windows Defender, and other apps that are nowhere else.
-
Microsoft is also asking Windows 10 to be your only choice because Windows 10 is the only version of Windows that will let you install the apps they offer through the Windows Store. Windows 8 and Windows 8.1 can install Office 365, and some security and management apps, but they cannot install the apps from Microsoft's store. Windows 10 is also the only version of Windows that can use the new Universal Windows Platform app, the Windows Store, and some of its technology.
-
microsoft windows media center edition 2005 activation crack
Windows 10 1903, the latest version of Windows 10, uses an exclusive version of Windows 7's codec, DirectX, and drivers to give you the best and smoothest experience. Windows 10 does not include Windows Media Center, but Windows 10 1903 does include a version of the Windows Media Center Classic Shell that looks and works like the Windows 7 version. If you only need the interface, go for it. If you want to play videos, browse the internet, and do more things that Windows Media Center does, go for the full version.
-
If you need the Windows Media Center 2016 and 2017 apps, you can download the Windows 10 version from Microsoft. The Windows 10 version will work with Windows 10 1903 and all later versions of Windows.
-
Everything is designed for regular people. I mean just run the software. No need to sign-in, no need to create an account, just download or go to the website and upgrade - it's that easy. Now that's not to say that "regular people" don't want to sign-in and create a Microsoft account - they do. But that's nothing new, and Microsoft has been providing features that allow users to switch off these settings in Windows from the start. That means it's relatively easy for the average person to set that up themselves.
- 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/((BETTER)) Download Doa Nur Buat Pdf.md b/spaces/inreVtussa/clothingai/Examples/((BETTER)) Download Doa Nur Buat Pdf.md
deleted file mode 100644
index acd20c1dc9c8b1122d8cb25bb262ffe8887cf727..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/((BETTER)) Download Doa Nur Buat Pdf.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
Download Doa Nur Buat Pdf: A Guide to the Powerful Prayer
-
-
Do you want to learn more about Doa Nur Buat, a prayer that was revealed by Angel Jibril to Prophet Muhammad (peace be upon him)? Do you want to know the benefits and meaning of this prayer, and how to recite it properly? If so, you can download Doa Nur Buat Pdf from various sources online and read it at your convenience.
Doa Nur Buat, also known as Doa Nuurun Nubuwwah or Doa Nurbuat, is a prayer that contains praises and supplications to Allah, the Most High, the Most Merciful. It is said that this prayer has great virtues and blessings for those who recite it sincerely and regularly. Some of the benefits of Doa Nur Buat are:
-
-
-
It protects the reciter from evil eyes, sorcery, jinn, and enemies.
-
It grants the reciter health, longevity, wealth, and happiness.
-
It fulfills the reciter's needs and wishes.
-
It makes the reciter beloved by all people.
-
It confirms the truth of Islam and refutes the falsehood of disbelief.
-
It heals and soothes the reciter's heart and soul.
-
-
-
The meaning of Doa Nur Buat is as follows:
-
-
O Allah, the One Who has the great power, the One Who has the eternal will, and the One Who has the noble face and the protector of His words and the acceptor of prayers, protect Hasan and Husain (the grandsons of Prophet Muhammad) from their true souls, their eyesight, and from the eyesight of jinn and humans. And indeed, those who disbelieve almost make you slip with their eyes when they hear the reminder (the Quran), and they say: "Indeed, he is mad." But it is not except a reminder to all creation. And O Allah, Who answers through the great Quran. And Sulaiman (Solomon) inherited from Dawud (David). O Allah, the Most Merciful, Who has the glorious throne. Lengthen my life, heal my body, grant my need, bestow me with wealth and children, and make me beloved by all people. And keep away from me hostility and opposition from all descendants of Adam who are alive. And indeed, those who disbelieve will be punished. And indeed, You are Able to do all things. And say: "The truth has come and falsehood has perished. Indeed falsehood is ever bound to perish." And We have sent down of the Quran that which is healing and mercy for those who believe. And it does not increase the wrongdoers except in loss.
-
-
If you want to download Doa Nur Buat Pdf, you can find it on various websites that offer Islamic books and resources. Some of these websites are:
-
-
-
-
Scribd: This is a popular platform for reading and sharing documents online. You can find Doa Nur Buat Pdf on this website along with other Islamic books and materials.
-
Soul Search: This is a blog that shares spiritual insights and wisdom from various sources. You can download Doa Nur Buat Pdf from this blog as well as other prayers and supplications.
-
Scribd: This is another link from Scribd that offers Doa Nur Buat Pdf for free download. You can also read it online or print it out if you prefer.
-
-
-
By downloading Doa Nur Buat Pdf, you can learn more about this powerful prayer and its benefits. You can also practice reciting it regularly and experience its effects on your life. May Allah bless you and accept your prayers.
-
How to Recite Doa Nur Buat
-
-
Doa Nur Buat is a simple and easy prayer that can be recited anytime and anywhere. However, there are some recommended times and manners for reciting it to get the best results. Here are some tips on how to recite Doa Nur Buat:
-
-
-
Recite it after performing the obligatory prayers, especially the Fajr (dawn) and Maghrib (sunset) prayers.
-
Recite it with a sincere intention and a humble heart.
-
Recite it with a clear and loud voice, or at least in a whisper.
-
Recite it with understanding and reflection on its meaning.
-
Recite it with faith and trust in Allah's power and mercy.
-
-
-
You can download Doa Nur Buat Pdf from the websites mentioned above and follow along with the Arabic text, the transliteration, and the translation. You can also listen to the audio recitation of Doa Nur Buat by various reciters online or offline.
-
-
The History and Origin of Doa Nur Buat
-
-
Doa Nur Buat is a prayer that has a long and rich history. It is said that it was revealed by Angel Jibril (Gabriel) to Prophet Muhammad (peace be upon him) on a special occasion. According to some narrations, this happened when the Prophet (peace be upon him) was performing the morning prayer with his companions at the Masjid al-Haram (the Sacred Mosque) in Mecca. Suddenly, Angel Jibril appeared before him and said: "O Messenger of Allah, we have been sent by Allah to deliver to you Doa Nur Buat."
-
-
The Prophet (peace be upon him) accepted the prayer and taught it to his companions. He also explained its virtues and benefits, and encouraged them to recite it regularly. He said: "This prayer is a great gift from Allah, a light of prophethood, a protection from evil, and a source of blessings. Whoever recites it will be granted what he asks for, will be loved by all people, will be safe from harm, and will witness the truth of Islam."
-
-
Since then, Doa Nur Buat has been passed down from generation to generation among Muslims. It has been recited by many pious and righteous people throughout history, such as Imam Ali (the cousin and son-in-law of the Prophet), Imam Hasan and Imam Husain (the grandsons of the Prophet), Imam al-Ghazali (the famous scholar and mystic), and many others. It has also been included in many books of prayers and supplications, such as Majmu Syarif Kamil, Dalail al-Khayrat, Hizb al-Bahr, and others.
-
-
Conclusion
-
-
Doa Nur Buat is a powerful and beautiful prayer that can bring many benefits to those who recite it sincerely and regularly. It is a prayer that praises and supplicates to Allah, the Most High, the Most Merciful. It is a prayer that protects and blesses the reciter in this world and the hereafter. It is a prayer that confirms the truth of Islam and refutes the falsehood of disbelief.
-
-
If you want to learn more about Doa Nur Buat, you can download Doa Nur Buat Pdf from various sources online and read it at your convenience. You can also practice reciting it regularly and experience its effects on your life. May Allah bless you and accept your prayers.
-
How to Use Doa Nur Buat Pdf for Learning and Practicing
-
-
Doa Nur Buat Pdf is a useful and convenient resource for learning and practicing Doa Nur Buat. You can download it from online sources and use it for various purposes. Here are some ways to use Doa Nur Buat Pdf for learning and practicing:
-
-
-
Read it online or offline: You can read Doa Nur Buat Pdf online or offline on your device. You can also print it out if you prefer a hard copy. You can read it anytime and anywhere you want.
-
Listen to it: You can listen to Doa Nur Buat Pdf by using an audio player or a speaker. You can also find audio recitations of Doa Nur Buat by various reciters online or offline. You can listen to it while driving, working, relaxing, or sleeping.
-
Memorize it: You can memorize Doa Nur Buat Pdf by repeating it over and over again. You can also use flashcards, mnemonics, or other memory techniques to help you memorize it. You can test yourself by reciting it from memory or writing it down.
-
Understand it: You can understand Doa Nur Buat Pdf by studying its meaning and explanation. You can also use dictionaries, commentaries, or other references to help you understand it. You can also ask questions or seek guidance from scholars or experts.
-
Apply it: You can apply Doa Nur Buat Pdf by reciting it regularly and sincerely. You can also act upon its teachings and implications. You can also share it with others and invite them to recite it.
-
-
-
By using Doa Nur Buat Pdf for learning and practicing, you can benefit from this powerful prayer and its effects on your life. You can also improve your knowledge and skills in Arabic, Quran, and Islam.
-
-
The Challenges and Opportunities of Downloading Doa Nur Buat Pdf
-
-
Downloading Doa Nur Buat Pdf from online sources can be both challenging and rewarding. There are some advantages and disadvantages of downloading Doa Nur Buat Pdf that you need to be aware of. Here are some of them:
-
-
-
Advantages: Downloading Doa Nur Buat Pdf can be beneficial for you in many ways. Some of the advantages are:
-
-
It is free and easy: You can download Doa Nur Buat Pdf for free from various websites that offer Islamic books and resources. You can also download it easily by clicking a link or a button.
-
It is accessible and flexible: You can access Doa Nur Buat Pdf anytime and anywhere you want. You can also use it on any device that supports PDF format, such as computers, laptops, tablets, smartphones, etc.
-
It is informative and educational: You can learn more about Doa Nur Buat Pdf by reading its text, translation, transliteration, and commentary. You can also learn more about its history, origin, sources, authenticity, benefits, meaning, etc.
-
It is motivational and inspirational: You can get motivated and inspired by reading Doa Nur Buat Pdf. You can also feel the spiritual impact of this prayer on your heart and soul.
-
-
Disadvantages: Downloading Doa Nur Buat Pdf can also pose some challenges and risks for you. Some of the disadvantages are:
-
-
It is not authentic and reliable: You may not be sure about the authenticity and reliability of Doa Nur Buat Pdf that you download from online sources. Some websites may offer fake or corrupted versions of Doa Nur Buat Pdf that may contain errors, mistakes, or alterations.
-
It is not safe and secure: You may face some security issues when downloading Doa Nur Buat Pdf from online sources. Some websites may contain viruses, malware, spyware, or other harmful elements that may damage your device or compromise your privacy.
-
It is not sufficient and complete: You may not get enough information and guidance when downloading Doa Nur Buat Pdf from online sources. Some websites may offer incomplete or outdated versions of Doa Nur Buat Pdf that may lack some features or details.
-
It is not interactive and engaging: You may not get the full experience and enjoyment of reading Doa Nur Buat Pdf when downloading it from online sources. Some websites may offer dull or boring versions of Doa Nur Buat Pdf that may not capture your attention or interest.
-
-
-
-
Therefore, you need to be careful and cautious when downloading Doa Nur Buat Pdf from online sources. You need to check the credibility and quality of the websites that offer Doa Nur Buat Pdf. You also need to use other methods and resources to complement your learning and practice of Doa Nur Buat.
-
Conclusion
-
-
Doa Nur Buat is a powerful and beautiful prayer that can bring many benefits to those who recite it sincerely and regularly. It is a prayer that praises and supplicates to Allah, the Most High, the Most Merciful. It is a prayer that protects and blesses the reciter in this world and the hereafter. It is a prayer that confirms the truth of Islam and refutes the falsehood of disbelief.
-
-
If you want to learn more about Doa Nur Buat, you can download Doa Nur Buat Pdf from various sources online and read it at your convenience. You can also use it for learning and practicing this prayer in various ways. However, you need to be careful and cautious when downloading Doa Nur Buat Pdf from online sources. You need to check the authenticity, reliability, safety, and quality of the websites that offer Doa Nur Buat Pdf. You also need to use other methods and resources to complement your learning and practice of Doa Nur Buat.
-
-
By downloading Doa Nur Buat Pdf, you can benefit from this powerful prayer and its effects on your life. You can also improve your knowledge and skills in Arabic, Quran, and Islam. May Allah bless you and accept your prayers.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar.md b/spaces/inreVtussa/clothingai/Examples/Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar.md
deleted file mode 100644
index a022337a4b1aab9f662fe6e38fde435cc6dbf56e..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-
Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar: A Review
-
-
If you are looking for a powerful and professional video editing software that can handle any project, you might want to check out Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar. This is the latest version of Adobe's flagship video editor, which offers a lot of features and improvements to help you create stunning videos with high-quality effects and transitions.
-
Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar
In this article, we will review some of the main features and benefits of Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, as well as how to download and install it on your PC.
-
-
What's New in Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar?
-
-
One of the most noticeable changes in Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar is the new Lumetri Color panel, which allows you to perform selective color grading with ease. You can use the new color curves, hue saturation curves, and color wheels to adjust the colors of specific areas or objects in your video.
-
-
Another new feature is the DeNoise and DeReverb effects, which can help you remove background noise or reverb from your audio clips with just a few clicks. You can use the sliders to adjust the amount of noise or reverb reduction, and preview the results in real time.
-
-
-
Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar also supports more native formats, such as ARRI Alexa LF, Sony Venice v2, and HEIF (HEIC) files. This means you can import and edit these files without any transcoding or conversion, saving you time and disk space.
-
-
Other improvements include hardware-based encoding and decoding for H264 and HEVC formats, theater mode in Adobe Immersive Environment, and better performance and stability.
-
-
How to Download and Install Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar?
-
-
If you want to try Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, you can download it from various sources on the internet, such as torrent sites or file sharing platforms. However, be careful when downloading from these sources, as they may contain viruses or malware that can harm your PC.
-
-
A safer way to download Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar is to use the official Adobe website or app. You can sign up for a free trial or a subscription plan that suits your needs and budget.
-
-
To install Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, you need to have a PC that meets the minimum system requirements, such as Windows 10 (64-bit), Intel Core i5 or AMD FX processor, 8 GB of RAM, and 8 GB of available disk space.
-
-
Once you have downloaded the file, you need to extract it using a software like WinRAR or 7-Zip. Then, you need to run the setup.exe file and follow the instructions on the screen.
-
-
After the installation is complete, you can launch Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar and start editing your videos.
-
-
Conclusion
-
-
Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar is a great video editing software that offers a lot of features and enhancements to help you create amazing videos with ease.
-
-
If you want to download and install Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, you can use the official Adobe website or app, or find other sources on the internet.
-
-
We hope this article has been helpful for you.
-
How to Use Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar?
-
-
Once you have installed Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, you can start using it to edit your videos. You can import your media files from your PC, camera, or external drive, and organize them in the Project panel. You can also use the Media Browser to browse and preview your files before importing them.
-
-
To edit your videos, you can use the Timeline panel, where you can drag and drop your clips, trim them, add transitions, effects, titles, and more. You can also use the Source and Program monitors to view and edit your clips in detail.
-
-
To enhance your videos, you can use the Effects panel, where you can find hundreds of effects and presets that you can apply to your clips. You can also use the Effect Controls panel to adjust the parameters of each effect.
-
-
To export your videos, you can use the Export Settings dialog box, where you can choose the format, codec, resolution, frame rate, bitrate, and other settings for your output file. You can also use the Adobe Media Encoder to queue and render multiple files at once.
-
-
Why Choose Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar?
-
-
Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar is one of the best video editing software in the market, and here are some of the reasons why:
-
-
-
It has a user-friendly and intuitive interface that makes it easy to learn and use.
-
It has a powerful and versatile editing engine that can handle any type of video format and resolution.
-
It has a rich and diverse collection of effects and transitions that can enhance your videos with professional quality.
-
It has a multilingual support that allows you to work with different languages and subtitles.
-
It has a seamless integration with other Adobe products, such as Photoshop, After Effects, Audition, and more.
-
-
-
If you are looking for a video editing software that can meet your needs and expectations, you should definitely try Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar.
-
How to Edit Videos with Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar?
-
-
Editing videos with Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar is easy and fun, as long as you know the basic steps and tools. Here are some of the things you can do with this software:
-
-
-
Cut and trim your clips to remove unwanted parts or adjust the duration.
-
Add transitions between your clips to create smooth and dynamic changes.
-
Add effects to your clips to enhance the look and feel of your video.
-
Add titles and graphics to your video to add information or branding.
-
Add audio to your video, such as music, voiceover, or sound effects.
-
Mix and edit your audio tracks to adjust the volume, balance, and quality.
-
Color grade your video to correct or change the colors of your footage.
-
Export your video to a file format that suits your purpose and platform.
-
-
-
To edit videos with Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, you need to have a basic understanding of the interface and workflow of the software. You can learn more about these topics from the official Adobe website or from various online tutorials and courses.
-
-
What are the Benefits of Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar?
-
-
Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar is not only a powerful and professional video editing software, but also a beneficial one for many reasons. Here are some of the benefits of using this software:
-
-
-
It can save you time and money by allowing you to edit videos faster and more efficiently.
-
It can improve your skills and creativity by giving you access to advanced tools and features.
-
It can increase your reach and engagement by helping you create videos that appeal to your audience and platform.
-
It can boost your reputation and credibility by enabling you to produce videos that meet industry standards and expectations.
-
It can enhance your satisfaction and enjoyment by making video editing a fun and rewarding experience.
-
-
-
If you want to experience these benefits for yourself, you should download and install Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar today and start editing your videos with it.
-
How to Troubleshoot Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar?
-
-
While Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar is a reliable and stable video editing software, it may sometimes encounter some issues or errors that can affect your workflow or output. Here are some of the common problems and solutions that you can try:
-
-
-
If you experience lagging or freezing while editing or playing back your videos, you may need to optimize your system performance by closing unnecessary programs, updating your drivers, clearing your cache files, or lowering your playback resolution.
-
If you encounter missing or corrupted files or media in your project, you may need to locate or relink them using the Link Media dialog box, or replace them with new files.
-
If you get an error message when exporting or rendering your video, you may need to check your export settings, change your renderer, disable any third-party effects or plugins, or reset your preferences.
-
If you have any other issues or errors with Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, you can search for solutions on the official Adobe website or forums, or contact the Adobe support team for assistance.
-
-
-
By following these steps, you can hopefully resolve any issues or errors that you may encounter with Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar and enjoy editing your videos without any hassle.
-
-
How to Uninstall Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar?
-
-
If you want to uninstall Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar from your PC, you can follow these steps:
-
-
-
Open the Control Panel on your PC and go to Programs and Features.
-
Select Adobe Premiere Pro CC 2019 from the list of programs and click Uninstall.
-
Follow the instructions on the screen to complete the uninstallation process.
-
Restart your PC to remove any remaining files or registry entries.
-
-
-
If you want to reinstall Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, you can download it again from the official Adobe website or app, or from other sources on the internet.
-
Conclusion
-
-
Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar is a great video editing software that offers a lot of features and benefits to help you create amazing videos with ease. Whether you are a beginner or a professional, you can use this software to edit videos for any purpose and platform.
-
-
In this article, we have reviewed some of the main features and benefits of Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, as well as how to download, install, use, troubleshoot, and uninstall it on your PC.
-
-
We hope this article has been helpful for you. If you want to download and install Adobe Premiere Pro CC 2019 13.0.1.13 (x64) Multilingual Medici .rar, you can use the official Adobe website or app, or find other sources on the internet.
-
-
Thank you for reading and happy editing!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/ivntl/MMS/vits/train.py b/spaces/ivntl/MMS/vits/train.py
deleted file mode 100644
index 703d30cf9ef2c414d9b35fe65545cc8fefad8821..0000000000000000000000000000000000000000
--- a/spaces/ivntl/MMS/vits/train.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-
-import commons
-import utils
-from data_utils import (
- TextAudioLoader,
- TextAudioCollate,
- DistributedBucketSampler
-)
-from models import (
- SynthesizerTrn,
- MultiPeriodDiscriminator,
-)
-from losses import (
- generator_loss,
- discriminator_loss,
- feature_loss,
- kl_loss
-)
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-from text.symbols import symbols
-
-
-torch.backends.cudnn.benchmark = True
-global_step = 0
-
-
-def main():
- """Assume Single Node Multi GPUs Training Only"""
- assert torch.cuda.is_available(), "CPU training is not allowed."
-
- n_gpus = torch.cuda.device_count()
- os.environ['MASTER_ADDR'] = 'localhost'
-  os.environ['MASTER_PORT'] = '8000'  # must be a valid TCP port (1-65535)
-
- hps = utils.get_hparams()
- mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
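-# Typical invocation, assuming the standard VITS-style command line exposed by
-# utils.get_hparams (a JSON config plus a model/run directory; the flag names are
-# an assumption carried over from the upstream VITS repository):
-#   python train.py -c configs/base.json -m my_run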
-
-def run(rank, n_gpus, hps):
- global global_step
- if rank == 0:
- logger = utils.get_logger(hps.model_dir)
- logger.info(hps)
- utils.check_git_hash(hps.model_dir)
- writer = SummaryWriter(log_dir=hps.model_dir)
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
- dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
- torch.manual_seed(hps.train.seed)
- torch.cuda.set_device(rank)
-
- train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
- train_sampler = DistributedBucketSampler(
- train_dataset,
- hps.train.batch_size,
- [32,300,400,500,600,700,800,900,1000],
- num_replicas=n_gpus,
- rank=rank,
- shuffle=True)
- collate_fn = TextAudioCollate()
- train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
- collate_fn=collate_fn, batch_sampler=train_sampler)
- if rank == 0:
- eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data)
- eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
- batch_size=hps.train.batch_size, pin_memory=True,
- drop_last=False, collate_fn=collate_fn)
-
- net_g = SynthesizerTrn(
- len(symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- **hps.model).cuda(rank)
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
- optim_g = torch.optim.AdamW(
- net_g.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- optim_d = torch.optim.AdamW(
- net_d.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- net_g = DDP(net_g, device_ids=[rank])
- net_d = DDP(net_d, device_ids=[rank])
-
- try:
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
- global_step = (epoch_str - 1) * len(train_loader)
- except:
- epoch_str = 1
- global_step = 0
-
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-
- scaler = GradScaler(enabled=hps.train.fp16_run)
-
- for epoch in range(epoch_str, hps.train.epochs + 1):
- if rank==0:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
- else:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
- scheduler_g.step()
- scheduler_d.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
- net_g, net_d = nets
- optim_g, optim_d = optims
- scheduler_g, scheduler_d = schedulers
- train_loader, eval_loader = loaders
- if writers is not None:
- writer, writer_eval = writers
-
- train_loader.batch_sampler.set_epoch(epoch)
- global global_step
-
- net_g.train()
- net_d.train()
- for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader):
- x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
- spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
- y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
-
- with autocast(enabled=hps.train.fp16_run):
- y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
- (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths)
-
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
-
- y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
- # Discriminator
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
- with autocast(enabled=False):
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
- loss_disc_all = loss_disc
- optim_d.zero_grad()
- scaler.scale(loss_disc_all).backward()
- scaler.unscale_(optim_d)
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
- scaler.step(optim_d)
-
- with autocast(enabled=hps.train.fp16_run):
- # Generator
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
- with autocast(enabled=False):
- loss_dur = torch.sum(l_length.float())
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
- loss_fm = feature_loss(fmap_r, fmap_g)
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
- optim_g.zero_grad()
- scaler.scale(loss_gen_all).backward()
- scaler.unscale_(optim_g)
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
- scaler.step(optim_g)
- scaler.update()
-
- if rank==0:
- if global_step % hps.train.log_interval == 0:
- lr = optim_g.param_groups[0]['lr']
- losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
- logger.info('Train Epoch: {} [{:.0f}%]'.format(
- epoch,
- 100. * batch_idx / len(train_loader)))
- logger.info([x.item() for x in losses] + [global_step, lr])
-
- scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
- scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
-
- scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
- scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
- scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
- image_dict = {
- "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
- "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
- "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
- }
- utils.summarize(
- writer=writer,
- global_step=global_step,
- images=image_dict,
- scalars=scalar_dict)
-
- if global_step % hps.train.eval_interval == 0:
- evaluate(hps, net_g, eval_loader, writer_eval)
- utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
- utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
- global_step += 1
-
- if rank == 0:
- logger.info('====> Epoch: {}'.format(epoch))
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
- generator.eval()
- with torch.no_grad():
- for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader):
- x, x_lengths = x.cuda(0), x_lengths.cuda(0)
- spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
- y, y_lengths = y.cuda(0), y_lengths.cuda(0)
-
- # evaluate only the first sample of the first batch
- x = x[:1]
- x_lengths = x_lengths[:1]
- spec = spec[:1]
- spec_lengths = spec_lengths[:1]
- y = y[:1]
- y_lengths = y_lengths[:1]
- break
- y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000)
- y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
-
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1).float(),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
- image_dict = {
- "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
- }
- audio_dict = {
- "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
- }
- if global_step == 0:
- image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
- audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
-
- utils.summarize(
- writer=writer_eval,
- global_step=global_step,
- images=image_dict,
- audios=audio_dict,
- audio_sampling_rate=hps.data.sampling_rate
- )
- generator.train()
-
-
-if __name__ == "__main__":
- main()
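
The generator/discriminator updates above follow the standard `torch.cuda.amp` pattern: the loss is scaled before `backward()`, gradients are unscaled before value clipping, and `scaler.step()`/`scaler.update()` finish the iteration. A minimal sketch of that pattern with a toy model (requires a CUDA device; names, shapes, and optimizer settings are illustrative, and `torch.nn.utils.clip_grad_value_` stands in for `commons.clip_grad_value_`):

```python
import torch
from torch.cuda.amp import GradScaler, autocast

model = torch.nn.Linear(10, 1).cuda()            # toy stand-in for net_g / net_d
optim = torch.optim.AdamW(model.parameters(), lr=2e-4, betas=(0.8, 0.99), eps=1e-9)
scaler = GradScaler(enabled=True)                # enabled=hps.train.fp16_run in the code above

x = torch.randn(8, 10, device="cuda")
target = torch.randn(8, 1, device="cuda")

with autocast(enabled=True):
    loss = torch.nn.functional.l1_loss(model(x), target)

optim.zero_grad()
scaler.scale(loss).backward()                    # backward on the scaled loss
scaler.unscale_(optim)                           # unscale so clipping sees true gradient values
torch.nn.utils.clip_grad_value_(model.parameters(), 1.0)
scaler.step(optim)                               # skipped internally if inf/nan gradients appear
scaler.update()                                  # adjust the loss scale for the next step
```
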
diff --git a/spaces/ivy-1911/vits-uma-genshin-honkai/transforms.py b/spaces/ivy-1911/vits-uma-genshin-honkai/transforms.py
deleted file mode 100644
index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000
--- a/spaces/ivy-1911/vits-uma-genshin-honkai/transforms.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
-
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {
- 'tails': tails,
- 'tail_bound': tail_bound
- }
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(
- inputs[..., None] >= bin_locations,
- dim=-1
- ) - 1
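
`searchsorted` returns, for every input, the index of the bin whose left edge is the largest knot not exceeding it; the `eps` added to the last edge keeps inputs sitting exactly on the right boundary inside the final bin. A small illustrative check of the same expression:

```python
import torch

bin_locations = torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0])
inputs = torch.tensor([0.1, 0.5, 0.9])
idx = torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
print(idx)  # tensor([0, 2, 3]): bins [0, 0.25), [0.5, 0.75) and [0.75, 1.0]
```
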
-
-
-def unconstrained_rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails='linear',
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == 'linear':
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError('{} tails are not implemented.'.format(tails))
-
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative
- )
-
- return outputs, logabsdet
-
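
For `'linear'` tails the padded boundary derivatives are set to `log(exp(1 - min_derivative) - 1)`. Since `rational_quadratic_spline` later maps unnormalized derivatives through `min_derivative + softplus(.)`, this constant makes the derivative at both interval edges exactly 1, so the spline meets the identity mapping used outside `[-tail_bound, tail_bound]` without a kink. A quick numeric check:

```python
import numpy as np

min_derivative = 1e-3
constant = np.log(np.exp(1 - min_derivative) - 1)
softplus = np.log1p(np.exp(constant))   # softplus(constant) = 1 - min_derivative
print(min_derivative + softplus)        # 1.0 (up to floating-point error)
```
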
-def rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0., right=1., bottom=0., top=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError('Input to a transform is not within its domain')
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError('Minimal bin width too large for the number of bins')
- if min_bin_height * num_bins > 1.0:
- raise ValueError('Minimal bin height too large for the number of bins')
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (((inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta)
- + input_heights * (input_delta - input_derivatives)))
- b = (input_heights * input_derivatives
- - (inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta))
- c = - input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (input_delta * theta.pow(2)
- + input_derivatives * theta_one_minus_theta)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
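
A minimal round-trip sketch of the public entry point, assuming this file is importable as `transforms`; shapes and the `tail_bound` are illustrative. With `tails='linear'` the derivative tensor carries `num_bins - 1` interior values, and the inverse pass should recover the inputs while negating the log-determinant:

```python
import torch
import transforms  # the module defined above

num_bins = 10
x = torch.randn(4, 16)
w = torch.randn(4, 16, num_bins)        # unnormalized bin widths
h = torch.randn(4, 16, num_bins)        # unnormalized bin heights
d = torch.randn(4, 16, num_bins - 1)    # unnormalized derivatives at interior knots

y, logdet = transforms.piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails='linear', tail_bound=5.0)
x_rec, inv_logdet = transforms.piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails='linear', tail_bound=5.0)

print(torch.allclose(x, x_rec, atol=1e-4))              # expected: True
print(torch.allclose(logdet, -inv_logdet, atol=1e-4))   # expected: True
```
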
diff --git a/spaces/jackli888/stable-diffusion-webui/launch.py b/spaces/jackli888/stable-diffusion-webui/launch.py
deleted file mode 100644
index c83dd5b72591d422cfc5f64cbe15f19021d8b159..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/launch.py
+++ /dev/null
@@ -1,361 +0,0 @@
-# this script installs the necessary requirements and launches the main program in webui.py
-import subprocess
-import os
-import sys
-import importlib.util
-import shlex
-import platform
-import argparse
-import json
-
-dir_repos = "repositories"
-dir_extensions = "extensions"
-python = sys.executable
-git = os.environ.get('GIT', "git")
-index_url = os.environ.get('INDEX_URL', "")
-stored_commit_hash = None
-skip_install = False
-
-
-def check_python_version():
- is_windows = platform.system() == "Windows"
- major = sys.version_info.major
- minor = sys.version_info.minor
- micro = sys.version_info.micro
-
- if is_windows:
- supported_minors = [10]
- else:
- supported_minors = [7, 8, 9, 10, 11]
-
- if not (major == 3 and minor in supported_minors):
- import modules.errors
-
- modules.errors.print_error_explanation(f"""
-INCOMPATIBLE PYTHON VERSION
-
-This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}.
-If you encounter an error with "RuntimeError: Couldn't install torch." message,
-or any other error regarding unsuccessful package (library) installation,
-please downgrade (or upgrade) to the latest version of 3.10 Python
-and delete current Python and "venv" folder in WebUI's directory.
-
-You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3109/
-
-{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
-
-Use --skip-python-version-check to suppress this warning.
-""")
-
-
-def commit_hash():
- global stored_commit_hash
-
- if stored_commit_hash is not None:
- return stored_commit_hash
-
- try:
- stored_commit_hash = run(f"{git} rev-parse HEAD").strip()
- except Exception:
- stored_commit_hash = ""
-
- return stored_commit_hash
-
-
-def extract_arg(args, name):
- return [x for x in args if x != name], name in args
-
-
-def extract_opt(args, name):
- opt = None
- is_present = False
- if name in args:
- is_present = True
- idx = args.index(name)
- del args[idx]
- if idx < len(args) and args[idx][0] != "-":
- opt = args[idx]
- del args[idx]
- return args, is_present, opt
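
Illustrative behaviour of the two helpers above on a hypothetical argument list, assuming they are in scope (`extract_arg` only removes a flag, while `extract_opt` also captures the value that follows it):

```python
args = ["launch.py", "--skip-torch-cuda-test", "--tests", "testdir", "--xformers"]

args, skip = extract_arg(args, "--skip-torch-cuda-test")
# args == ["launch.py", "--tests", "testdir", "--xformers"], skip is True

args, run_tests, test_dir = extract_opt(args, "--tests")
# args == ["launch.py", "--xformers"], run_tests is True, test_dir == "testdir"
```
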
-
-
-def run(command, desc=None, errdesc=None, custom_env=None, live=False):
- if desc is not None:
- print(desc)
-
- if live:
- result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env)
- if result.returncode != 0:
- raise RuntimeError(f"""{errdesc or 'Error running command'}.
-Command: {command}
-Error code: {result.returncode}""")
-
- return ""
-
- result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
-
- if result.returncode != 0:
-
- message = f"""{errdesc or 'Error running command'}.
-Command: {command}
-Error code: {result.returncode}
-stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''}
-stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''}
-"""
- raise RuntimeError(message)
-
- return result.stdout.decode(encoding="utf8", errors="ignore")
-
-
-def check_run(command):
- result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
- return result.returncode == 0
-
-
-def is_installed(package):
- try:
- spec = importlib.util.find_spec(package)
- except ModuleNotFoundError:
- return False
-
- return spec is not None
-
-
-def repo_dir(name):
- return os.path.join(dir_repos, name)
-
-
-def run_python(code, desc=None, errdesc=None):
- return run(f'"{python}" -c "{code}"', desc, errdesc)
-
-
-def run_pip(args, desc=None):
- if skip_install:
- return
-
- index_url_line = f' --index-url {index_url}' if index_url != '' else ''
- return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
-
-
-def check_run_python(code):
- return check_run(f'"{python}" -c "{code}"')
-
-
-def git_clone(url, dir, name, commithash=None):
- # TODO clone into temporary dir and move if successful
-
- if os.path.exists(dir):
- if commithash is None:
- return
-
- current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
- if current_hash == commithash:
- return
-
- run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
- run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
- return
-
- run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
-
- if commithash is not None:
- run(f'"{git}" -C "{dir}" checkout {commithash}', None, f"Couldn't checkout {name}'s hash: {commithash}")
-
-
-def version_check(commit):
- try:
- import requests
- commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
- if commit != "" and commits['commit']['sha'] != commit:
- print("--------------------------------------------------------")
- print("| You are not up to date with the most recent release. |")
- print("| Consider running `git pull` to update. |")
- print("--------------------------------------------------------")
- elif commits['commit']['sha'] == commit:
- print("You are up to date with the most recent release.")
- else:
- print("Not a git clone, can't perform version check.")
- except Exception as e:
- print("version check failed", e)
-
-
-def run_extension_installer(extension_dir):
- path_installer = os.path.join(extension_dir, "install.py")
- if not os.path.isfile(path_installer):
- return
-
- try:
- env = os.environ.copy()
- env['PYTHONPATH'] = os.path.abspath(".")
-
- print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
- except Exception as e:
- print(e, file=sys.stderr)
-
-
-def list_extensions(settings_file):
- settings = {}
-
- try:
- if os.path.isfile(settings_file):
- with open(settings_file, "r", encoding="utf8") as file:
- settings = json.load(file)
- except Exception as e:
- print(e, file=sys.stderr)
-
- disabled_extensions = set(settings.get('disabled_extensions', []))
-
- return [x for x in os.listdir(dir_extensions) if x not in disabled_extensions]
-
-
-def run_extensions_installers(settings_file):
- if not os.path.isdir(dir_extensions):
- return
-
- for dirname_extension in list_extensions(settings_file):
- run_extension_installer(os.path.join(dir_extensions, dirname_extension))
-
-
-def prepare_environment():
- global skip_install
-
- torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117")
- requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
- commandline_args = os.environ.get('COMMANDLINE_ARGS', "--skip-torch-cuda-test --use-cpu all --precision full --no-half")
-
- xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.16rc425')
- gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
- clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
- openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b")
-
- stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
- taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
- k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
- codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
- blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
-
- stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "47b6b607fdd31875c9279cd2f4f16b92e4ea958e")
- taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
- k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec")
- codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
- blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
-
- sys.argv += shlex.split(commandline_args)
-
- parser = argparse.ArgumentParser(add_help=False)
- parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default='config.json')
- args, _ = parser.parse_known_args(sys.argv)
-
- sys.argv, _ = extract_arg(sys.argv, '-f')
- sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
- sys.argv, skip_python_version_check = extract_arg(sys.argv, '--skip-python-version-check')
- sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
- sys.argv, reinstall_torch = extract_arg(sys.argv, '--reinstall-torch')
- sys.argv, update_check = extract_arg(sys.argv, '--update-check')
- sys.argv, run_tests, test_dir = extract_opt(sys.argv, '--tests')
- sys.argv, skip_install = extract_arg(sys.argv, '--skip-install')
- xformers = '--xformers' in sys.argv
- ngrok = '--ngrok' in sys.argv
-
- if not skip_python_version_check:
- check_python_version()
-
- commit = commit_hash()
-
- print(f"Python {sys.version}")
- print(f"Commit hash: {commit}")
-
- if reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
- run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
-
- if not skip_torch_cuda_test:
- run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
-
- if not is_installed("gfpgan"):
- run_pip(f"install {gfpgan_package}", "gfpgan")
-
- if not is_installed("clip"):
- run_pip(f"install {clip_package}", "clip")
-
- if not is_installed("open_clip"):
- run_pip(f"install {openclip_package}", "open_clip")
-
- if (not is_installed("xformers") or reinstall_xformers) and xformers:
- if platform.system() == "Windows":
- if platform.python_version().startswith("3.10"):
- run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
- else:
- print("Installation of xformers is not supported in this version of Python.")
- print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
- if not is_installed("xformers"):
- exit(0)
- elif platform.system() == "Linux":
- run_pip(f"install {xformers_package}", "xformers")
-
- if not is_installed("pyngrok") and ngrok:
- run_pip("install pyngrok", "ngrok")
-
- os.makedirs(dir_repos, exist_ok=True)
-
- git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
- git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
- git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
- git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
- git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
-
- if not is_installed("lpips"):
- run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
-
- run_pip(f"install -r {requirements_file}", "requirements for Web UI")
-
- run_extensions_installers(settings_file=args.ui_settings_file)
-
- if update_check:
- version_check(commit)
-
- if "--exit" in sys.argv:
- print("Exiting because of --exit argument")
- exit(0)
-
- if run_tests:
- exitcode = tests(test_dir)
- exit(exitcode)
-
-
-def tests(test_dir):
- if "--api" not in sys.argv:
- sys.argv.append("--api")
- if "--ckpt" not in sys.argv:
- sys.argv.append("--ckpt")
- sys.argv.append("./test/test_files/empty.pt")
- if "--skip-torch-cuda-test" not in sys.argv:
- sys.argv.append("--skip-torch-cuda-test")
- if "--disable-nan-check" not in sys.argv:
- sys.argv.append("--disable-nan-check")
-
- print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}")
-
- os.environ['COMMANDLINE_ARGS'] = ""
- with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr:
- proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr)
-
- import test.server_poll
- exitcode = test.server_poll.run_tests(proc, test_dir)
-
- print(f"Stopping Web UI process with id {proc.pid}")
- proc.kill()
- return exitcode
-
-
-def start():
- print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
- import webui
- if '--nowebui' in sys.argv:
- webui.api_only()
- else:
- webui.webui()
-
-
-if __name__ == "__main__":
- prepare_environment()
- start()
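
`prepare_environment()` reads its defaults from environment variables, so an install can be customized without editing the script. A hypothetical wrapper that forces a CPU-only setup by overriding the variables referenced above (the values shown are illustrative):

```python
import os
import subprocess
import sys

env = os.environ.copy()
env["TORCH_COMMAND"] = "pip install torch torchvision"  # plain CPU wheels instead of cu117
env["COMMANDLINE_ARGS"] = "--skip-torch-cuda-test --use-cpu all --precision full --no-half"
env["REQS_FILE"] = "requirements_versions.txt"

subprocess.run([sys.executable, "launch.py"], env=env, check=True)
```
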
diff --git a/spaces/jackli888/stable-diffusion-webui/scripts/img2imgalt.py b/spaces/jackli888/stable-diffusion-webui/scripts/img2imgalt.py
deleted file mode 100644
index 65b61533929a018f0cb97a89266154bf569cd40e..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/scripts/img2imgalt.py
+++ /dev/null
@@ -1,216 +0,0 @@
-from collections import namedtuple
-
-import numpy as np
-from tqdm import trange
-
-import modules.scripts as scripts
-import gradio as gr
-
-from modules import processing, shared, sd_samplers, prompt_parser, sd_samplers_common
-from modules.processing import Processed
-from modules.shared import opts, cmd_opts, state
-
-import torch
-import k_diffusion as K
-
-from PIL import Image
-from torch import autocast
-from einops import rearrange, repeat
-
-
-def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
- x = p.init_latent
-
- s_in = x.new_ones([x.shape[0]])
- dnw = K.external.CompVisDenoiser(shared.sd_model)
- sigmas = dnw.get_sigmas(steps).flip(0)
-
- shared.state.sampling_steps = steps
-
- for i in trange(1, len(sigmas)):
- shared.state.sampling_step += 1
-
- x_in = torch.cat([x] * 2)
- sigma_in = torch.cat([sigmas[i] * s_in] * 2)
- cond_in = torch.cat([uncond, cond])
-
- image_conditioning = torch.cat([p.image_conditioning] * 2)
- cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}
-
- c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
- t = dnw.sigma_to_t(sigma_in)
-
- eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
- denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
-
- denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
-
- d = (x - denoised) / sigmas[i]
- dt = sigmas[i] - sigmas[i - 1]
-
- x = x + d * dt
-
- sd_samplers_common.store_latent(x)
-
- # This shouldn't be necessary, but solved some VRAM issues
- del x_in, sigma_in, cond_in, c_out, c_in, t,
- del eps, denoised_uncond, denoised_cond, denoised, d, dt
-
- shared.state.nextjob()
-
- return x / x.std()
-
-
-Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt", "sigma_adjustment"])
-
-
-# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736
-def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
- x = p.init_latent
-
- s_in = x.new_ones([x.shape[0]])
- dnw = K.external.CompVisDenoiser(shared.sd_model)
- sigmas = dnw.get_sigmas(steps).flip(0)
-
- shared.state.sampling_steps = steps
-
- for i in trange(1, len(sigmas)):
- shared.state.sampling_step += 1
-
- x_in = torch.cat([x] * 2)
- sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)
- cond_in = torch.cat([uncond, cond])
-
- image_conditioning = torch.cat([p.image_conditioning] * 2)
- cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}
-
- c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
-
- if i == 1:
- t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
- else:
- t = dnw.sigma_to_t(sigma_in)
-
- eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
- denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
-
- denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
-
- if i == 1:
- d = (x - denoised) / (2 * sigmas[i])
- else:
- d = (x - denoised) / sigmas[i - 1]
-
- dt = sigmas[i] - sigmas[i - 1]
- x = x + d * dt
-
- sd_samplers_common.store_latent(x)
-
- # This shouldn't be necessary, but solved some VRAM issues
- del x_in, sigma_in, cond_in, c_out, c_in, t,
- del eps, denoised_uncond, denoised_cond, denoised, d, dt
-
- shared.state.nextjob()
-
- return x / sigmas[-1]
-
-
-class Script(scripts.Script):
- def __init__(self):
- self.cache = None
-
- def title(self):
- return "img2img alternative test"
-
- def show(self, is_img2img):
- return is_img2img
-
- def ui(self, is_img2img):
- info = gr.Markdown('''
- * `CFG Scale` should be 2 or lower.
- ''')
-
- override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=self.elem_id("override_sampler"))
-
- override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=self.elem_id("override_prompt"))
- original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=self.elem_id("original_prompt"))
- original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=self.elem_id("original_negative_prompt"))
-
- override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=self.elem_id("override_steps"))
- st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=self.elem_id("st"))
-
- override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=self.elem_id("override_strength"))
-
- cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=self.elem_id("cfg"))
- randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=self.elem_id("randomness"))
- sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment"))
-
- return [
- info,
- override_sampler,
- override_prompt, original_prompt, original_negative_prompt,
- override_steps, st,
- override_strength,
- cfg, randomness, sigma_adjustment,
- ]
-
- def run(self, p, _, override_sampler, override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment):
- # Override
- if override_sampler:
- p.sampler_name = "Euler"
- if override_prompt:
- p.prompt = original_prompt
- p.negative_prompt = original_negative_prompt
- if override_steps:
- p.steps = st
- if override_strength:
- p.denoising_strength = 1.0
-
- def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
- lat = (p.init_latent.cpu().numpy() * 10).astype(int)
-
- same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
- and self.cache.original_prompt == original_prompt \
- and self.cache.original_negative_prompt == original_negative_prompt \
- and self.cache.sigma_adjustment == sigma_adjustment
- same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100
-
- if same_everything:
- rec_noise = self.cache.noise
- else:
- shared.state.job_count += 1
- cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])
- uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])
- if sigma_adjustment:
- rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st)
- else:
- rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)
- self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)
-
- rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)
-
- combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
-
- sampler = sd_samplers.create_sampler(p.sampler_name, p.sd_model)
-
- sigmas = sampler.model_wrap.get_sigmas(p.steps)
-
- noise_dt = combined_noise - (p.init_latent / sigmas[0])
-
- p.seed = p.seed + 1
-
- return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning)
-
- p.sample = sample_extra
-
- p.extra_generation_params["Decode prompt"] = original_prompt
- p.extra_generation_params["Decode negative prompt"] = original_negative_prompt
- p.extra_generation_params["Decode CFG scale"] = cfg
- p.extra_generation_params["Decode steps"] = st
- p.extra_generation_params["Randomness"] = randomness
- p.extra_generation_params["Sigma Adjustment"] = sigma_adjustment
-
- processed = processing.process_images(p)
-
- return processed
-
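
The `combined_noise` expression in `sample_extra` is a variance-preserving blend: when the recovered and the random noise are independent with unit variance, dividing by `sqrt(randomness**2 + (1 - randomness)**2)` restores unit variance of the mix. A quick numeric check:

```python
import torch

randomness = 0.3
rec_noise = torch.randn(100_000)   # stand-in for the recovered noise
rand_noise = torch.randn(100_000)  # fresh random noise

combined = ((1 - randomness) * rec_noise + randomness * rand_noise) \
    / ((randomness ** 2 + (1 - randomness) ** 2) ** 0.5)

print(combined.std())  # ~1.0
```
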
diff --git a/spaces/jbilcke-hf/VideoQuest/src/components/ui/calendar.tsx b/spaces/jbilcke-hf/VideoQuest/src/components/ui/calendar.tsx
deleted file mode 100644
index 331cba258c3c94f84479b41ddadf6e99403504d8..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/VideoQuest/src/components/ui/calendar.tsx
+++ /dev/null
@@ -1,64 +0,0 @@
-"use client"
-
-import * as React from "react"
-import { ChevronLeft, ChevronRight } from "lucide-react"
-import { DayPicker } from "react-day-picker"
-
-import { cn } from "@/lib/utils"
-import { buttonVariants } from "@/components/ui/button"
-
-export type CalendarProps = React.ComponentProps<typeof DayPicker>
-
-function Calendar({
- className,
- classNames,
- showOutsideDays = true,
- ...props
-}: CalendarProps) {
- return (
- ,
- IconRight: ({ ...props }) => ,
- }}
- {...props}
- />
- )
-}
-Calendar.displayName = "Calendar"
-
-export { Calendar }
diff --git a/spaces/jessica198601/jzlqy/app.py b/spaces/jessica198601/jzlqy/app.py
deleted file mode 100644
index ceb538459b5854b81892c9d20bdd88ddcd169b94..0000000000000000000000000000000000000000
--- a/spaces/jessica198601/jzlqy/app.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import openai
-import gradio as gr
-
-openai.api_key = ""  # set your OpenAI API key here before launching
-def predict(message, history):
- history_openai_format = []
- for human, assistant in history:
- history_openai_format.append({"role": "user", "content": human})
- history_openai_format.append({"role": "assistant", "content": assistant})
-
- history_openai_format.append({"role": "user", "content": message})
- response = openai.ChatCompletion.create(
- model='gpt-3.5-turbo-0613',
- messages=history_openai_format,
- temperature=1.0,
- stream=True
- )
-
- partial_message = ""
- for chunk in response:
- if len(chunk['choices'][0]['delta']) != 0:
- partial_message = partial_message + chunk['choices'][0]['delta']['content']
- yield partial_message
-
-
-gr.ChatInterface(predict).queue().launch()
\ No newline at end of file
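
The app hard-codes an empty API key, which will fail authentication at request time. A small sketch that reads the key from the environment instead (`OPENAI_API_KEY` is an assumed variable name):

```python
import os
import openai

openai.api_key = os.environ.get("OPENAI_API_KEY", "")
if not openai.api_key:
    raise RuntimeError("Set OPENAI_API_KEY before launching the app.")
```
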
diff --git a/spaces/jiejiejie0420/bingo/src/components/ui/icons.tsx b/spaces/jiejiejie0420/bingo/src/components/ui/icons.tsx
deleted file mode 100644
index 742b489b50437c5b64c86082f2ebc712eeb6a2b0..0000000000000000000000000000000000000000
--- a/spaces/jiejiejie0420/bingo/src/components/ui/icons.tsx
+++ /dev/null
@@ -1,504 +0,0 @@
-'use client'
-
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-function IconNextChat({
- className,
- inverted,
- ...props
-}: React.ComponentProps<'svg'> & { inverted?: boolean }) {
- const id = React.useId()
-
- return (
-
- )
-}
-
-function IconOpenAI({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconGitHub({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconSeparator({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconArrowDown({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconArrowRight({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconUser({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconPlus({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconArrowElbow({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconSpinner({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconMessage({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconTrash({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconMore({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconRefresh({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconStop({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconSidebar({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconMoon({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconSun({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconCopy({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconCheck({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconDownload({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconClose({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconEdit({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconShare({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconUsers({ className, ...props }: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconExternalLink({
- className,
- ...props
-}: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-function IconChevronUpDown({
- className,
- ...props
-}: React.ComponentProps<'svg'>) {
- return (
-
- )
-}
-
-export {
- IconEdit,
- IconNextChat,
- IconOpenAI,
- IconGitHub,
- IconSeparator,
- IconArrowDown,
- IconArrowRight,
- IconUser,
- IconPlus,
- IconArrowElbow,
- IconSpinner,
- IconMessage,
- IconTrash,
- IconMore,
- IconRefresh,
- IconStop,
- IconSidebar,
- IconMoon,
- IconSun,
- IconCopy,
- IconCheck,
- IconDownload,
- IconClose,
- IconShare,
- IconUsers,
- IconExternalLink,
- IconChevronUpDown
-}
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/bin/activate.ps1 b/spaces/joaopereirajp/livvieChatBot/venv/bin/activate.ps1
deleted file mode 100644
index 04c247226219b1a0c1752bba983b75885f9b96f5..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/bin/activate.ps1
+++ /dev/null
@@ -1,60 +0,0 @@
-$script:THIS_PATH = $myinvocation.mycommand.path
-$script:BASE_DIR = Split-Path (Resolve-Path "$THIS_PATH/..") -Parent
-
-function global:deactivate([switch] $NonDestructive) {
- if (Test-Path variable:_OLD_VIRTUAL_PATH) {
- $env:PATH = $variable:_OLD_VIRTUAL_PATH
- Remove-Variable "_OLD_VIRTUAL_PATH" -Scope global
- }
-
- if (Test-Path function:_old_virtual_prompt) {
- $function:prompt = $function:_old_virtual_prompt
- Remove-Item function:\_old_virtual_prompt
- }
-
- if ($env:VIRTUAL_ENV) {
- Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue
- }
-
- if (!$NonDestructive) {
- # Self destruct!
- Remove-Item function:deactivate
- Remove-Item function:pydoc
- }
-}
-
-function global:pydoc {
- python -m pydoc $args
-}
-
-# unset irrelevant variables
-deactivate -nondestructive
-
-$VIRTUAL_ENV = $BASE_DIR
-$env:VIRTUAL_ENV = $VIRTUAL_ENV
-
-New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH
-
-$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH
-if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
- function global:_old_virtual_prompt {
- ""
- }
- $function:_old_virtual_prompt = $function:prompt
-
- if ("" -ne "") {
- function global:prompt {
- # Add the custom prefix to the existing prompt
- $previous_prompt_value = & $function:_old_virtual_prompt
- ("() " + $previous_prompt_value)
- }
- }
- else {
- function global:prompt {
- # Add a prefix to the current prompt, but don't discard it.
- $previous_prompt_value = & $function:_old_virtual_prompt
- $new_prompt_value = "($( Split-Path $env:VIRTUAL_ENV -Leaf )) "
- ($new_prompt_value + $previous_prompt_value)
- }
- }
-}
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Hash/test_SHA512.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Hash/test_SHA512.py
deleted file mode 100644
index 20961aca993f588a0d8a7b381d92958af8dba159..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Hash/test_SHA512.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# SelfTest/Hash/test_SHA512.py: Self-test for the SHA-512 hash function
-#
-# Written in 2008 by Dwayne C. Litzenberger
-#
-# ===================================================================
-# The contents of this file are dedicated to the public domain. To
-# the extent that dedication to the public domain is not available,
-# everyone is granted a worldwide, perpetual, royalty-free,
-# non-exclusive license to exercise all rights associated with the
-# contents of this file for any purpose whatsoever.
-# No rights are reserved.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-# ===================================================================
-
-"""Self-test suite for Crypto.Hash.SHA512"""
-
-from binascii import hexlify
-
-from Crypto.Hash import SHA512
-from .common import make_hash_tests
-from Crypto.SelfTest.loader import load_test_vectors
-
-# Test vectors from various sources
-# This is a list of (expected_result, input[, description]) tuples.
-test_data_512_other = [
-
- # RFC 4634: Section Page 8.4, "Test 1"
- ('ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f', 'abc'),
-
- # RFC 4634: Section Page 8.4, "Test 2.1"
- ('8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909', 'abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu'),
-
- # RFC 4634: Section Page 8.4, "Test 3"
- ('e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973ebde0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b', 'a' * 10**6, "'a' * 10**6"),
-
- # Taken from http://de.wikipedia.org/wiki/Secure_Hash_Algorithm
- ('cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e', ''),
-
- ('af9ed2de700433b803240a552b41b5a472a6ef3fe1431a722b2063c75e9f07451f67a28e37d09cde769424c96aea6f8971389db9e1993d6c565c3c71b855723c', 'Franz jagt im komplett verwahrlosten Taxi quer durch Bayern'),
-]
-
-
-def get_tests_SHA512():
-
- test_vectors = load_test_vectors(("Hash", "SHA2"),
- "SHA512ShortMsg.rsp",
- "KAT SHA-512",
- {"len": lambda x: int(x)}) or []
-
- test_data = test_data_512_other[:]
- for tv in test_vectors:
- try:
- if tv.startswith('['):
- continue
- except AttributeError:
- pass
- if tv.len == 0:
- tv.msg = b""
- test_data.append((hexlify(tv.md), tv.msg, tv.desc))
-
- tests = make_hash_tests(SHA512, "SHA512", test_data,
- digest_size=64,
- oid="2.16.840.1.101.3.4.2.3")
- return tests
-
-
-def get_tests_SHA512_224():
-
- test_vectors = load_test_vectors(("Hash", "SHA2"),
- "SHA512_224ShortMsg.rsp",
- "KAT SHA-512/224",
- {"len": lambda x: int(x)}) or []
-
- test_data = []
- for tv in test_vectors:
- try:
- if tv.startswith('['):
- continue
- except AttributeError:
- pass
- if tv.len == 0:
- tv.msg = b""
- test_data.append((hexlify(tv.md), tv.msg, tv.desc))
-
- tests = make_hash_tests(SHA512, "SHA512/224", test_data,
- digest_size=28,
- oid="2.16.840.1.101.3.4.2.5",
- extra_params={ "truncate" : "224" })
- return tests
-
-
-def get_tests_SHA512_256():
-
- test_vectors = load_test_vectors(("Hash", "SHA2"),
- "SHA512_256ShortMsg.rsp",
- "KAT SHA-512/256",
- {"len": lambda x: int(x)}) or []
-
- test_data = []
- for tv in test_vectors:
- try:
- if tv.startswith('['):
- continue
- except AttributeError:
- pass
- if tv.len == 0:
- tv.msg = b""
- test_data.append((hexlify(tv.md), tv.msg, tv.desc))
-
- tests = make_hash_tests(SHA512, "SHA512/256", test_data,
- digest_size=32,
- oid="2.16.840.1.101.3.4.2.6",
- extra_params={ "truncate" : "256" })
- return tests
-
-
-def get_tests(config={}):
-
- tests = []
- tests += get_tests_SHA512()
- tests += get_tests_SHA512_224()
- tests += get_tests_SHA512_256()
- return tests
-
-if __name__ == '__main__':
- import unittest
- suite = lambda: unittest.TestSuite(get_tests())
- unittest.main(defaultTest='suite')
-
-# vim:set ts=4 sw=4 sts=4 expandtab:
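
A quick manual check of the first RFC 4634 vector listed above, using the PyCryptodome API this suite exercises:

```python
from Crypto.Hash import SHA512

h = SHA512.new(data=b"abc")
print(h.hexdigest())
# ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a
# 2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f
```
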
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/varLib/mutator.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/varLib/mutator.py
deleted file mode 100644
index d1d123ab690f5db5b2a6ae05369db233aee3c92d..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/varLib/mutator.py
+++ /dev/null
@@ -1,509 +0,0 @@
-"""
-Instantiate a variation font. Run, eg:
-
-$ fonttools varLib.mutator ./NotoSansArabic-VF.ttf wght=140 wdth=85
-"""
-from fontTools.misc.fixedTools import floatToFixedToFloat, floatToFixed
-from fontTools.misc.roundTools import otRound
-from fontTools.pens.boundsPen import BoundsPen
-from fontTools.ttLib import TTFont, newTable
-from fontTools.ttLib.tables import ttProgram
-from fontTools.ttLib.tables._g_l_y_f import (
- GlyphCoordinates,
- flagOverlapSimple,
- OVERLAP_COMPOUND,
-)
-from fontTools.varLib.models import (
- supportScalar,
- normalizeLocation,
- piecewiseLinearMap,
-)
-from fontTools.varLib.merger import MutatorMerger
-from fontTools.varLib.varStore import VarStoreInstancer
-from fontTools.varLib.mvar import MVAR_ENTRIES
-from fontTools.varLib.iup import iup_delta
-import fontTools.subset.cff
-import os.path
-import logging
-from io import BytesIO
-
-
-log = logging.getLogger("fontTools.varlib.mutator")
-
-# map 'wdth' axis (1..200) to OS/2.usWidthClass (1..9), rounding to closest
-OS2_WIDTH_CLASS_VALUES = {}
-percents = [50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 150.0, 200.0]
-for i, (prev, curr) in enumerate(zip(percents[:-1], percents[1:]), start=1):
- half = (prev + curr) / 2
- OS2_WIDTH_CLASS_VALUES[half] = i
-
-
-def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas):
- pd_blend_lists = (
- "BlueValues",
- "OtherBlues",
- "FamilyBlues",
- "FamilyOtherBlues",
- "StemSnapH",
- "StemSnapV",
- )
- pd_blend_values = ("BlueScale", "BlueShift", "BlueFuzz", "StdHW", "StdVW")
- for fontDict in topDict.FDArray:
- pd = fontDict.Private
- vsindex = pd.vsindex if (hasattr(pd, "vsindex")) else 0
- for key, value in pd.rawDict.items():
- if (key in pd_blend_values) and isinstance(value, list):
- delta = interpolateFromDeltas(vsindex, value[1:])
- pd.rawDict[key] = otRound(value[0] + delta)
- elif (key in pd_blend_lists) and isinstance(value[0], list):
- """If any argument in a BlueValues list is a blend list,
- then they all are. The first value of each list is an
- absolute value. The delta tuples are calculated from
- relative master values, hence we need to append all the
- deltas to date to each successive absolute value."""
- delta = 0
- for i, val_list in enumerate(value):
- delta += otRound(interpolateFromDeltas(vsindex, val_list[1:]))
- value[i] = val_list[0] + delta
-
-
-def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
- charstrings = topDict.CharStrings
- for gname in glyphOrder:
- # Interpolate charstring
- # e.g replace blend op args with regular args,
- # and use and discard vsindex op.
- charstring = charstrings[gname]
- new_program = []
- vsindex = 0
- last_i = 0
- for i, token in enumerate(charstring.program):
- if token == "vsindex":
- vsindex = charstring.program[i - 1]
- if last_i != 0:
- new_program.extend(charstring.program[last_i : i - 1])
- last_i = i + 1
- elif token == "blend":
- num_regions = charstring.getNumRegions(vsindex)
- numMasters = 1 + num_regions
- num_args = charstring.program[i - 1]
- # The program list starting at program[i] is now:
- # ..args for following operations
- # num_args values from the default font
- # num_args tuples, each with numMasters-1 delta values
- # num_blend_args
- # 'blend'
- argi = i - (num_args * numMasters + 1)
- end_args = tuplei = argi + num_args
- while argi < end_args:
- next_ti = tuplei + num_regions
- deltas = charstring.program[tuplei:next_ti]
- delta = interpolateFromDeltas(vsindex, deltas)
- charstring.program[argi] += otRound(delta)
- tuplei = next_ti
- argi += 1
- new_program.extend(charstring.program[last_i:end_args])
- last_i = i + 1
- if last_i != 0:
- new_program.extend(charstring.program[last_i:])
- charstring.program = new_program
-
-
-def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
- """Unlike TrueType glyphs, neither advance width nor bounding box
- info is stored in a CFF2 charstring. The width data exists only in
- the hmtx and HVAR tables. Since LSB data cannot be interpolated
- reliably from the master LSB values in the hmtx table, we traverse
- the charstring to determine the actual bound box."""
-
- charstrings = topDict.CharStrings
- boundsPen = BoundsPen(glyphOrder)
- hmtx = varfont["hmtx"]
- hvar_table = None
- if "HVAR" in varfont:
- hvar_table = varfont["HVAR"].table
- fvar = varfont["fvar"]
- varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)
-
- for gid, gname in enumerate(glyphOrder):
- entry = list(hmtx[gname])
- # get width delta.
- if hvar_table:
- if hvar_table.AdvWidthMap:
- width_idx = hvar_table.AdvWidthMap.mapping[gname]
- else:
- width_idx = gid
- width_delta = otRound(varStoreInstancer[width_idx])
- else:
- width_delta = 0
-
- # get LSB.
- boundsPen.init()
- charstring = charstrings[gname]
- charstring.draw(boundsPen)
- if boundsPen.bounds is None:
- # Happens with non-marking glyphs
- lsb_delta = 0
- else:
- lsb = otRound(boundsPen.bounds[0])
- lsb_delta = entry[1] - lsb
-
- if lsb_delta or width_delta:
- if width_delta:
- entry[0] = max(0, entry[0] + width_delta)
- if lsb_delta:
- entry[1] = lsb
- hmtx[gname] = tuple(entry)
-
-
-def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
- """Generate a static instance from a variable TTFont and a dictionary
- defining the desired location along the variable font's axes.
- The location values must be specified as user-space coordinates, e.g.:
-
- {'wght': 400, 'wdth': 100}
-
- By default, a new TTFont object is returned. If ``inplace`` is True, the
- input varfont is modified and reduced to a static font.
-
- When the overlap parameter is defined as True,
- OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1. See
- https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
- """
- if not inplace:
- # make a copy to leave input varfont unmodified
- stream = BytesIO()
- varfont.save(stream)
- stream.seek(0)
- varfont = TTFont(stream)
-
- fvar = varfont["fvar"]
- axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes}
- loc = normalizeLocation(location, axes)
- if "avar" in varfont:
- maps = varfont["avar"].segments
- loc = {k: piecewiseLinearMap(v, maps[k]) for k, v in loc.items()}
- # Quantize to F2Dot14, to avoid surprise interpolations.
- loc = {k: floatToFixedToFloat(v, 14) for k, v in loc.items()}
- # Location is normalized now
- log.info("Normalized location: %s", loc)
-
- if "gvar" in varfont:
- log.info("Mutating glyf/gvar tables")
- gvar = varfont["gvar"]
- glyf = varfont["glyf"]
- hMetrics = varfont["hmtx"].metrics
- vMetrics = getattr(varfont.get("vmtx"), "metrics", None)
- # get list of glyph names in gvar sorted by component depth
- glyphnames = sorted(
- gvar.variations.keys(),
- key=lambda name: (
- glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
- if glyf[name].isComposite() or glyf[name].isVarComposite()
- else 0,
- name,
- ),
- )
- for glyphname in glyphnames:
- variations = gvar.variations[glyphname]
- coordinates, _ = glyf._getCoordinatesAndControls(
- glyphname, hMetrics, vMetrics
- )
- origCoords, endPts = None, None
- for var in variations:
- scalar = supportScalar(loc, var.axes)
- if not scalar:
- continue
- delta = var.coordinates
- if None in delta:
- if origCoords is None:
- origCoords, g = glyf._getCoordinatesAndControls(
- glyphname, hMetrics, vMetrics
- )
- delta = iup_delta(delta, origCoords, g.endPts)
- coordinates += GlyphCoordinates(delta) * scalar
- glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
- else:
- glyf = None
-
- if "DSIG" in varfont:
- del varfont["DSIG"]
-
- if "cvar" in varfont:
- log.info("Mutating cvt/cvar tables")
- cvar = varfont["cvar"]
- cvt = varfont["cvt "]
- deltas = {}
- for var in cvar.variations:
- scalar = supportScalar(loc, var.axes)
- if not scalar:
- continue
- for i, c in enumerate(var.coordinates):
- if c is not None:
- deltas[i] = deltas.get(i, 0) + scalar * c
- for i, delta in deltas.items():
- cvt[i] += otRound(delta)
-
- if "CFF2" in varfont:
- log.info("Mutating CFF2 table")
- glyphOrder = varfont.getGlyphOrder()
- CFF2 = varfont["CFF2"]
- topDict = CFF2.cff.topDictIndex[0]
- vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
- interpolateFromDeltas = vsInstancer.interpolateFromDeltas
- interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
- CFF2.desubroutinize()
- interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
- interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
- del topDict.rawDict["VarStore"]
- del topDict.VarStore
-
- if "MVAR" in varfont:
- log.info("Mutating MVAR table")
- mvar = varfont["MVAR"].table
- varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
- records = mvar.ValueRecord
- for rec in records:
- mvarTag = rec.ValueTag
- if mvarTag not in MVAR_ENTRIES:
- continue
- tableTag, itemName = MVAR_ENTRIES[mvarTag]
- delta = otRound(varStoreInstancer[rec.VarIdx])
- if not delta:
- continue
- setattr(
- varfont[tableTag],
- itemName,
- getattr(varfont[tableTag], itemName) + delta,
- )
-
- log.info("Mutating FeatureVariations")
- for tableTag in "GSUB", "GPOS":
- if not tableTag in varfont:
- continue
- table = varfont[tableTag].table
- if not getattr(table, "FeatureVariations", None):
- continue
- variations = table.FeatureVariations
- for record in variations.FeatureVariationRecord:
- applies = True
- for condition in record.ConditionSet.ConditionTable:
- if condition.Format == 1:
- axisIdx = condition.AxisIndex
- axisTag = fvar.axes[axisIdx].axisTag
- Min = condition.FilterRangeMinValue
- Max = condition.FilterRangeMaxValue
- v = loc[axisTag]
- if not (Min <= v <= Max):
- applies = False
- else:
- applies = False
- if not applies:
- break
-
- if applies:
- assert record.FeatureTableSubstitution.Version == 0x00010000
- for rec in record.FeatureTableSubstitution.SubstitutionRecord:
- table.FeatureList.FeatureRecord[
- rec.FeatureIndex
- ].Feature = rec.Feature
- break
- del table.FeatureVariations
-
- if "GDEF" in varfont and varfont["GDEF"].table.Version >= 0x00010003:
- log.info("Mutating GDEF/GPOS/GSUB tables")
- gdef = varfont["GDEF"].table
- instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)
-
- merger = MutatorMerger(varfont, instancer)
- merger.mergeTables(varfont, [varfont], ["GDEF", "GPOS"])
-
- # Downgrade GDEF.
- del gdef.VarStore
- gdef.Version = 0x00010002
- if gdef.MarkGlyphSetsDef is None:
- del gdef.MarkGlyphSetsDef
- gdef.Version = 0x00010000
-
- if not (
- gdef.LigCaretList
- or gdef.MarkAttachClassDef
- or gdef.GlyphClassDef
- or gdef.AttachList
- or (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)
- ):
- del varfont["GDEF"]
-
- addidef = False
- if glyf:
- for glyph in glyf.glyphs.values():
- if hasattr(glyph, "program"):
- instructions = glyph.program.getAssembly()
- # If the GETVARIATION opcode is used in the bytecode of any glyph, add an IDEF
- addidef = any(op.startswith("GETVARIATION") for op in instructions)
- if addidef:
- break
- if overlap:
- for glyph_name in glyf.keys():
- glyph = glyf[glyph_name]
- # Set OVERLAP_COMPOUND bit for compound glyphs
- if glyph.isComposite():
- glyph.components[0].flags |= OVERLAP_COMPOUND
- # Set OVERLAP_SIMPLE bit for simple glyphs
- elif glyph.numberOfContours > 0:
- glyph.flags[0] |= flagOverlapSimple
- if addidef:
- log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
- asm = []
- if "fpgm" in varfont:
- fpgm = varfont["fpgm"]
- asm = fpgm.program.getAssembly()
- else:
- fpgm = newTable("fpgm")
- fpgm.program = ttProgram.Program()
- varfont["fpgm"] = fpgm
- asm.append("PUSHB[000] 145")
- asm.append("IDEF[ ]")
- args = [str(len(loc))]
- for a in fvar.axes:
- args.append(str(floatToFixed(loc[a.axisTag], 14)))
- asm.append("NPUSHW[ ] " + " ".join(args))
- asm.append("ENDF[ ]")
- fpgm.program.fromAssembly(asm)
-
- # Change maxp attributes as IDEF is added
- if "maxp" in varfont:
- maxp = varfont["maxp"]
- setattr(
- maxp, "maxInstructionDefs", 1 + getattr(maxp, "maxInstructionDefs", 0)
- )
- setattr(
- maxp,
- "maxStackElements",
- max(len(loc), getattr(maxp, "maxStackElements", 0)),
- )
-
- if "name" in varfont:
- log.info("Pruning name table")
- exclude = {a.axisNameID for a in fvar.axes}
- for i in fvar.instances:
- exclude.add(i.subfamilyNameID)
- exclude.add(i.postscriptNameID)
- if "ltag" in varfont:
- # Drop the whole 'ltag' table if all its language tags are referenced by
- # name records to be pruned.
- # TODO: prune unused ltag tags and re-enumerate langIDs accordingly
- excludedUnicodeLangIDs = [
- n.langID
- for n in varfont["name"].names
- if n.nameID in exclude and n.platformID == 0 and n.langID != 0xFFFF
- ]
- if set(excludedUnicodeLangIDs) == set(range(len((varfont["ltag"].tags)))):
- del varfont["ltag"]
- varfont["name"].names[:] = [
- n for n in varfont["name"].names if n.nameID not in exclude
- ]
-
- if "wght" in location and "OS/2" in varfont:
- varfont["OS/2"].usWeightClass = otRound(max(1, min(location["wght"], 1000)))
- if "wdth" in location:
- wdth = location["wdth"]
- for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
- if wdth < percent:
- varfont["OS/2"].usWidthClass = widthClass
- break
- else:
- varfont["OS/2"].usWidthClass = 9
- if "slnt" in location and "post" in varfont:
- varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))
-
- log.info("Removing variable tables")
- for tag in ("avar", "cvar", "fvar", "gvar", "HVAR", "MVAR", "VVAR", "STAT"):
- if tag in varfont:
- del varfont[tag]
-
- return varfont
-
-
-def main(args=None):
- """Instantiate a variation font"""
- from fontTools import configLogger
- import argparse
-
- parser = argparse.ArgumentParser(
- "fonttools varLib.mutator", description="Instantiate a variable font"
- )
- parser.add_argument("input", metavar="INPUT.ttf", help="Input variable TTF file.")
- parser.add_argument(
- "locargs",
- metavar="AXIS=LOC",
- nargs="*",
- help="List of space-separated locations. A location consists of "
- "the name of a variation axis, followed by '=' and a number. E.g.: "
- " wght=700 wdth=80. The default is the location of the base master.",
- )
- parser.add_argument(
- "-o",
- "--output",
- metavar="OUTPUT.ttf",
- default=None,
- help="Output instance TTF file (default: INPUT-instance.ttf).",
- )
- parser.add_argument(
- "--no-recalc-timestamp",
- dest="recalc_timestamp",
- action="store_false",
- help="Don't set the output font's timestamp to the current time.",
- )
- logging_group = parser.add_mutually_exclusive_group(required=False)
- logging_group.add_argument(
- "-v", "--verbose", action="store_true", help="Run more verbosely."
- )
- logging_group.add_argument(
- "-q", "--quiet", action="store_true", help="Turn verbosity off."
- )
- parser.add_argument(
- "--no-overlap",
- dest="overlap",
- action="store_false",
- help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags.",
- )
- options = parser.parse_args(args)
-
- varfilename = options.input
- outfile = (
- os.path.splitext(varfilename)[0] + "-instance.ttf"
- if not options.output
- else options.output
- )
- configLogger(
- level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
- )
-
- loc = {}
- for arg in options.locargs:
- try:
- tag, val = arg.split("=")
- assert len(tag) <= 4
- loc[tag.ljust(4)] = float(val)
- except (ValueError, AssertionError):
- parser.error("invalid location argument format: %r" % arg)
- log.info("Location: %s", loc)
-
- log.info("Loading variable font")
- varfont = TTFont(varfilename, recalcTimestamp=options.recalc_timestamp)
-
- instantiateVariableFont(varfont, loc, inplace=True, overlap=options.overlap)
-
- log.info("Saving instance font %s", outfile)
- varfont.save(outfile)
-
-
-if __name__ == "__main__":
- import sys
-
- if len(sys.argv) > 1:
- sys.exit(main())
- import doctest
-
- sys.exit(doctest.testmod().failed)
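The deleted mutator module can also be driven programmatically rather than through the main() entry point above. A minimal sketch, assuming a fontTools installation that still ships fontTools.varLib.mutator; the font paths and axis values are placeholders:

```python
from fontTools.ttLib import TTFont
from fontTools.varLib.mutator import instantiateVariableFont

# Load a variable font and pin two axes (user-space coordinates).
varfont = TTFont("MyVariable.ttf")  # hypothetical input file
instance = instantiateVariableFont(
    varfont,
    {"wght": 700, "wdth": 80},  # axis tag -> location, as in the CLI's AXIS=LOC arguments
    inplace=False,              # return a new TTFont instead of mutating varfont
    overlap=True,               # set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags
)
instance.save("MyVariable-Bold.ttf")
```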
diff --git a/spaces/josedolot/HybridNet_Demo2/encoders/__init__.py b/spaces/josedolot/HybridNet_Demo2/encoders/__init__.py
deleted file mode 100644
index 55af5df80660a22c83a524f4e01e9ad7a8deb642..0000000000000000000000000000000000000000
--- a/spaces/josedolot/HybridNet_Demo2/encoders/__init__.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import functools
-import torch.utils.model_zoo as model_zoo
-
-from .resnet import resnet_encoders
-from .dpn import dpn_encoders
-from .vgg import vgg_encoders
-from .senet import senet_encoders
-from .densenet import densenet_encoders
-from .inceptionresnetv2 import inceptionresnetv2_encoders
-from .inceptionv4 import inceptionv4_encoders
-from .efficientnet import efficient_net_encoders
-from .mobilenet import mobilenet_encoders
-from .xception import xception_encoders
-from .timm_efficientnet import timm_efficientnet_encoders
-from .timm_resnest import timm_resnest_encoders
-from .timm_res2net import timm_res2net_encoders
-from .timm_regnet import timm_regnet_encoders
-from .timm_sknet import timm_sknet_encoders
-from .timm_mobilenetv3 import timm_mobilenetv3_encoders
-from .timm_gernet import timm_gernet_encoders
-
-from .timm_universal import TimmUniversalEncoder
-
-from ._preprocessing import preprocess_input
-
-encoders = {}
-encoders.update(resnet_encoders)
-encoders.update(dpn_encoders)
-encoders.update(vgg_encoders)
-encoders.update(senet_encoders)
-encoders.update(densenet_encoders)
-encoders.update(inceptionresnetv2_encoders)
-encoders.update(inceptionv4_encoders)
-encoders.update(efficient_net_encoders)
-encoders.update(mobilenet_encoders)
-encoders.update(xception_encoders)
-encoders.update(timm_efficientnet_encoders)
-encoders.update(timm_resnest_encoders)
-encoders.update(timm_res2net_encoders)
-encoders.update(timm_regnet_encoders)
-encoders.update(timm_sknet_encoders)
-encoders.update(timm_mobilenetv3_encoders)
-encoders.update(timm_gernet_encoders)
-
-
-def get_encoder(name, in_channels=3, depth=5, weights=None, output_stride=32, **kwargs):
-
- if name.startswith("tu-"):
- name = name[3:]
- encoder = TimmUniversalEncoder(
- name=name,
- in_channels=in_channels,
- depth=depth,
- output_stride=output_stride,
- pretrained=weights is not None,
- **kwargs
- )
- return encoder
-
- try:
- Encoder = encoders[name]["encoder"]
- except KeyError:
- raise KeyError("Wrong encoder name `{}`, supported encoders: {}".format(name, list(encoders.keys())))
-
- params = encoders[name]["params"]
- params.update(depth=depth)
- encoder = Encoder(**params)
-
- if weights is not None:
- try:
- settings = encoders[name]["pretrained_settings"][weights]
- except KeyError:
- raise KeyError("Wrong pretrained weights `{}` for encoder `{}`. Available options are: {}".format(
- weights, name, list(encoders[name]["pretrained_settings"].keys()),
- ))
- encoder.load_state_dict(model_zoo.load_url(settings["url"]))
-
- encoder.set_in_channels(in_channels, pretrained=weights is not None)
- if output_stride != 32:
- encoder.make_dilated(output_stride)
-
- return encoder
-
-
-def get_encoder_names():
- return list(encoders.keys())
-
-
-def get_preprocessing_params(encoder_name, pretrained="imagenet"):
- settings = encoders[encoder_name]["pretrained_settings"]
-
- if pretrained not in settings.keys():
- raise ValueError("Available pretrained options {}".format(settings.keys()))
-
- formatted_settings = {}
- formatted_settings["input_space"] = settings[pretrained].get("input_space")
- formatted_settings["input_range"] = settings[pretrained].get("input_range")
- formatted_settings["mean"] = settings[pretrained].get("mean")
- formatted_settings["std"] = settings[pretrained].get("std")
- return formatted_settings
-
-
-def get_preprocessing_fn(encoder_name, pretrained="imagenet"):
- params = get_preprocessing_params(encoder_name, pretrained=pretrained)
- return functools.partial(preprocess_input, **params)
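For reference, a rough usage sketch of the encoder factory removed here; it follows the segmentation-models-pytorch conventions this file mirrors, the encoder name and tensor shapes are illustrative, and no pretrained weights are downloaded:

```python
import torch
# Assumes this file is importable as the local `encoders` package it was deleted from.
from encoders import get_encoder, get_encoder_names, get_preprocessing_fn

print(get_encoder_names()[:5])  # a few of the registered encoder names

# Build an encoder without pretrained weights; depth=5 keeps all feature stages.
encoder = get_encoder("resnet34", in_channels=3, depth=5, weights=None)
preprocess = get_preprocessing_fn("resnet34", pretrained="imagenet")  # normalization helper

x = torch.randn(1, 3, 224, 224)
features = encoder(x)  # typically a list of feature maps, one per stage
print([f.shape for f in features])
```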
diff --git a/spaces/justin-zk/Personalize-SAM/per_segment_anything/modeling/prompt_encoder.py b/spaces/justin-zk/Personalize-SAM/per_segment_anything/modeling/prompt_encoder.py
deleted file mode 100644
index c3143f4f8e02ddd7ca8587b40ff5d47c3a6b7ef3..0000000000000000000000000000000000000000
--- a/spaces/justin-zk/Personalize-SAM/per_segment_anything/modeling/prompt_encoder.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-from torch import nn
-
-from typing import Any, Optional, Tuple, Type
-
-from .common import LayerNorm2d
-
-
-class PromptEncoder(nn.Module):
- def __init__(
- self,
- embed_dim: int,
- image_embedding_size: Tuple[int, int],
- input_image_size: Tuple[int, int],
- mask_in_chans: int,
- activation: Type[nn.Module] = nn.GELU,
- ) -> None:
- """
- Encodes prompts for input to SAM's mask decoder.
-
- Arguments:
- embed_dim (int): The prompts' embedding dimension
- image_embedding_size (tuple(int, int)): The spatial size of the
- image embedding, as (H, W).
- input_image_size (tuple(int, int)): The padded size of the image as input
- to the image encoder, as (H, W).
- mask_in_chans (int): The number of hidden channels used for
- encoding input masks.
- activation (nn.Module): The activation to use when encoding
- input masks.
- """
- super().__init__()
- self.embed_dim = embed_dim
- self.input_image_size = input_image_size
- self.image_embedding_size = image_embedding_size
- self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
-
- self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
- point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
- self.point_embeddings = nn.ModuleList(point_embeddings)
- self.not_a_point_embed = nn.Embedding(1, embed_dim)
-
- self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
- self.mask_downscaling = nn.Sequential(
- nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
- LayerNorm2d(mask_in_chans // 4),
- activation(),
- nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
- LayerNorm2d(mask_in_chans),
- activation(),
- nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
- )
- self.no_mask_embed = nn.Embedding(1, embed_dim)
-
- def get_dense_pe(self) -> torch.Tensor:
- """
- Returns the positional encoding used to encode point prompts,
- applied to a dense set of points the shape of the image encoding.
-
- Returns:
- torch.Tensor: Positional encoding with shape
- 1x(embed_dim)x(embedding_h)x(embedding_w)
- """
- return self.pe_layer(self.image_embedding_size).unsqueeze(0)
-
- def _embed_points(
- self,
- points: torch.Tensor,
- labels: torch.Tensor,
- pad: bool,
- ) -> torch.Tensor:
- """Embeds point prompts."""
- points = points + 0.5 # Shift to center of pixel
- if pad:
- padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
- padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
- points = torch.cat([points, padding_point], dim=1)
- labels = torch.cat([labels, padding_label], dim=1)
- point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
- point_embedding[labels == -1] = 0.0
- point_embedding[labels == -1] += self.not_a_point_embed.weight
- point_embedding[labels == 0] += self.point_embeddings[0].weight
- point_embedding[labels == 1] += self.point_embeddings[1].weight
- return point_embedding
-
- def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
- """Embeds box prompts."""
- boxes = boxes + 0.5 # Shift to center of pixel
- coords = boxes.reshape(-1, 2, 2)
- corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
- corner_embedding[:, 0, :] += self.point_embeddings[2].weight
- corner_embedding[:, 1, :] += self.point_embeddings[3].weight
- return corner_embedding
-
- def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
- """Embeds mask inputs."""
- mask_embedding = self.mask_downscaling(masks)
- return mask_embedding
-
- def _get_batch_size(
- self,
- points: Optional[Tuple[torch.Tensor, torch.Tensor]],
- boxes: Optional[torch.Tensor],
- masks: Optional[torch.Tensor],
- ) -> int:
- """
- Gets the batch size of the output given the batch size of the input prompts.
- """
- if points is not None:
- return points[0].shape[0]
- elif boxes is not None:
- return boxes.shape[0]
- elif masks is not None:
- return masks.shape[0]
- else:
- return 1
-
- def _get_device(self) -> torch.device:
- return self.point_embeddings[0].weight.device
-
- def forward(
- self,
- points: Optional[Tuple[torch.Tensor, torch.Tensor]],
- boxes: Optional[torch.Tensor],
- masks: Optional[torch.Tensor],
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- """
- Embeds different types of prompts, returning both sparse and dense
- embeddings.
-
- Arguments:
- points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
- and labels to embed.
- boxes (torch.Tensor or none): boxes to embed
- masks (torch.Tensor or none): masks to embed
-
- Returns:
- torch.Tensor: sparse embeddings for the points and boxes, with shape
- BxNx(embed_dim), where N is determined by the number of input points
- and boxes.
- torch.Tensor: dense embeddings for the masks, in the shape
- Bx(embed_dim)x(embed_H)x(embed_W)
- """
- bs = self._get_batch_size(points, boxes, masks)
- sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
- if points is not None:
- coords, labels = points
- point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
- sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
- if boxes is not None:
- box_embeddings = self._embed_boxes(boxes)
- sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
-
- if masks is not None:
- dense_embeddings = self._embed_masks(masks)
- else:
- dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
- bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
- )
-
- return sparse_embeddings, dense_embeddings
-
-
-class PositionEmbeddingRandom(nn.Module):
- """
- Positional encoding using random spatial frequencies.
- """
-
- def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
- super().__init__()
- if scale is None or scale <= 0.0:
- scale = 1.0
- self.register_buffer(
- "positional_encoding_gaussian_matrix",
- scale * torch.randn((2, num_pos_feats)),
- )
-
- def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
- """Positionally encode points that are normalized to [0,1]."""
- # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
- coords = 2 * coords - 1
- coords = coords @ self.positional_encoding_gaussian_matrix
- coords = 2 * np.pi * coords
- # outputs d_1 x ... x d_n x C shape
- return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
-
- def forward(self, size: Tuple[int, int]) -> torch.Tensor:
- """Generate positional encoding for a grid of the specified size."""
- h, w = size
- device: Any = self.positional_encoding_gaussian_matrix.device
- grid = torch.ones((h, w), device=device, dtype=torch.float32)
- y_embed = grid.cumsum(dim=0) - 0.5
- x_embed = grid.cumsum(dim=1) - 0.5
- y_embed = y_embed / h
- x_embed = x_embed / w
-
- pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
- return pe.permute(2, 0, 1) # C x H x W
-
- def forward_with_coords(
- self, coords_input: torch.Tensor, image_size: Tuple[int, int]
- ) -> torch.Tensor:
- """Positionally encode points that are not normalized to [0,1]."""
- coords = coords_input.clone()
- coords[:, :, 0] = coords[:, :, 0] / image_size[1]
- coords[:, :, 1] = coords[:, :, 1] / image_size[0]
- return self._pe_encoding(coords.to(torch.float)) # B x N x C
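For orientation, a rough sketch of how the PromptEncoder removed above is typically instantiated and called; the embedding sizes match common SAM defaults but are only illustrative here:

```python
import torch
# Assumes the deleted module is still importable from its original location.
from per_segment_anything.modeling.prompt_encoder import PromptEncoder

prompt_encoder = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),   # e.g. 1024 / 16 per side
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)

# One foreground point prompt (B x N x 2 pixel coordinates, label 1 = foreground).
coords = torch.tensor([[[500.0, 375.0]]])
labels = torch.tensor([[1]])
sparse, dense = prompt_encoder(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape)  # (1, N + padding point, embed_dim)
print(dense.shape)   # (1, embed_dim, 64, 64)
```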
diff --git a/spaces/k1ngtai/MMS/uroman/lib/NLP/Romanizer.pm b/spaces/k1ngtai/MMS/uroman/lib/NLP/Romanizer.pm
deleted file mode 100644
index b504ec6eefcf1b6b28e216c9fe3d69d2735b2b25..0000000000000000000000000000000000000000
--- a/spaces/k1ngtai/MMS/uroman/lib/NLP/Romanizer.pm
+++ /dev/null
@@ -1,2020 +0,0 @@
-################################################################
-# #
-# Romanizer #
-# #
-################################################################
-
-package NLP::Romanizer;
-
-use NLP::Chinese;
-use NLP::UTF8;
-use NLP::utilities;
-use JSON;
-$utf8 = NLP::UTF8;
-$util = NLP::utilities;
-$chinesePM = NLP::Chinese;
-
-my $verbosePM = 0;
-%empty_ht = ();
-
-my $braille_capital_letter_indicator = "\xE2\xA0\xA0";
-my $braille_number_indicator = "\xE2\xA0\xBC";
-my $braille_decimal_point = "\xE2\xA0\xA8";
-my $braille_comma = "\xE2\xA0\x82";
-my $braille_solidus = "\xE2\xA0\x8C";
-my $braille_numeric_space = "\xE2\xA0\x90";
-my $braille_letter_indicator = "\xE2\xA0\xB0";
-my $braille_period = "\xE2\xA0\xB2";
-
-sub new {
- local($caller) = @_;
-
- my $object = {};
- my $class = ref( $caller ) || $caller;
- bless($object, $class);
- return $object;
-}
-
-sub load_unicode_data {
- local($this, *ht, $filename) = @_;
- # ../../data/UnicodeData.txt
-
- $n = 0;
- if (open(IN, $filename)) {
- while (<IN>) {
- if (($unicode_value, $char_name, $general_category, $canon_comb_classes, $bidir_category, $char_decomp_mapping, $decimal_digit_value, $digit_value, $numeric_value, $mirrored, $unicode_1_0_name, $comment_field, $uc_mapping, $lc_mapping, $title_case_mapping) = split(";", $_)) {
- $utf8_code = $utf8->unicode_hex_string2string($unicode_value);
- $ht{UTF_TO_CHAR_NAME}->{$utf8_code} = $char_name;
- $ht{UTF_NAME_TO_UNICODE}->{$char_name} = $unicode_value;
- $ht{UTF_NAME_TO_CODE}->{$char_name} = $utf8_code;
- $ht{UTF_TO_CAT}->{$utf8_code} = $general_category;
- $ht{UTF_TO_NUMERIC}->{$utf8_code} = $numeric_value unless $numeric_value eq "";
- $n++;
- }
- }
- close(IN);
- # print STDERR "Loaded $n entries from $filename\n";
- } else {
- print STDERR "Can't open $filename\n";
- }
-}
-
-sub load_unicode_overwrite_romanization {
- local($this, *ht, $filename) = @_;
- # ../../data/UnicodeDataOverwrite.txt
-
- $n = 0;
- if (open(IN, $filename)) {
- while (<IN>) {
- next if /^#/;
- $unicode_value = $util->slot_value_in_double_colon_del_list($_, "u");
- $romanization = $util->slot_value_in_double_colon_del_list($_, "r");
- $numeric = $util->slot_value_in_double_colon_del_list($_, "num");
- $picture = $util->slot_value_in_double_colon_del_list($_, "pic");
- $syllable_info = $util->slot_value_in_double_colon_del_list($_, "syllable-info");
- $tone_mark = $util->slot_value_in_double_colon_del_list($_, "tone-mark");
- $char_name = $util->slot_value_in_double_colon_del_list($_, "name");
- $entry_processed_p = 0;
- $utf8_code = $utf8->unicode_hex_string2string($unicode_value);
- if ($unicode_value) {
- $ht{UTF_TO_CHAR_ROMANIZATION}->{$utf8_code} = $romanization if $romanization;
- $ht{UTF_TO_NUMERIC}->{$utf8_code} = $numeric if defined($numeric) && ($numeric ne "");
- $ht{UTF_TO_PICTURE_DESCR}->{$utf8_code} = $picture if $picture;
- $ht{UTF_TO_SYLLABLE_INFO}->{$utf8_code} = $syllable_info if $syllable_info;
- $ht{UTF_TO_TONE_MARK}->{$utf8_code} = $tone_mark if $tone_mark;
- $ht{UTF_TO_CHAR_NAME}->{$utf8_code} = $char_name if $char_name;
- $entry_processed_p = 1 if $romanization || $numeric || $picture || $syllable_info || $tone_mark;
- }
- $n++ if $entry_processed_p;
- }
- close(IN);
- } else {
- print STDERR "Can't open $filename\n";
- }
-}
-
-sub load_script_data {
- local($this, *ht, $filename) = @_;
- # ../../data/Scripts.txt
-
- $n = 0;
- if (open(IN, $filename)) {
- while (<IN>) {
- next unless $script_name = $util->slot_value_in_double_colon_del_list($_, "script-name");
- $abugida_default_vowel_s = $util->slot_value_in_double_colon_del_list($_, "abugida-default-vowel");
- $alt_script_name_s = $util->slot_value_in_double_colon_del_list($_, "alt-script-name");
- $language_s = $util->slot_value_in_double_colon_del_list($_, "language");
- $direction = $util->slot_value_in_double_colon_del_list($_, "direction"); # right-to-left
- $font_family_s = $util->slot_value_in_double_colon_del_list($_, "font-family");
- $ht{SCRIPT_P}->{$script_name} = 1;
- $ht{SCRIPT_NORM}->{(uc $script_name)} = $script_name;
- $ht{DIRECTION}->{$script_name} = $direction if $direction;
- foreach $language (split(/,\s*/, $language_s)) {
- $ht{SCRIPT_LANGUAGE}->{$script_name}->{$language} = 1;
- $ht{LANGUAGE_SCRIPT}->{$language}->{$script_name} = 1;
- }
- foreach $alt_script_name (split(/,\s*/, $alt_script_name_s)) {
- $ht{SCRIPT_NORM}->{$alt_script_name} = $script_name;
- $ht{SCRIPT_NORM}->{(uc $alt_script_name)} = $script_name;
- }
- foreach $abugida_default_vowel (split(/,\s*/, $abugida_default_vowel_s)) {
- $ht{SCRIPT_ABUDIGA_DEFAULT_VOWEL}->{$script_name}->{$abugida_default_vowel} = 1 if $abugida_default_vowel;
- }
- foreach $font_family (split(/,\s*/, $font_family_s)) {
- $ht{SCRIPT_FONT}->{$script_name}->{$font_family} = 1 if $font_family;
- }
- $n++;
- }
- close(IN);
- # print STDERR "Loaded $n entries from $filename\n";
- } else {
- print STDERR "Can't open $filename\n";
- }
-}
-
-sub unicode_hangul_romanization {
- local($this, $s, $pass_through_p) = @_;
-
- $pass_through_p = 0 unless defined($pass_through_p);
- @leads = split(/\s+/, "g gg n d dd r m b bb s ss - j jj c k t p h");
- # @vowels = split(/\s+/, "a ae ya yai e ei ye yei o oa oai oi yo u ue uei ui yu w wi i");
- @vowels = split(/\s+/, "a ae ya yae eo e yeo ye o wa wai oe yo u weo we wi yu eu yi i");
- @tails = split(/\s+/, "- g gg gs n nj nh d l lg lm lb ls lt lp lh m b bs s ss ng j c k t p h");
- $result = "";
- @chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht);
- foreach $char (@chars) {
- $unicode = $utf8->utf8_to_unicode($char);
- if (($unicode >= 0xAC00) && ($unicode <= 0xD7A3)) {
- $code = $unicode - 0xAC00;
- $lead_index = int($code / (28*21));
- $vowel_index = int($code/28) % 21;
- $tail_index = $code % 28;
- $rom = $leads[$lead_index] . $vowels[$vowel_index] . $tails[$tail_index];
- $rom =~ s/-//g;
- $result .= $rom;
- } elsif ($pass_through_p) {
- $result .= $char;
- }
- }
- return $result;
-}
-
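The index arithmetic in unicode_hangul_romanization above is the standard Unicode Hangul syllable decomposition (19 leads x 21 vowels x 28 tails, starting at U+AC00). A small Python cross-check of the same tables and arithmetic:

```python
# Same lead/vowel/tail tables as the Perl sub above; '-' marks an empty slot.
LEADS = "g gg n d dd r m b bb s ss - j jj c k t p h".split()
VOWELS = "a ae ya yae eo e yeo ye o wa wai oe yo u weo we wi yu eu yi i".split()
TAILS = "- g gg gs n nj nh d l lg lm lb ls lt lp lh m b bs s ss ng j c k t p h".split()

def romanize_hangul_syllable(ch):
    code = ord(ch) - 0xAC00                      # 0 .. 11171 for precomposed syllables
    lead = code // (28 * 21)
    vowel = (code // 28) % 21
    tail = code % 28
    return (LEADS[lead] + VOWELS[vowel] + TAILS[tail]).replace("-", "")

print(romanize_hangul_syllable("\uD55C"))        # U+D55C HAN -> "han"
```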
-sub listify_comma_sep_string {
- local($this, $s) = @_;
-
- @result_list = ();
- return @result_list unless $s =~ /\S/;
- $s = $util->trim2($s);
- my $elem;
-
- while (($elem, $rest) = ($s =~ /^("(?:\\"|[^"])*"|'(?:\\'|[^'])*'|[^"', ]+),\s*(.*)$/)) {
- push(@result_list, $util->dequote_string($elem));
- $s = $rest;
- }
- push(@result_list, $util->dequote_string($s)) if $s =~ /\S/;
-
- return @result_list;
-}
-
-sub braille_string_p {
- local($this, $s) = @_;
-
- return ($s =~ /^(\xE2[\xA0-\xA3][\x80-\xBF])+$/);
-}
-
-sub register_word_boundary_info {
- local($this, *ht, $lang_code, $utf8_source_string, $utf8_target_string, $use_only_for_whole_word_p,
- $use_only_at_start_of_word_p, $use_only_at_end_of_word_p,
- $dont_use_at_start_of_word_p, $dont_use_at_end_of_word_p) = @_;
-
- if ($use_only_for_whole_word_p) {
- if ($lang_code) {
- $ht{USE_ONLY_FOR_WHOLE_WORD_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_target_string} = 1;
- } else {
- $ht{USE_ONLY_FOR_WHOLE_WORD}->{$utf8_source_string}->{$utf8_target_string} = 1;
- }
- }
- if ($use_only_at_start_of_word_p) {
- if ($lang_code) {
- $ht{USE_ONLY_AT_START_OF_WORD_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_target_string} = 1;
- } else {
- $ht{USE_ONLY_AT_START_OF_WORD}->{$utf8_source_string}->{$utf8_target_string} = 1;
- }
- }
- if ($use_only_at_end_of_word_p) {
- if ($lang_code) {
- $ht{USE_ONLY_AT_END_OF_WORD_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_target_string} = 1;
- } else {
- $ht{USE_ONLY_AT_END_OF_WORD}->{$utf8_source_string}->{$utf8_target_string} = 1;
- }
- }
- if ($dont_use_at_start_of_word_p) {
- if ($lang_code) {
- $ht{DONT_USE_AT_START_OF_WORD_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_target_string} = 1;
- } else {
- $ht{DONT_USE_AT_START_OF_WORD}->{$utf8_source_string}->{$utf8_target_string} = 1;
- }
- }
- if ($dont_use_at_end_of_word_p) {
- if ($lang_code) {
- $ht{DONT_USE_AT_END_OF_WORD_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_target_string} = 1;
- } else {
- $ht{DONT_USE_AT_END_OF_WORD}->{$utf8_source_string}->{$utf8_target_string} = 1;
- }
- }
-}
-
-sub load_romanization_table {
- local($this, *ht, $filename) = @_;
- # ../../data/romanization-table.txt
-
- $n = 0;
- $line_number = 0;
- if (open(IN, $filename)) {
- while (<IN>) {
- $line_number++;
- next if /^#/;
- if ($_ =~ /^::preserve\s/) {
- $from_unicode = $util->slot_value_in_double_colon_del_list($_, "from");
- $to_unicode = $util->slot_value_in_double_colon_del_list($_, "to");
- if ($from_unicode =~ /^(?:U\+|\\u)[0-9A-F]{4,}$/i) {
- $from_unicode =~ s/^(?:U\+|\\u)//;
- $from_code_point = hex($from_unicode);
- } else {
- $from_code_point = "";
- }
- if ($to_unicode =~ /^(?:U\+|\\u)[0-9A-F]{4,}$/i) {
- $to_unicode =~ s/^(?:U\+|\\u)//;
- $to_code_point = hex($to_unicode);
- } else {
- $to_code_point = $from_code_point;
- }
- if ($from_code_point ne "") {
- # print STDERR "Preserve code-points $from_unicode--$to_unicode = $from_code_point--$to_code_point\n";
- foreach $code_point (($from_code_point .. $to_code_point)) {
- $utf8_string = $utf8->unicode2string($code_point);
- $ht{UTF_CHAR_MAPPING}->{$utf8_string}->{$utf8_string} = 1;
- }
- $n++;
- }
- next;
- }
- $utf8_source_string = $util->slot_value_in_double_colon_del_list($_, "s");
- $utf8_target_string = $util->slot_value_in_double_colon_del_list($_, "t");
- $utf8_alt_target_string_s = $util->slot_value_in_double_colon_del_list($_, "t-alt");
- $use_alt_in_pointed_p = ($_ =~ /::use-alt-in-pointed\b/);
- $use_only_for_whole_word_p = ($_ =~ /::use-only-for-whole-word\b/);
- $use_only_at_start_of_word_p = ($_ =~ /::use-only-at-start-of-word\b/);
- $use_only_at_end_of_word_p = ($_ =~ /::use-only-at-end-of-word\b/);
- $dont_use_at_start_of_word_p = ($_ =~ /::dont-use-at-start-of-word\b/);
- $dont_use_at_end_of_word_p = ($_ =~ /::dont-use-at-end-of-word\b/);
- $use_only_in_lower_case_enviroment_p = ($_ =~ /::use-only-in-lower-case-enviroment\b/);
- $word_external_punctuation_p = ($_ =~ /::word-external-punctuation\b/);
- $utf8_source_string =~ s/\s*$//;
- $utf8_target_string =~ s/\s*$//;
- $utf8_alt_target_string_s =~ s/\s*$//;
- $utf8_target_string =~ s/^"(.*)"$/$1/;
- $utf8_target_string =~ s/^'(.*)'$/$1/;
- @utf8_alt_targets = $this->listify_comma_sep_string($utf8_alt_target_string_s);
- $numeric = $util->slot_value_in_double_colon_del_list($_, "num");
- $numeric =~ s/\s*$//;
- $annotation = $util->slot_value_in_double_colon_del_list($_, "annotation");
- $annotation =~ s/\s*$//;
- $lang_code = $util->slot_value_in_double_colon_del_list($_, "lcode");
- $prob = $util->slot_value_in_double_colon_del_list($_, "p") || 1;
- unless (($utf8_target_string eq "") && ($numeric =~ /\d/)) {
- if ($lang_code) {
- $ht{UTF_CHAR_MAPPING_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_target_string} = $prob;
- } else {
- $ht{UTF_CHAR_MAPPING}->{$utf8_source_string}->{$utf8_target_string} = $prob;
- }
- if ($word_external_punctuation_p) {
- if ($lang_code) {
- $ht{WORD_EXTERNAL_PUNCTUATION_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_target_string} = $prob;
- } else {
- $ht{WORD_EXTERNAL_PUNCTUATION}->{$utf8_source_string}->{$utf8_target_string} = $prob;
- }
- }
- if ($this->braille_string_p($utf8_source_string)) {
- if (($utf8_target_string =~ /^[a-z]+$/)
- && (! ($utf8_source_string =~ /^$braille_capital_letter_indicator/))) {
- my $uc_utf8_source_string = "$braille_capital_letter_indicator$utf8_source_string";
- my $uc_utf8_target_string = ucfirst $utf8_target_string;
- if ($lang_code) {
- $ht{UTF_CHAR_MAPPING_LANG_SPEC}->{$lang_code}->{$uc_utf8_source_string}->{$uc_utf8_target_string} = $prob;
- } else {
- $ht{UTF_CHAR_MAPPING}->{$uc_utf8_source_string}->{$uc_utf8_target_string} = $prob;
- }
- $this->register_word_boundary_info(*ht, $lang_code, $uc_utf8_source_string, $uc_utf8_target_string,
- $use_only_for_whole_word_p, $use_only_at_start_of_word_p, $use_only_at_end_of_word_p,
- $dont_use_at_start_of_word_p, $dont_use_at_end_of_word_p);
- }
- if (($utf8_target_string =~ /^[0-9]$/)
- && ($utf8_source_string =~ /^$braille_number_indicator./)) {
- my $core_number_char = $utf8_source_string;
- $core_number_char =~ s/$braille_number_indicator//;
- $ht{BRAILLE_TO_DIGIT}->{$core_number_char} = $utf8_target_string;
- }
- }
- }
- if ($use_only_in_lower_case_enviroment_p) {
- if ($lang_code) {
- $ht{USE_ONLY_IN_LOWER_CASE_ENVIROMENT_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_target_string} = 1;
- } else {
- $ht{USE_ONLY_IN_LOWER_CASE_ENVIROMENT}->{$utf8_source_string}->{$utf8_target_string} = 1;
- }
- }
- $this->register_word_boundary_info(*ht, $lang_code, $utf8_source_string, $utf8_target_string,
- $use_only_for_whole_word_p, $use_only_at_start_of_word_p, $use_only_at_end_of_word_p,
- $dont_use_at_start_of_word_p, $dont_use_at_end_of_word_p);
- foreach $utf8_alt_target (@utf8_alt_targets) {
- if ($lang_code) {
- $ht{UTF_CHAR_ALT_MAPPING_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_alt_target} = $prob;
- $ht{USE_ALT_IN_POINTED_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_alt_target} = 1 if $use_alt_in_pointed_p;
- } else {
- $ht{UTF_CHAR_ALT_MAPPING}->{$utf8_source_string}->{$utf8_alt_target} = $prob;
- $ht{USE_ALT_IN_POINTED}->{$utf8_source_string}->{$utf8_alt_target} = 1 if $use_alt_in_pointed_p;
- }
- if ($use_only_for_whole_word_p) {
- if ($lang_code) {
- $ht{USE_ALT_ONLY_FOR_WHOLE_WORD_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_alt_target} = 1;
- } else {
- $ht{USE_ALT_ONLY_FOR_WHOLE_WORD}->{$utf8_source_string}->{$utf8_alt_target} = 1;
- }
- }
- if ($use_only_at_start_of_word_p) {
- if ($lang_code) {
- $ht{USE_ALT_ONLY_AT_START_OF_WORD_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_alt_target} = 1;
- } else {
- $ht{USE_ALT_ONLY_AT_START_OF_WORD}->{$utf8_source_string}->{$utf8_alt_target} = 1;
- }
- }
- if ($use_only_at_end_of_word_p) {
- if ($lang_code) {
- $ht{USE_ALT_ONLY_AT_END_OF_WORD_LANG_SPEC}->{$lang_code}->{$utf8_source_string}->{$utf8_alt_target} = 1;
- } else {
- $ht{USE_ALT_ONLY_AT_END_OF_WORD}->{$utf8_source_string}->{$utf8_alt_target} = 1;
- }
- }
- }
- if ($numeric =~ /\d/) {
- $ht{UTF_TO_NUMERIC}->{$utf8_source_string} = $numeric;
- }
- if ($annotation =~ /\S/) {
- $ht{UTF_ANNOTATION}->{$utf8_source_string} = $annotation;
- }
- $n++;
- }
- close(IN);
- # print STDERR "Loaded $n entries from $filename\n";
- } else {
- print STDERR "Can't open $filename\n";
- }
-}
-
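The loader above reads double-colon-delimited entries of the form ::s SOURCE ::t TARGET, optionally followed by slots such as ::t-alt, ::lcode, ::num, or ::annotation. A rough Python approximation of the slot extraction it relies on; the real slot_value_in_double_colon_del_list lives in NLP::utilities, which is not part of this file, so the exact behavior is an assumption, and the sample entry is invented:

```python
import re

def slot_value(line, slot):
    """Approximate ::slot lookup: the value runs until the next ::slot or end of line."""
    m = re.search(r"::" + re.escape(slot) + r"\s+(.*?)(?=\s+::|\s*$)", line)
    return m.group(1) if m else ""

entry = "::s \u0915 ::t ka ::t-alt qa ::lcode hin"
print(slot_value(entry, "s"), slot_value(entry, "t"), slot_value(entry, "lcode"))
# -> the Devanagari letter KA, "ka", "hin"
```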
-sub char_name_to_script {
- local($this, $char_name, *ht) = @_;
-
- return $cached_result if $cached_result = $ht{CHAR_NAME_TO_SCRIPT}->{$char_name};
- $orig_char_name = $char_name;
- $char_name =~ s/\s+(CONSONANT|LETTER|LIGATURE|SIGN|SYLLABLE|SYLLABICS|VOWEL)\b.*$//;
- my $script_name;
- while ($char_name) {
- last if $script_name = $ht{SCRIPT_NORM}->{(uc $char_name)};
- $char_name =~ s/\s*\S+\s*$//;
- }
- $script_name = "" unless defined($script_name);
- $ht{CHAR_NAME_TO_SCRIPT}->{$orig_char_name} = $script_name; # cache under the original, untruncated name
- return $script_name;
-}
-
-sub letter_plus_char_p {
- local($this, $char_name) = @_;
-
- return $cached_result if $cached_result = $ht{CHAR_NAME_LETTER_PLUS}->{$char_name};
- my $letter_plus_p = ($char_name =~ /\b(?:LETTER|VOWEL SIGN|AU LENGTH MARK|CONSONANT SIGN|SIGN VIRAMA|SIGN PAMAAEH|SIGN COENG|SIGN AL-LAKUNA|SIGN ASAT|SIGN ANUSVARA|SIGN ANUSVARAYA|SIGN BINDI|TIPPI|SIGN NIKAHIT|SIGN CANDRABINDU|SIGN VISARGA|SIGN REAHMUK|SIGN NUKTA|SIGN DOT BELOW|HEBREW POINT)\b/) ? 1 : 0;
- $ht{CHAR_NAME_LETTER_PLUS}->{$char_name} = $letter_plus_p;
- return $letter_plus_p;
-}
-
-sub subjoined_char_p {
- local($this, $char_name) = @_;
-
- return $cached_result if $cached_result = $ht{CHAR_NAME_SUBJOINED}->{$char_name};
- my $subjoined_p = (($char_name =~ /\b(?:SUBJOINED LETTER|VOWEL SIGN|AU LENGTH MARK|EMPHASIS MARK|CONSONANT SIGN|SIGN VIRAMA|SIGN PAMAAEH|SIGN COENG|SIGN ASAT|SIGN ANUSVARA|SIGN ANUSVARAYA|SIGN BINDI|TIPPI|SIGN NIKAHIT|SIGN CANDRABINDU|SIGN VISARGA|SIGN REAHMUK|SIGN DOT BELOW|HEBREW (POINT|PUNCTUATION GERESH)|ARABIC (?:DAMMA|DAMMATAN|FATHA|FATHATAN|HAMZA|KASRA|KASRATAN|MADDAH|SHADDA|SUKUN))\b/)) ? 1 : 0;
- $ht{CHAR_NAME_SUBJOINED}->{$char_name} = $subjoined_p;
- return $subjoined_p;
-}
-
-sub new_node_id {
- local($this, *chart_ht) = @_;
-
- my $n_nodes = $chart_ht{N_NODES};
- $n_nodes++;
- $chart_ht{N_NODES} = $n_nodes;
- return $n_nodes;
-}
-
-sub add_node {
- local($this, $s, $start, $end, *chart_ht, $type, $comment) = @_;
-
- my $node_id = $this->new_node_id(*chart_ht);
- # print STDERR "add_node($node_id, $start-$end): $s [$comment]\n" if $comment =~ /number/;
- # print STDERR "add_node($node_id, $start-$end): $s [$comment]\n" if ($start >= 0) && ($start < 50);
- $chart_ht{NODE_START}->{$node_id} = $start;
- $chart_ht{NODE_END}->{$node_id} = $end;
- $chart_ht{NODES_STARTING_AT}->{$start}->{$node_id} = 1;
- $chart_ht{NODES_ENDING_AT}->{$end}->{$node_id} = 1;
- $chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end}->{$node_id} = 1;
- $chart_ht{NODE_TYPE}->{$node_id} = $type;
- $chart_ht{NODE_COMMENT}->{$node_id} = $comment;
- $chart_ht{NODE_ROMAN}->{$node_id} = $s;
- return $node_id;
-}
-
-sub get_node_for_span {
- local($this, $start, $end, *chart_ht) = @_;
-
- return "" unless defined($chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end});
- my @node_ids = sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end}};
-
- return (@node_ids) ? $node_ids[0] : "";
-}
-
-sub get_node_for_span_and_type {
- local($this, $start, $end, *chart_ht, $type) = @_;
-
- return "" unless defined($chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end});
- my @node_ids = sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end}};
-
- foreach $node_id (@node_ids) {
- return $node_id if $chart_ht{NODE_TYPE}->{$node_id} eq $type;
- }
- return "";
-}
-
-sub get_node_roman {
- local($this, $node_id, *chart_id, $default) = @_;
-
- $default = "" unless defined($default);
- my $roman = $chart_ht{NODE_ROMAN}->{$node_id};
- return (defined($roman)) ? $roman : $default;
-}
-
-sub set_node_id_slot_value {
- local($this, $node_id, $slot, $value, *chart_id) = @_;
-
- $chart_ht{NODE_SLOT}->{$node_id}->{$slot} = $value;
-}
-
-sub copy_slot_values {
- local($this, $old_node_id, $new_node_id, *chart_id, @slots) = @_;
-
- if (@slots) {
- foreach $slot (keys %{$chart_ht{NODE_SLOT}->{$old_node_id}}) {
- if (($slots[0] eq "all") || $util->member($slot, @slots)) {
- my $value = $chart_ht{NODE_SLOT}->{$old_node_id}->{$slot};
- $chart_ht{NODE_SLOT}->{$new_node_id}->{$slot} = $value if defined($value);
- }
- }
- }
-}
-
-sub get_node_id_slot_value {
- local($this, $node_id, $slot, *chart_id, $default) = @_;
-
- $default = "" unless defined($default);
- my $value = $chart_ht{NODE_SLOT}->{$node_id}->{$slot};
- return (defined($value)) ? $value : $default;
-}
-
-sub get_node_for_span_with_slot_value {
- local($this, $start, $end, $slot, *chart_id, $default) = @_;
-
- $default = "" unless defined($default);
- return $default unless defined($chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end});
- my @node_ids = sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end}};
- foreach $node_id (@node_ids) {
- my $value = $chart_ht{NODE_SLOT}->{$node_id}->{$slot};
- return $value if defined($value);
- }
- return $default;
-}
-
-sub get_node_for_span_with_slot {
- local($this, $start, $end, $slot, *chart_id, $default) = @_;
-
- $default = "" unless defined($default);
- return $default unless defined($chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end});
- my @node_ids = sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end}};
- foreach $node_id (@node_ids) {
- my $value = $chart_ht{NODE_SLOT}->{$node_id}->{$slot};
- return $node_id if defined($value);
- }
- return $default;
-}
-
-sub register_new_complex_number_span_segment {
- local($this, $start, $mid, $end, *chart_id, $line_number) = @_;
- # e.g. 4 10 (= 40); 20 5 (= 25)
- # might become part of larger complex number span, e.g. 4 1000 3 100 20 1
-
- # print STDERR "register_new_complex_number_span_segment $start-$mid-$end\n" if $line_number == 43;
- if (defined($old_start = $chart_ht{COMPLEX_NUMERIC_END_START}->{$mid})) {
- undef($chart_ht{COMPLEX_NUMERIC_END_START}->{$mid});
- $chart_ht{COMPLEX_NUMERIC_START_END}->{$old_start} = $end;
- $chart_ht{COMPLEX_NUMERIC_END_START}->{$end} = $old_start;
- } else {
- $chart_ht{COMPLEX_NUMERIC_START_END}->{$start} = $end;
- $chart_ht{COMPLEX_NUMERIC_END_START}->{$end} = $start;
- }
-}
-
-sub romanize_by_token_with_caching {
- local($this, $s, $lang_code, $output_style, *ht, *pinyin_ht, $initial_char_offset, $control, $line_number) = @_;
-
- $control = "" unless defined($control);
- my $return_chart_p = ($control =~ /return chart/i);
- my $return_offset_mappings_p = ($control =~ /return offset mappings/i);
- return $this->romanize($s, $lang_code, $output_style, *ht, *pinyin_ht, $initial_char_offset, $control, $line_number)
- if $return_chart_p || $return_offset_mappings_p;
- my $result = "";
- my @separators = ();
- my @tokens = ();
- $s =~ s/\n$//; # Added May 2, 2019 as bug-fix (duplicate empty lines)
- while (($sep, $token, $rest) = ($s =~ /^(\s*)(\S+)(.*)$/)) {
- push(@separators, $sep);
- push(@tokens, $token);
- $s = $rest;
- }
- push(@separators, $s);
- while (@tokens) {
- my $sep = shift @separators;
- my $token = shift @tokens;
- $result .= $sep;
- if ($token =~ /^[\x00-\x7F]*$/) { # all ASCII
- $result .= $token;
- } else {
- my $rom_token = $ht{CACHED_ROMANIZATION}->{$lang_code}->{$token};
- unless (defined($rom_token)) {
- $rom_token = $this->romanize($token, $lang_code, $output_style, *ht, *pinyin_ht, $initial_char_offset, $control, $line_number);
- $ht{CACHED_ROMANIZATION}->{$lang_code}->{$token} = $rom_token if defined($rom_token);
- }
- $result .= $rom_token;
- }
- }
- my $sep = shift @separators;
- $result .= $sep if defined($sep);
-
- return $result;
-}
-
-sub romanize {
- local($this, $s, $lang_code, $output_style, *ht, *pinyin_ht, $initial_char_offset, $control, $line_number, $initial_rom_char_offset) = @_;
-
- my $orig_lang_code = $lang_code;
- # Check whether the text (to be romanized) starts with a language code directive.
- if (($line_lang_code) = ($s =~ /^::lcode\s+([a-z][a-z][a-z])\s/)) {
- $lang_code = $line_lang_code;
- }
- $initial_char_offset = 0 unless defined($initial_char_offset);
- $initial_rom_char_offset = 0 unless defined($initial_rom_char_offset);
- $control = "" unless defined($control);
- my $return_chart_p = ($control =~ /return chart/i);
- my $return_offset_mappings_p = ($control =~ /return offset mappings/i);
- $line_number = "" unless defined($line_number);
- my @chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht);
- my $n_characters = $#chars + 1;
- %chart_ht = ();
- $chart_ht{N_CHARS} = $n_characters;
- $chart_ht{N_NODES} = 0;
- my $char = "";
- my $char_name = "";
- my $prev_script = "";
- my $current_script = "";
- my $script_start = 0;
- my $script_end = 0;
- my $prev_letter_plus_script = "";
- my $current_letter_plus_script = "";
- my $letter_plus_script_start = 0;
- my $letter_plus_script_end = 0;
- my $log ="";
- my $n_right_to_left_chars = 0;
- my $n_left_to_right_chars = 0;
- my $hebrew_word_start = ""; # used to identify Hebrew words with points
- my $hebrew_word_contains_point = 0;
- my $current_word_start = "";
- my $current_word_script = "";
- my $braille_all_caps_p = 0;
-
- # prep
- foreach $i ((0 .. ($#chars + 1))) {
- if ($i <= $#chars) {
- $char = $chars[$i];
- $chart_ht{ORIG_CHAR}->{$i} = $char;
- $char_name = $ht{UTF_TO_CHAR_NAME}->{$char} || "";
- $chart_ht{CHAR_NAME}->{$i} = $char_name;
- $current_script = $this->char_name_to_script($char_name, *ht);
- $current_script_direction = $ht{DIRECTION}->{$current_script} || '';
- if ($current_script_direction eq 'right-to-left') {
- $n_right_to_left_chars++;
- } elsif (($char =~ /^[a-z]$/i) || ! ($char =~ /^[\x00-\x7F]$/)) {
- $n_left_to_right_chars++;
- }
- $chart_ht{CHAR_SCRIPT}->{$i} = $current_script;
- $chart_ht{SCRIPT_SEGMENT_START}->{$i} = ""; # default value, to be updated later
- $chart_ht{SCRIPT_SEGMENT_END}->{$i} = ""; # default value, to be updated later
- $chart_ht{LETTER_TOKEN_SEGMENT_START}->{$i} = ""; # default value, to be updated later
- $chart_ht{LETTER_TOKEN_SEGMENT_END}->{$i} = ""; # default value, to be updated later
- $subjoined_char_p = $this->subjoined_char_p($char_name);
- $chart_ht{CHAR_SUBJOINED}->{$i} = $subjoined_char_p;
- $letter_plus_char_p = $this->letter_plus_char_p($char_name);
- $chart_ht{CHAR_LETTER_PLUS}->{$i} = $letter_plus_char_p;
- $current_letter_plus_script = ($letter_plus_char_p) ? $current_script : "";
- $numeric_value = $ht{UTF_TO_NUMERIC}->{$char};
- $numeric_value = "" unless defined($numeric_value);
- $annotation = $ht{UTF_ANNOTATION}->{$char};
- $annotation = "" unless defined($annotation);
- $chart_ht{CHAR_NUMERIC_VALUE}->{$i} = $numeric_value;
- $chart_ht{CHAR_ANNOTATION}->{$i} = $annotation;
- $syllable_info = $ht{UTF_TO_SYLLABLE_INFO}->{$char} || "";
- $chart_ht{CHAR_SYLLABLE_INFO}->{$i} = $syllable_info;
- $tone_mark = $ht{UTF_TO_TONE_MARK}->{$char} || "";
- $chart_ht{CHAR_TONE_MARK}->{$i} = $tone_mark;
- } else {
- $char = "";
- $char_name = "";
- $current_script = "";
- $current_letter_plus_script = "";
- }
- if ($char_name =~ /^HEBREW (LETTER|POINT|PUNCTUATION GERESH) /) {
- $hebrew_word_start = $i if $hebrew_word_start eq "";
- $hebrew_word_contains_point = 1 if $char_name =~ /^HEBREW POINT /;
- } elsif ($hebrew_word_start ne "") {
- if ($hebrew_word_contains_point) {
- foreach $j (($hebrew_word_start .. ($i-1))) {
- $chart_ht{CHAR_PART_OF_POINTED_HEBREW_WORD}->{$j} = 1;
- }
- $chart_ht{CHAR_START_OF_WORD}->{$hebrew_word_start} = 1;
- $chart_ht{CHAR_END_OF_WORD}->{($i-1)} = 1;
- }
- $hebrew_word_start = "";
- $hebrew_word_contains_point = 0;
- }
- my $part_of_word_p = $current_script
- && ($this->letter_plus_char_p($char_name)
- || $this->subjoined_char_p($char_name)
- || ($char_name =~ /\b(LETTER|SYLLABLE|SYLLABICS|LIGATURE)\b/));
-
- # Braille punctuation
- my $end_offset = 0;
- if ($char_name =~ /^Braille\b/i) {
- if (($char =~ /^\s*$/) || ($char_name =~ /BLANK/)) {
- $part_of_word_p = 0;
- $braille_all_caps_p = 0;
- } elsif ($chart_ht{NOT_PART_OF_WORD_P}->{$i}) {
- $part_of_word_p = 0;
- $braille_all_caps_p = 0;
- } elsif ((keys %{$ht{WORD_EXTERNAL_PUNCTUATION_LANG_SPEC}->{$lang_code}->{$char}})
- || (keys %{$ht{WORD_EXTERNAL_PUNCTUATION}->{$char}})) {
- $part_of_word_p = 0;
- $braille_all_caps_p = 0;
- } elsif (($i+1 <= $#chars)
- && ($s1 = $char . $chars[$i+1])
- && ((keys %{$ht{WORD_EXTERNAL_PUNCTUATION_LANG_SPEC}->{$lang_code}->{$s1}})
- || (keys %{$ht{WORD_EXTERNAL_PUNCTUATION}->{$s1}}))) {
- $part_of_word_p = 0;
- $braille_all_caps_p = 0;
- $chart_ht{NOT_PART_OF_WORD_P}->{($i+1)} = 1;
- } elsif (($i+2 <= $#chars)
- && ($s2 = $char . $chars[$i+1] . $chars[$i+2])
- && ((keys %{$ht{WORD_EXTERNAL_PUNCTUATION_LANG_SPEC}->{$lang_code}->{$s2}})
- || (keys %{$ht{WORD_EXTERNAL_PUNCTUATION}->{$s2}}))) {
- $part_of_word_p = 0;
- $braille_all_caps_p = 0;
- $chart_ht{NOT_PART_OF_WORD_P}->{($i+1)} = 1;
- $chart_ht{NOT_PART_OF_WORD_P}->{($i+2)} = 1;
- } elsif (($i+1 <= $#chars)
- && ($char eq $braille_capital_letter_indicator)
- && ($chars[$i+1] eq $braille_capital_letter_indicator)) {
- $braille_all_caps_p = 1;
- } else {
- $part_of_word_p = 1;
- }
- # last period in Braille text is also not part_of_word_p
- if (($char eq $braille_period)
- && (($i == $#chars)
- || (($i < $#chars)
- && (! $this->braille_string_p($chars[$i+1]))))) {
- $part_of_word_p = 0;
- }
- # period before other word-external punctuation is also not part_of_word_p
- if (($i > 0)
- && ($chars[$i-1] eq $braille_period)
- && (! $part_of_word_p)
- && ($current_word_start ne "")) {
- $end_offset = -1;
- }
- } else {
- $braille_all_caps_p = 0;
- }
- $chart_ht{BRAILLE_ALL_CAPS_P}->{$i} = $braille_all_caps_p;
-
- if (($current_word_start ne "")
- && ((! $part_of_word_p)
- || ($current_script ne $current_word_script))) {
- # END OF WORD
- $chart_ht{CHAR_START_OF_WORD}->{$current_word_start} = 1;
- $chart_ht{CHAR_END_OF_WORD}->{($i-1+$end_offset)} = 1;
- my $word = join("", @chars[$current_word_start .. ($i-1+$end_offset)]);
- $chart_ht{WORD_START_END}->{$current_word_start}->{$i} = $word;
- $chart_ht{WORD_END_START}->{$i+$end_offset}->{$current_word_start} = $word;
- # print STDERR "Word ($current_word_start-$i+$end_offset): $word ($current_word_script)\n";
- $current_word_start = "";
- $current_word_script = "";
- }
- if ($part_of_word_p && ($current_word_start eq "")) {
- # START OF WORD
- $current_word_start = $i;
- $current_word_script = $current_script;
- }
- # print STDERR "$i char: $char ($current_script)\n";
- unless ($current_script eq $prev_script) {
- if ($prev_script && ($i-1 >= $script_start)) {
- my $script_end = $i;
- $chart_ht{SCRIPT_SEGMENT_START_TO_END}->{$script_start} = $script_end;
- $chart_ht{SCRIPT_SEGMENT_END_TO_START}->{$script_end} = $script_start;
- foreach $i (($script_start .. $script_end)) {
- $chart_ht{SCRIPT_SEGMENT_START}->{$i} = $script_start;
- $chart_ht{SCRIPT_SEGMENT_END}->{$i} = $script_end;
- }
- # print STDERR "Script segment $script_start-$script_end: $prev_script\n";
- }
- $script_start = $i;
- }
- unless ($current_letter_plus_script eq $prev_letter_plus_script) {
- if ($prev_letter_plus_script && ($i-1 >= $letter_plus_script_start)) {
- my $letter_plus_script_end = $i;
- $chart_ht{LETTER_TOKEN_SEGMENT_START_TO_END}->{$letter_plus_script_start} = $letter_plus_script_end;
- $chart_ht{LETTER_TOKEN_SEGMENT_END_TO_START}->{$letter_plus_script_end} = $letter_plus_script_start;
- foreach $i (($letter_plus_script_start .. $letter_plus_script_end)) {
- $chart_ht{LETTER_TOKEN_SEGMENT_START}->{$i} = $letter_plus_script_start;
- $chart_ht{LETTER_TOKEN_SEGMENT_END}->{$i} = $letter_plus_script_end;
- }
- # print STDERR "Script token segment $letter_plus_script_start-$letter_plus_script_end: $prev_letter_plus_script\n";
- }
- $letter_plus_script_start = $i;
- }
- $prev_script = $current_script;
- $prev_letter_plus_script = $current_letter_plus_script;
- }
- $ht{STRING_IS_DOMINANTLY_RIGHT_TO_LEFT}->{$s} = 1 if $n_right_to_left_chars > $n_left_to_right_chars;
-
- # main
- my $i = 0;
- while ($i <= $#chars) {
- my $char = $chart_ht{ORIG_CHAR}->{$i};
- my $current_script = $chart_ht{CHAR_SCRIPT}->{$i};
- $chart_ht{CHART_CONTAINS_SCRIPT}->{$current_script} = 1;
- my $script_segment_start = $chart_ht{SCRIPT_SEGMENT_START}->{$i};
- my $script_segment_end = $chart_ht{SCRIPT_SEGMENT_END}->{$i};
- my $char_name = $chart_ht{CHAR_NAME}->{$i};
- my $subjoined_char_p = $chart_ht{CHAR_SUBJOINED}->{$i};
- my $letter_plus_char_p = $chart_ht{CHAR_LETTER_PLUS}->{$i};
- my $numeric_value = $chart_ht{CHAR_NUMERIC_VALUE}->{$i};
- my $annotation = $chart_ht{CHAR_ANNOTATION}->{$i};
- # print STDERR " $char_name annotation: $annotation\n" if $annotation;
- my $tone_mark = $chart_ht{CHAR_TONE_MARK}->{$i};
- my $found_char_mapping_p = 0;
- my $prev_char_name = ($i >= 1) ? $chart_ht{CHAR_NAME}->{($i-1)} : "";
- my $prev2_script = ($i >= 2) ? $chart_ht{CHAR_SCRIPT}->{($i-2)} : "";
- my $prev_script = ($i >= 1) ? $chart_ht{CHAR_SCRIPT}->{($i-1)} : "";
- my $next_script = ($i < $#chars) ? $chart_ht{CHAR_SCRIPT}->{($i+1)} : "";
- my $next_char = ($i < $#chars) ? $chart_ht{ORIG_CHAR}->{($i+1)} : "";
- my $next_char_name = $ht{UTF_TO_CHAR_NAME}->{$next_char} || "";
- my $prev2_letter_plus_char_p = ($i >= 2) ? $chart_ht{CHAR_LETTER_PLUS}->{($i-2)} : 0;
- my $prev_letter_plus_char_p = ($i >= 1) ? $chart_ht{CHAR_LETTER_PLUS}->{($i-1)} : 0;
- my $next_letter_plus_char_p = ($i < $#chars) ? $chart_ht{CHAR_LETTER_PLUS}->{($i+1)} : 0;
- my $next_index = $i + 1;
-
- # Braille numeric mode
- if ($char eq $braille_number_indicator) {
- my $offset = 0;
- my $numeric_value = "";
- my $digit;
- while ($i+$offset < $#chars) {
- $offset++;
- my $offset_char = $chart_ht{ORIG_CHAR}->{$i+$offset};
- if (defined($digit = $ht{BRAILLE_TO_DIGIT}->{$offset_char})) {
- $numeric_value .= $digit;
- } elsif (($offset_char eq $braille_decimal_point)
- || ($ht{UTF_CHAR_MAPPING}->{$offset_char}->{"."})) {
- $numeric_value .= ".";
- } elsif ($offset_char eq $braille_comma) {
- $numeric_value .= ",";
- } elsif ($offset_char eq $braille_numeric_space) {
- $numeric_value .= " ";
- } elsif ($offset_char eq $braille_solidus) {
- $numeric_value .= "/";
- } elsif ($offset_char eq $braille_number_indicator) {
- # stay in Braille numeric mode
- } elsif ($offset_char eq $braille_letter_indicator) {
- # consider as part of number, but without contributing to numeric_value
- last;
- } else {
- $offset--;
- last;
- }
- }
- if ($offset) {
- $next_index = $i + $offset + 1;
- $node_id = $this->add_node($numeric_value, $i, $next_index, *chart_ht, "", "braille number");
- $found_char_mapping_p = 1;
- }
- }
-
- unless ($found_char_mapping_p) {
- foreach $string_length (reverse(1 .. 6)) {
- next if ($i + $string_length-1) > $#chars;
- my $start_of_word_p = $chart_ht{CHAR_START_OF_WORD}->{$i} || 0;
- my $end_of_word_p = $chart_ht{CHAR_END_OF_WORD}->{($i+$string_length-1)} || 0;
- my $multi_char_substring = join("", @chars[$i..($i+$string_length-1)]);
- my @mappings = keys %{$ht{UTF_CHAR_MAPPING_LANG_SPEC}->{$lang_code}->{$multi_char_substring}};
- @mappings = keys %{$ht{UTF_CHAR_MAPPING}->{$multi_char_substring}} unless @mappings;
- my @mappings_whole = ();
- my @mappings_start_or_end = ();
- my @mappings_other = ();
- foreach $mapping (@mappings) {
- next if $mapping =~ /\(__.*__\)/;
- if ($ht{USE_ONLY_FOR_WHOLE_WORD_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$mapping}
- || $ht{USE_ONLY_FOR_WHOLE_WORD}->{$multi_char_substring}->{$mapping}) {
- push(@mappings_whole, $mapping) if $start_of_word_p && $end_of_word_p;
- } elsif ($ht{USE_ONLY_AT_START_OF_WORD_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$mapping}
- || $ht{USE_ONLY_AT_START_OF_WORD}->{$multi_char_substring}->{$mapping}) {
- push(@mappings_start_or_end, $mapping) if $start_of_word_p;
- } elsif ($ht{USE_ONLY_AT_END_OF_WORD_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$mapping}
- || $ht{USE_ONLY_AT_END_OF_WORD}->{$multi_char_substring}->{$mapping}) {
- push(@mappings_start_or_end, $mapping) if $end_of_word_p;
- } else {
- push(@mappings_other, $mapping);
- }
- }
- @mappings = @mappings_whole;
- @mappings = @mappings_start_or_end unless @mappings;
- @mappings = @mappings_other unless @mappings;
- foreach $mapping (@mappings) {
- next if $mapping =~ /\(__.*__\)/;
- if ($ht{DONT_USE_AT_START_OF_WORD_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$mapping}
- || $ht{DONT_USE_AT_START_OF_WORD}->{$multi_char_substring}->{$mapping}) {
- next if $start_of_word_p;
- }
- if ($ht{DONT_USE_AT_END_OF_WORD_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$mapping}
- || $ht{DONT_USE_AT_END_OF_WORD}->{$multi_char_substring}->{$mapping}) {
- next if $end_of_word_p;
- }
- my $mapping2 = ($chart_ht{BRAILLE_ALL_CAPS_P}->{$i}) ? (uc $mapping) : $mapping;
- $node_id = $this->add_node($mapping2, $i, $i+$string_length, *chart_ht, "", "multi-char-mapping");
- $next_index = $i + $string_length;
- $found_char_mapping_p = 1;
- if ($annotation) {
- @annotation_elems = split(/,\s*/, $annotation);
- foreach $annotation_elem (@annotation_elems) {
- if (($a_slot, $a_value) = ($annotation_elem =~ /^(\S+?):(\S+)\s*$/)) {
- $this->set_node_id_slot_value($node_id, $a_slot, $a_value, *chart_ht);
- } else {
- $this->set_node_id_slot_value($node_id, $annotation_elem, 1, *chart_ht);
- }
- }
- }
- }
- my @alt_mappings = keys %{$ht{UTF_CHAR_ALT_MAPPING_LANG_SPEC}->{$lang_code}->{$multi_char_substring}};
- @alt_mappings = keys %{$ht{UTF_CHAR_ALT_MAPPING}->{$multi_char_substring}} unless @alt_mappings;
- @alt_mappings = () if ($#alt_mappings == 0) && ($alt_mappings[0] eq "_NONE_");
- foreach $alt_mapping (@alt_mappings) {
- if ($chart_ht{CHAR_PART_OF_POINTED_HEBREW_WORD}->{$i}) {
- next unless
- $ht{USE_ALT_IN_POINTED_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$alt_mapping}
- || $ht{USE_ALT_IN_POINTED}->{$multi_char_substring}->{$alt_mapping};
- }
- if ($ht{USE_ALT_ONLY_FOR_WHOLE_WORD_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$alt_mapping}
- || $ht{USE_ALT_ONLY_FOR_WHOLE_WORD}->{$multi_char_substring}->{$alt_mapping}) {
- next unless $start_of_word_p && $end_of_word_p;
- }
- if ($ht{USE_ALT_ONLY_AT_START_OF_WORD_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$alt_mapping}
- || $ht{USE_ALT_ONLY_AT_START_OF_WORD}->{$multi_char_substring}->{$alt_mapping}) {
- next unless $start_of_word_p;
- }
- if ($ht{USE_ALT_ONLY_AT_END_OF_WORD_LANG_SPEC}->{$lang_code}->{$multi_char_substring}->{$alt_mapping}
- || $ht{USE_ALT_ONLY_AT_END_OF_WORD}->{$multi_char_substring}->{$alt_mapping}) {
- next unless $end_of_word_p;
- }
- my $alt_mapping2 = ($chart_ht{BRAILLE_ALL_CAPS_P}->{$i}) ? (uc $alt_mapping) : $alt_mapping;
- $node_id = $this->add_node($alt_mapping2, $i, $i+$string_length, *chart_ht, "alt", "multi-char-mapping");
- if ($annotation) {
- @annotation_elems = split(/,\s*/, $annotation);
- foreach $annotation_elem (@annotation_elems) {
- if (($a_slot, $a_value) = ($annotation_elem =~ /^(\S+?):(\S+)\s*$/)) {
- $this->set_node_id_slot_value($node_id, $a_slot, $a_value, *chart_ht);
- } else {
- $this->set_node_id_slot_value($node_id, $annotation_elem, 1, *chart_ht);
- }
- }
- }
- }
- }
- }
- unless ($found_char_mapping_p) {
- my $prev_node_id = $this->get_node_for_span($i-4, $i, *chart_ht)
- || $this->get_node_for_span($i-3, $i, *chart_ht)
- || $this->get_node_for_span($i-2, $i, *chart_ht)
- || $this->get_node_for_span($i-1, $i, *chart_ht);
- my $prev_char_roman = ($prev_node_id) ? $this->get_node_roman($prev_node_id, *chart_id) : "";
- my $prev_node_start = ($prev_node_id) ? $chart_ht{NODE_START}->{$prev_node_id} : "";
-
- # Number
- if (($numeric_value =~ /\d/)
- && (! ($char_name =~ /SUPERSCRIPT/))) {
- my $prev_numeric_value = $this->get_node_for_span_with_slot_value($i-1, $i, "numeric-value", *chart_id);
- my $sep = "";
- $sep = " " if ($char_name =~ /^vulgar fraction /i) && ($prev_numeric_value =~ /\d/);
- $node_id = $this->add_node("$sep$numeric_value", $i, $i+1, *chart_ht, "", "number");
- $this->set_node_id_slot_value($node_id, "numeric-value", $numeric_value, *chart_ht);
- if ((($prev_numeric_value =~ /\d/) && ($numeric_value =~ /\d\d/))
- || (($prev_numeric_value =~ /\d\d/) && ($numeric_value =~ /\d/))) {
- # pull in any other parts of single digits
- my $j = 1;
- # pull in any single digits adjoining on left
- if ($prev_numeric_value =~ /^\d$/) {
- while (1) {
- if (($i-$j-1 >= 0)
- && defined($digit_value = $this->get_node_for_span_with_slot_value($i-$j-1, $i-$j, "numeric-value", *chart_id))
- && ($digit_value =~ /^\d$/)) {
- $j++;
- } elsif (($i-$j-2 >= 0)
- && ($chart_ht{ORIG_CHAR}->{($i-$j-1)} =~ /^[.,]$/)
- && defined($digit_value = $this->get_node_for_span_with_slot_value($i-$j-2, $i-$j-1, "numeric-value", *chart_id))
- && ($digit_value =~ /^\d$/)) {
- $j += 2;
- } else {
- last;
- }
- }
- }
- # pull in any single digits adjoining on right
- my $k = 0;
- if ($numeric_value =~ /^\d$/) {
- while (1) {
- if (defined($next_numeric_value = $chart_ht{CHAR_NUMERIC_VALUE}->{($i+$k+1)})
- && ($next_numeric_value =~ /^\d$/)) {
- $k++;
- } else {
- last;
- }
- }
- }
- $this->register_new_complex_number_span_segment($i-$j, $i, $i+$k+1, *chart_ht, $line_number);
- }
- if ($chinesePM->string_contains_utf8_cjk_unified_ideograph_p($char)
- && ($tonal_translit = $chinesePM->tonal_pinyin($char, *pinyin_ht, ""))) {
- $de_accented_translit = $util->de_accent_string($tonal_translit);
- if ($numeric_value =~ /^(10000|1000000000000|10000000000000000)$/) {
- $chart_ht{NODE_TYPE}->{$node_id} = "alt"; # keep, but demote
- $alt_node_id = $this->add_node($de_accented_translit, $i, $i+1, *chart_ht, "", "CJK");
- } else {
- $alt_node_id = $this->add_node($de_accented_translit, $i, $i+1, *chart_ht, "alt", "CJK");
- }
- }
-
- # ASCII
- } elsif ($char =~ /^[\x00-\x7F]$/) {
- $this->add_node($char, $i, $i+1, *chart_ht, "", "ASCII"); # ASCII character, incl. control characters
-
- # Emoji, dingbats, pictographs
- } elsif ($char =~ /^(\xE2[\x98-\x9E]|\xF0\x9F[\x8C-\xA7])/) {
- $this->add_node($char, $i, $i+1, *chart_ht, "", "pictograph");
-
- # Hangul (Korean)
- } elsif (($char =~ /^[\xEA-\xED]/)
- && ($romanized_char = $this->unicode_hangul_romanization($char))) {
- $this->add_node($romanized_char, $i, $i+1, *chart_ht, "", "Hangul");
-
- # CJK (Chinese, Japanese, Korean)
- } elsif ($chinesePM->string_contains_utf8_cjk_unified_ideograph_p($char)
- && ($tonal_translit = $chinesePM->tonal_pinyin($char, *pinyin_ht, ""))) {
- $de_accented_translit = $util->de_accent_string($tonal_translit);
- $this->add_node($de_accented_translit, $i, $i+1, *chart_ht, "", "CJK");
-
- # Virama (cancel preceding vowel in Abugida scripts)
- } elsif ($char_name =~ /\bSIGN (?:VIRAMA|AL-LAKUNA|ASAT|COENG|PAMAAEH)\b/) {
- # VIRAMA: cancel preceding default vowel (in Abugida scripts)
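- # e.g. Devanagari KA ("ka") followed by VIRAMA yields "k": the inherent default vowel of the preceding consonant node is stripped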
- if (($prev_script eq $current_script)
- && (($prev_char_roman_consonant, $prev_char_roman_vowel) = ($prev_char_roman =~ /^(.*[bcdfghjklmnpqrstvwxyz])([aeiou]+)$/i))
- && ($ht{SCRIPT_ABUDIGA_DEFAULT_VOWEL}->{$current_script}->{(lc $prev_char_roman_vowel)})) {
- $this->add_node($prev_char_roman_consonant, $prev_node_start, $i+1, *chart_ht, "", "virama");
- } else {
- $this->add_node("", $i, $i+1, *chart_ht, "", "unexpected-virama");
- }
-
- # Nukta (special (typically foreign) variant)
- } elsif ($char_name =~ /\bSIGN (?:NUKTA)\b/) {
- # NUKTA (dot): indicates special (typically foreign) variant; normally covered by multi-mappings
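- # here the nukta is simply absorbed into the preceding node and recorded in a "nukta" slot; the actual variant letters (e.g. Devanagari ja + nukta = za) come from the multi-character mapping tables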
- if ($prev_script eq $current_script) {
- my $node_id = $this->add_node($prev_char_roman, $prev_node_start, $i+1, *chart_ht, "", "nukta");
- $this->copy_slot_values($prev_node_id, $node_id, *chart_ht, "all");
- $this->set_node_id_slot_value($node_id, "nukta", 1, *chart_ht);
- } else {
- $this->add_node("", $i, $i+1, *chart_ht, "", "unexpected-nukta");
- }
-
- # Zero-width character, incl. zero width space/non-joiner/joiner, left-to-right/right-to-left mark
- } elsif ($char =~ /^\xE2\x80[\x8B-\x8F\xAA-\xAE]$/) {
- if ($prev_node_id) {
- my $node_id = $this->add_node($prev_char_roman, $prev_node_start, $i+1, *chart_ht, "", "zero-width-char");
- $this->copy_slot_values($prev_node_id, $node_id, *chart_ht, "all");
- } else {
- $this->add_node("", $i, $i+1, *chart_ht, "", "zero-width-char");
- }
- } elsif (($char =~ /^\xEF\xBB\xBF$/) && $prev_node_id) { # OK to leave byte-order-mark at beginning of line
- my $node_id = $this->add_node($prev_char_roman, $prev_node_start, $i+1, *chart_ht, "", "zero-width-char");
- $this->copy_slot_values($prev_node_id, $node_id, *chart_ht, "all");
-
- # Tone mark
- } elsif ($tone_mark) {
- if ($prev_script eq $current_script) {
- my $node_id = $this->add_node($prev_char_roman, $prev_node_start, $i+1, *chart_ht, "", "tone-mark");
- $this->copy_slot_values($prev_node_id, $node_id, *chart_ht, "all");
- $this->set_node_id_slot_value($node_id, "tone-mark", $tone_mark, *chart_ht);
- } else {
- $this->add_node("", $i, $i+1, *chart_ht, "", "unexpected-tone-mark");
- }
-
- # Diacritic
- } elsif (($char_name =~ /\b(ACCENT|TONE|COMBINING DIAERESIS|COMBINING DIAERESIS BELOW|COMBINING MACRON|COMBINING VERTICAL LINE ABOVE|COMBINING DOT ABOVE RIGHT|COMBINING TILDE|COMBINING CYRILLIC|MUUSIKATOAN|TRIISAP)\b/) && ($ht{UTF_TO_CAT}->{$char} =~ /^Mn/)) {
- if ($prev_script eq $current_script) {
- my $node_id = $this->add_node($prev_char_roman, $prev_node_start, $i+1, *chart_ht, "", "diacritic");
- $this->copy_slot_values($prev_node_id, $node_id, *chart_ht, "all");
- $diacritic = lc $char_name;
- $diacritic =~ s/^.*(?:COMBINING CYRILLIC|COMBINING|SIGN)\s+//i;
- $diacritic =~ s/^.*(ACCENT|TONE)/$1/i;
- $diacritic =~ s/^\s*//;
- $this->set_node_id_slot_value($node_id, "diacritic", $diacritic, *chart_ht);
- # print STDERR "diacritic: $diacritic\n";
- } else {
- $this->add_node("", $i, $i+1, *chart_ht, "", "unexpected-diacritic");
- }
-
- # Romanize to find out more
- } elsif ($char_name) {
- if (defined($romanized_char = $this->romanize_char_at_position($i, $lang_code, $output_style, *ht, *chart_ht))) {
- # print STDERR "ROM l.$line_number/$i: $romanized_char\n" if $line_number =~ /^[12]$/;
- print STDOUT "ROM l.$line_number/$i: $romanized_char\n" if $verbosePM;
-
- # Empty string mapping
- if ($romanized_char eq "\"\"") {
- $this->add_node("", $i, $i+1, *chart_ht, "", "empty-string-mapping");
- # consider adding something for implausible romanizations of length 6+
-
- # keep original character (instead of romanized_char lengthener, character-18b00 etc.)
- } elsif (($romanized_char =~ /^(character|lengthener|modifier)/)) {
- $this->add_node($char, $i, $i+1, *chart_ht, "", "nevermind-keep-original");
-
- # Syllabic suffix in Abugida languages, e.g. -m, -ng
- } elsif (($romanized_char =~ /^\+(H|M|N|NG)$/i)
- && ($prev_script eq $current_script)
- && ($ht{SCRIPT_ABUDIGA_DEFAULT_VOWEL}->{$current_script}->{"a"})) {
- my $core_suffix = $romanized_char;
- $core_suffix =~ s/^\+//;
- if ($prev_char_roman =~ /[aeiou]$/i) {
- $this->add_node($core_suffix, $i, $i+1, *chart_ht, "", "syllable-end-consonant");
- } else {
- $this->add_node(join("", $prev_char_roman, "a", $core_suffix), $prev_node_start, $i+1, *chart_ht, "", "syllable-end-consonant-with-added-a");
- $this->add_node(join("", "a", $core_suffix), $i, $i+1, *chart_ht, "backup", "syllable-end-consonant");
- }
-
- # Japanese special cases
- } elsif ($char_name =~ /(?:HIRAGANA|KATAKANA) LETTER SMALL Y/) {
- if (($prev_script eq $current_script)
- && (($prev_char_roman_consonant) = ($prev_char_roman =~ /^(.*[bcdfghjklmnpqrstvwxyz])i$/i))) {
- unless ($this->get_node_for_span_and_type($prev_node_start, $i+1, *chart_ht, "")) {
- $this->add_node("$prev_char_roman_consonant$romanized_char", $prev_node_start, $i+1, *chart_ht, "", "japanese-contraction");
- }
- } else {
- $this->add_node($romanized_char, $i, $i+1, *chart_ht, "", "unexpected-japanese-contraction-character");
- }
- } elsif (($prev_script =~ /^(HIRAGANA|KATAKANA)$/i)
- && ($char_name eq "KATAKANA-HIRAGANA PROLONGED SOUND MARK") # Choonpu
- && (($prev_char_roman_vowel) = ($prev_char_roman =~ /([aeiou])$/i))) {
- $this->add_node("$prev_char_roman$prev_char_roman_vowel", $prev_node_start, $i+1, *chart_ht, "", "japanese-vowel-lengthening");
- } elsif (($current_script =~ /^(Hiragana|Katakana)$/i)
- && ($char_name =~ /^(HIRAGANA|KATAKANA) LETTER SMALL TU$/i) # Sokuon/Sukun
- && ($next_script eq $current_script)
- && ($romanized_next_char = $this->romanize_char_at_position_incl_multi($i+1, $lang_code, $output_style, *ht, *chart_ht))
- && (($doubled_consonant) = ($romanized_next_char =~ /^(ch|[bcdfghjklmnpqrstwz])/i))) {
- # Note: $romanized_next_char could be part of a multi-character mapping
- # print STDERR "current_script: $current_script char_name: $char_name next_script: $next_script romanized_next_char: $romanized_next_char doubled_consonant: $doubled_consonant\n";
- $doubled_consonant = "t" if $doubled_consonant eq "ch";
- $this->add_node($doubled_consonant, $i, $i+1, *chart_ht, "", "japanese-consonant-doubling");
-
- # Greek small letter mu to micro-sign (instead of to "m") as used in abbreviations for microgram/micrometer/microliter/microsecond/micromolar/microfarad etc.
- } elsif (($char_name eq "GREEK SMALL LETTER MU")
- && (! ($prev_script =~ /^GREEK$/))
- && ($i < $#chars)
- && ($chart_ht{ORIG_CHAR}->{($i+1)} =~ /^[cfgjlmstv]$/i)) {
- $this->add_node("\xC2\xB5", $i, $i+1, *chart_ht, "", "greek-mu-to-micro-sign");
-
- # Gurmukhi addak (doubles following consonant)
- } elsif (($current_script eq "Gurmukhi")
- && ($char_name eq "GURMUKHI ADDAK")) {
- if (($next_script eq $current_script)
- && ($romanized_next_char = $this->romanize_char_at_position_incl_multi($i+1, $lang_code, $output_style, *ht, *chart_ht))
- && (($doubled_consonant) = ($romanized_next_char =~ /^([bcdfghjklmnpqrstvwxz])/i))) {
- $this->add_node($doubled_consonant, $i, $i+1, *chart_ht, "", "gurmukhi-consonant-doubling");
- } else {
- $this->add_node("'", $i, $i+1, *chart_ht, "", "gurmukhi-unexpected-addak");
- }
-
- # Subjoined character
- } elsif ($subjoined_char_p
- && ($prev_script eq $current_script)
- && (($prev_char_roman_consonant, $prev_char_roman_vowel) = ($prev_char_roman =~ /^(.*[bcdfghjklmnpqrstvwxyz])([aeiou]+)$/i))
- && ($ht{SCRIPT_ABUDIGA_DEFAULT_VOWEL}->{$current_script}->{(lc $prev_char_roman_vowel)})) {
- my $new_roman = "$prev_char_roman_consonant$romanized_char";
- $this->add_node($new_roman, $prev_node_start, $i+1, *chart_ht, "", "subjoined-character");
- # print STDERR " Subjoin l.$line_number/$i: $new_roman\n" if $line_number =~ /^[12]$/;
-
- # Thai special case: written-pre-consonant-spoken-post-consonant
- } elsif (($char_name =~ /THAI CHARACTER/)
- && ($prev_script eq $current_script)
- && ($chart_ht{CHAR_SYLLABLE_INFO}->{($i-1)} =~ /written-pre-consonant-spoken-post-consonant/i)
- && ($prev_char_roman =~ /^[aeiou]+$/i)
- && ($romanized_char =~ /^[bcdfghjklmnpqrstvwxyz]/)) {
- $this->add_node("$romanized_char$prev_char_roman", $prev_node_start, $i+1, *chart_ht, "", "thai-vowel-consonant-swap");
-
- # Thai special case: THAI CHARACTER O ANG (U+0E2D "\xE0\xB8\xAD")
- } elsif ($char_name eq "THAI CHARACTER O ANG") {
- if ($prev_script ne $current_script) {
- $this->add_node("", $i, $i+1, *chart_ht, "", "thai-initial-o-ang-drop");
- } elsif ($next_script ne $current_script) {
- $this->add_node("", $i, $i+1, *chart_ht, "", "thai-final-o-ang-drop");
- } else {
- my $romanized_next_char = $this->romanize_char_at_position($i+1, $lang_code, $output_style, *ht, *chart_ht);
- my $romanized_prev2_char = $this->romanize_char_at_position($i-2, $lang_code, $output_style, *ht, *chart_ht);
- if (($prev_char_roman =~ /^[bcdfghjklmnpqrstvwxz]+$/i)
- && ($romanized_next_char =~ /^[bcdfghjklmnpqrstvwxz]+$/i)) {
- $this->add_node("o", $i, $i+1, *chart_ht, "", "thai-middle-o-ang"); # keep between consonants
- } elsif (($prev2_script eq $current_script)
- && 0
- && ($prev_char_name =~ /^THAI CHARACTER MAI [A-Z]+$/) # Thai tone
- && ($romanized_prev2_char =~ /^[bcdfghjklmnpqrstvwxz]+$/i)
- && ($romanized_next_char =~ /^[bcdfghjklmnpqrstvwxz]+$/i)) {
- $this->add_node("o", $i, $i+1, *chart_ht, "", "thai-middle-o-ang"); # keep between consonant+tone-mark and consonant
- } else {
- $this->add_node("", $i, $i+1, *chart_ht, "", "thai-middle-o-ang-drop"); # drop next to vowel
- }
- }
-
- # Romanization with space
- } elsif ($romanized_char =~ /\s/) {
- $this->add_node($char, $i, $i+1, *chart_ht, "", "space");
-
- # Tibetan special cases
- } elsif ($current_script eq "Tibetan") {
-
- if ($subjoined_char_p
- && ($prev_script eq $current_script)
- && $prev_letter_plus_char_p
- && ($prev_char_roman =~ /^[bcdfghjklmnpqrstvwxyz]+$/i)) {
- $this->add_node("$prev_char_roman$romanized_char", $prev_node_start, $i+1, *chart_ht, "", "subjoined-tibetan-character");
- } elsif ($romanized_char =~ /^-A$/i) {
- my $romanized_next_char = $this->romanize_char_at_position($i+1, $lang_code, $output_style, *ht, *chart_ht);
- if (! $prev_letter_plus_char_p) {
- $this->add_node("'", $i, $i+1, *chart_ht, "", "tibetan-frontal-dash-a");
- } elsif (($prev_script eq $current_script)
- && ($next_script eq $current_script)
- && ($prev_char_roman =~ /[bcdfghjklmnpqrstvwxyz]$/)
- && ($romanized_next_char =~ /^[aeiou]/)) {
- $this->add_node("a'", $i, $i+1, *chart_ht, "", "tibetan-medial-dash-a");
- } elsif (($prev_script eq $current_script)
- && ($next_script eq $current_script)
- && ($prev_char_roman =~ /[aeiou]$/)
- && ($romanized_next_char =~ /[aeiou]/)) {
- $this->add_node("'", $i, $i+1, *chart_ht, "", "tibetan-reduced-medial-dash-a");
- } elsif (($prev_script eq $current_script)
- && (! ($prev_char_roman =~ /[aeiou]/))
- && (! $next_letter_plus_char_p)) {
- $this->add_node("a", $i, $i+1, *chart_ht, "", "tibetan-final-dash-a");
- } else {
- $this->add_node("a", $i, $i+1, *chart_ht, "", "unexpected-tibetan-dash-a");
- }
- } elsif (($romanized_char =~ /^[AEIOU]/i)
- && ($prev_script eq $current_script)
- && ($prev_char_roman =~ /^A$/i)
- && (! $prev2_letter_plus_char_p)) {
- $this->add_node($romanized_char, $prev_node_start, $i+1, *chart_ht, "", "tibetan-dropped-word-initial-a");
- } else {
- $this->add_node($romanized_char, $i, $i+1, *chart_ht, "", "standard-unicode-based-romanization");
- }
-
- # Khmer (for MUUSIKATOAN etc. see under "Diacritic" above)
- } elsif (($current_script eq "Khmer")
- && (($char_roman_consonant, $char_roman_vowel) = ($romanized_char =~ /^(.*[bcdfghjklmnpqrstvwxyz])([ao]+)-$/i))) {
- my $romanized_next_char = $this->romanize_char_at_position($i+1, $lang_code, $output_style, *ht, *chart_ht);
- if (($next_script eq $current_script)
- && ($romanized_next_char =~ /^[aeiouy]/i)) {
- $this->add_node($char_roman_consonant, $i, $i+1, *chart_ht, "", "khmer-vowel-drop");
- } else {
- $this->add_node("$char_roman_consonant$char_roman_vowel", $i, $i+1, *chart_ht, "", "khmer-standard-unicode-based-romanization");
- }
-
- # Abugida: add default vowel
- } elsif ((@abudiga_default_vowels = sort keys %{$ht{SCRIPT_ABUDIGA_DEFAULT_VOWEL}->{$current_script}})
- && ($abudiga_default_vowel = $abudiga_default_vowels[0])
- && ($romanized_char =~ /^[bcdfghjklmnpqrstvwxyz]+$/i)) {
- my $new_roman = join("", $romanized_char, $abudiga_default_vowel);
- $this->add_node($new_roman, $i, $i+1, *chart_ht, "", "standard-unicode-based-romanization-plus-abudiga-default-vowel");
- # print STDERR " Abudiga add default vowel l.$line_number/$i: $new_roman\n" if $line_number =~ /^[12]$/;
-
- # Standard romanization
- } else {
- $node_id = $this->add_node($romanized_char, $i, $i+1, *chart_ht, "", "standard-unicode-based-romanization");
- }
- } else {
- $this->add_node($char, $i, $i+1, *chart_ht, "", "unexpected-original");
- }
- } elsif (defined($romanized_char = $this->romanize_char_at_position($i, $lang_code, $output_style, *ht, *chart_ht))
- && ((length($romanized_char) <= 2)
- || ($ht{UTF_TO_CHAR_ROMANIZATION}->{$char}))) { # or from unicode_overwrite_romanization table
- $romanized_char =~ s/^""$//;
- $this->add_node($romanized_char, $i, $i+1, *chart_ht, "", "romanized-without-character-name");
- } else {
- $this->add_node($char, $i, $i+1, *chart_ht, "", "unexpected-original-without-character-name");
- }
- }
- $i = $next_index;
- }
-
- $this->schwa_deletion(0, $n_characters, *chart_ht, $lang_code);
- $this->default_vowelize_tibetan(0, $n_characters, *chart_ht, $lang_code, $line_number) if $chart_ht{CHART_CONTAINS_SCRIPT}->{"Tibetan"};
- $this->assemble_numbers_in_chart(*chart_ht, $line_number);
-
- if ($return_chart_p) {
- } elsif ($return_offset_mappings_p) {
- ($result, $offset_mappings, $new_char_offset, $new_rom_char_offset) = $this->best_romanized_string(0, $n_characters, *chart_ht, $control, $initial_char_offset, $initial_rom_char_offset);
- } else {
- $result = $this->best_romanized_string(0, $n_characters, *chart_ht) unless $return_chart_p;
- }
-
- if ($verbosePM) {
- my $logfile = "/nfs/isd/ulf/cgi-mt/amr-tmp/uroman-log.txt";
- $util->append_to_file($logfile, $log) if $log && (-r $logfile);
- }
-
- return ($result, $offset_mappings) if $return_offset_mappings_p;
- return *chart_ht if $return_chart_p;
- return $result;
-}
-
-sub string_to_json_string {
- local($this, $s) = @_;
-
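- # Encode the string as a one-element JSON array and strip the surrounding brackets; this yields a properly quoted and escaped JSON string value.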
- utf8::decode($s);
- my $j = JSON->new->utf8->encode([$s]);
- $j =~ s/^\[(.*)\]$/$1/;
- return $j;
-}
-
-sub chart_to_json_romanization_elements {
- local($this, $chart_start, $chart_end, *chart_ht, $line_number) = @_;
-
- my $result = "";
- my $start = $chart_start;
- my $end;
- while ($start < $chart_end) {
- $end = $this->find_end_of_rom_segment($start, $chart_end, *chart_ht);
- my @best_romanizations;
- if (($end && ($start < $end))
- && (@best_romanizations = $this->best_romanizations($start, $end, *chart_ht))) {
- $orig_segment = $this->orig_string_at_span($start, $end, *chart_ht);
- $next_start = $end;
- } else {
- $orig_segment = $chart_ht{ORIG_CHAR}->{$start};
- @best_romanizations = ($orig_segment);
- $next_start = $start + 1;
- }
- $exclusive_end = $next_start - 1;
- # $guarded_orig = $util->string_guard($orig_segment);
- $guarded_orig = $this->string_to_json_string($orig_segment);
- $result .= " { \"line\": $line_number, \"start\": $start, \"end\": $exclusive_end, \"orig\": $guarded_orig, \"roms\": [";
- foreach $i ((0 .. $#best_romanizations)) {
- my $rom = $best_romanizations[$i];
- # my $guarded_rom = $util->string_guard($rom);
- my $guarded_rom = $this->string_to_json_string($rom);
- $result .= " { \"rom\": $guarded_rom";
- # $result .= ", \"alt\": true" if $i >= 1;
- $result .= " }";
- $result .= "," if $i < $#best_romanizations;
- }
- $result .= " ] },\n";
- $start = $next_start;
- }
- return $result;
-}
-
-sub default_vowelize_tibetan {
- local($this, $chart_start, $chart_end, *chart_ht, $lang_code, $line_number) = @_;
-
- # my $verbose = ($line_number == 103);
- # print STDERR "\nStart default_vowelize_tibetan l.$line_number $chart_start-$chart_end\n" if $verbose;
- my $token_start = $chart_start;
- my $next_token_start = $chart_start;
- while (($token_start = $next_token_start) < $chart_end) {
- $next_token_start = $token_start + 1;
-
- next unless $chart_ht{CHAR_LETTER_PLUS}->{$token_start};
- my $current_script = $chart_ht{CHAR_SCRIPT}->{$token_start};
- next unless ($current_script eq "Tibetan");
- my $token_end = $chart_ht{LETTER_TOKEN_SEGMENT_START_TO_END}->{$token_start};
- next unless $token_end;
- next unless $token_end > $token_start;
- $next_token_start = $token_end;
-
- my $start = $token_start;
- my $end;
- my @node_ids = ();
- while ($start < $token_end) {
- $end = $this->find_end_of_rom_segment($start, $chart_end, *chart_ht);
- last unless $end && ($end > $start);
- my @alt_node_ids = sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end}};
- last unless @alt_node_ids;
- push(@node_ids, $alt_node_ids[0]);
- $start = $end;
- }
- my $contains_vowel_p = 0;
- my @romanizations = ();
- foreach $node_id (@node_ids) {
- my $roman = $chart_ht{NODE_ROMAN}->{$node_id};
- $roman = "" unless defined($roman);
- push(@romanizations, $roman);
- $contains_vowel_p = 1 if $roman =~ /[aeiou]/i;
- }
- # print STDERR " old: $token_start-$token_end @romanizations\n" if $verbose;
- unless ($contains_vowel_p) {
- my $default_vowel_target_index;
- if ($#node_ids <= 1) {
- $default_vowel_target_index = 0;
- } elsif ($romanizations[$#romanizations] eq "s") {
- if ($romanizations[($#romanizations-1)] eq "y") {
- $default_vowel_target_index = $#romanizations-1;
- } else {
- $default_vowel_target_index = $#romanizations-2;
- }
- } else {
- $default_vowel_target_index = $#romanizations-1;
- }
- $romanizations[$default_vowel_target_index] .= "a";
- my $old_node_id = $node_ids[$default_vowel_target_index];
- my $old_start = $chart_ht{NODE_START}->{$old_node_id};
- my $old_end = $chart_ht{NODE_END}->{$old_node_id};
- my $old_roman = $chart_ht{NODE_ROMAN}->{$old_node_id};
- my $new_roman = $old_roman . "a";
- my $new_node_id = $this->add_node($new_roman, $old_start, $old_end, *chart_ht, "", "tibetan-default-vowel");
- $this->copy_slot_values($old_node_id, $new_node_id, *chart_ht, "all");
- $chart_ht{NODE_TYPE}->{$old_node_id} = "backup"; # keep, but demote
- }
- if (($romanizations[0] eq "'")
- && ($#romanizations >= 1)
- && ($romanizations[1] =~ /^[o]$/)) {
- my $old_node_id = $node_ids[0];
- my $old_start = $chart_ht{NODE_START}->{$old_node_id};
- my $old_end = $chart_ht{NODE_END}->{$old_node_id};
- my $new_node_id = $this->add_node("", $old_start, $old_end, *chart_ht, "", "tibetan-delete-apostrophe");
- $this->copy_slot_values($old_node_id, $new_node_id, *chart_ht, "all");
- $chart_ht{NODE_TYPE}->{$old_node_id} = "alt"; # keep, but demote
- }
- if (($#node_ids >= 1)
- && ($romanizations[$#romanizations] =~ /^[bcdfghjklmnpqrstvwxz]+y$/)) {
- my $old_node_id = $node_ids[$#romanizations];
- my $old_start = $chart_ht{NODE_START}->{$old_node_id};
- my $old_end = $chart_ht{NODE_END}->{$old_node_id};
- my $old_roman = $chart_ht{NODE_ROMAN}->{$old_node_id};
- my $new_roman = $old_roman . "a";
- my $new_node_id = $this->add_node($new_roman, $old_start, $old_end, *chart_ht, "", "tibetan-syllable-final-vowel");
- $this->copy_slot_values($old_node_id, $new_node_id, *chart_ht, "all");
- $chart_ht{NODE_TYPE}->{$old_node_id} = "alt"; # keep, but demote
- }
- foreach $old_node_id (@node_ids) {
- my $old_roman = $chart_ht{NODE_ROMAN}->{$old_node_id};
- next unless $old_roman =~ /-a/;
- my $old_start = $chart_ht{NODE_START}->{$old_node_id};
- my $old_end = $chart_ht{NODE_END}->{$old_node_id};
- my $new_roman = $old_roman;
- $new_roman =~ s/-a/a/;
- my $new_node_id = $this->add_node($new_roman, $old_start, $old_end, *chart_ht, "", "tibetan-syllable-delete-dash");
- $this->copy_slot_values($old_node_id, $new_node_id, *chart_ht, "all");
- $chart_ht{NODE_TYPE}->{$old_node_id} = "alt"; # keep, but demote
- }
- }
-}
-
-sub schwa_deletion {
- local($this, $chart_start, $chart_end, *chart_ht, $lang_code) = @_;
- # delete word-final simple "a" in Devanagari (e.g. nepaala -> nepaal)
- # see Wikipedia article "Schwa deletion in Indo-Aryan languages"
-
- if ($chart_ht{CHART_CONTAINS_SCRIPT}->{"Devanagari"}) {
- my $script_start = $chart_start;
- my $next_script_start = $chart_start;
- while (($script_start = $next_script_start) < $chart_end) {
- $next_script_start = $script_start + 1;
-
- my $current_script = $chart_ht{CHAR_SCRIPT}->{$script_start};
- next unless ($current_script eq "Devanagari");
- my $script_end = $chart_ht{SCRIPT_SEGMENT_START_TO_END}->{$script_start};
- next unless $script_end;
- next unless $script_end - $script_start >= 2;
- $next_script_start = $script_end;
- my $end_node_id = $this->get_node_for_span($script_end-1, $script_end, *chart_ht);
- next unless $end_node_id;
- my $end_roman = $chart_ht{NODE_ROMAN}->{$end_node_id};
- next unless ($end_consonant) = ($end_roman =~ /^([bcdfghjklmnpqrstvwxz]+)a$/i);
- my $prev_node_id = $this->get_node_for_span($script_end-4, $script_end-1, *chart_ht)
- || $this->get_node_for_span($script_end-3, $script_end-1, *chart_ht)
- || $this->get_node_for_span($script_end-2, $script_end-1, *chart_ht);
- next unless $prev_node_id;
- my $prev_roman = $chart_ht{NODE_ROMAN}->{$prev_node_id};
- next unless $prev_roman =~ /[aeiou]/i;
- # TO DO: check further back for vowel (e.g. if $prev_roman eq "r" due to vowel cancelation)
-
- $chart_ht{NODE_TYPE}->{$end_node_id} = "alt"; # keep, but demote
- # print STDERR "* Schwa deletion " . ($script_end-1) . "-$script_end $end_roman->$end_consonant\n";
- $this->add_node($end_consonant, $script_end-1, $script_end, *chart_ht, "", "devanagari-with-deleted-final-schwa");
- }
- }
-}
-
-sub best_romanized_string {
- local($this, $chart_start, $chart_end, *chart_ht, $control, $orig_char_offset, $rom_char_offset) = @_;
-
- $control = "" unless defined($control);
- my $current_orig_char_offset = $orig_char_offset || 0;
- my $current_rom_char_offset = $rom_char_offset || 0;
- my $return_offset_mappings_p = ($control =~ /\breturn offset mappings\b/);
- my $result = "";
- my $start = $chart_start;
- my $end;
- my @char_offsets = ("$current_orig_char_offset:$current_rom_char_offset");
- while ($start < $chart_end) {
- $end = $this->find_end_of_rom_segment($start, $chart_end, *chart_ht);
- my $n_orig_chars_in_segment = 0;
- my $n_rom_chars_in_segment = 0;
- if ($end && ($start < $end)) {
- my @best_romanizations = $this->best_romanizations($start, $end, *chart_ht);
- my $best_romanization = (@best_romanizations) ? $best_romanizations[0] : undef;
- if (defined($best_romanization)) {
- $result .= $best_romanization;
- if ($return_offset_mappings_p) {
- $n_orig_chars_in_segment = $end-$start;
- $n_rom_chars_in_segment = $utf8->length_in_utf8_chars($best_romanization);
- }
- $start = $end;
- } else {
- my $best_romanization = $chart_ht{ORIG_CHAR}->{$start};
- $result .= $best_romanization;
- $start++;
- if ($return_offset_mappings_p) {
- $n_orig_chars_in_segment = 1;
- $n_rom_chars_in_segment = $utf8->length_in_utf8_chars($best_romanization);
- }
- }
- } else {
- my $best_romanization = $chart_ht{ORIG_CHAR}->{$start};
- $result .= $best_romanization;
- $start++;
- if ($return_offset_mappings_p) {
- $n_orig_chars_in_segment = 1;
- $n_rom_chars_in_segment = $utf8->length_in_utf8_chars($best_romanization);
- }
- }
- if ($return_offset_mappings_p) {
- my $new_orig_char_offset = $current_orig_char_offset + $n_orig_chars_in_segment;
- my $new_rom_char_offset = $current_rom_char_offset + $n_rom_chars_in_segment;
- my $offset_mapping = "$new_orig_char_offset:$new_rom_char_offset";
- push(@char_offsets, $offset_mapping);
- $current_orig_char_offset = $new_orig_char_offset;
- $current_rom_char_offset = $new_rom_char_offset;
- }
- }
- return ($result, join(",", @char_offsets), $current_orig_char_offset, $current_rom_char_offset) if $return_offset_mappings_p;
- return $result;
-}
-
-sub orig_string_at_span {
- local($this, $start, $end, *chart_ht) = @_;
-
- my $result = "";
- foreach $i (($start .. ($end-1))) {
- $result .= $chart_ht{ORIG_CHAR}->{$i};
- }
- return $result;
-}
-
-sub find_end_of_rom_segment {
- local($this, $start, $chart_end, *chart_ht) = @_;
-
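- # Greedy longest match: return the furthest node end point (not past $chart_end) for nodes starting at $start, or "" if no node extends beyond $start.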
- my @ends = sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}};
- my $end_index = $#ends;
- while (($end_index >= 0) && ($ends[$end_index] > $chart_end)) {
- $end_index--;
- }
- if (($end_index >= 0)
- && defined($end = $ends[$end_index])
- && ($start < $end)) {
- return $end;
- } else {
- return "";
- }
-}
-
-sub best_romanizations {
- local($this, $start, $end, *chart_ht) = @_;
-
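- # Preference order: regular nodes (type "") and "alt" nodes are returned (regular first, each sorted); "backup" nodes are used only if no regular or alt romanization exists.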
- @regular_romanizations = ();
- @alt_romanizations = ();
- @backup_romanizations = ();
-
- foreach $node_id (sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end}}) {
- my $type = $chart_ht{NODE_TYPE}->{$node_id};
- my $roman = $chart_ht{NODE_ROMAN}->{$node_id};
- if (! defined($roman)) {
- # ignore
- } elsif ($type eq "backup") {
- push(@backup_romanizations, $roman) unless $util->member($roman, @backup_romanizations);
- } elsif ($type eq "alt") {
- push(@alt_romanizations, $roman) unless $util->member($roman, @alt_romanizations);
- } else {
- push(@regular_romanizations, $roman) unless $util->member($roman, @regular_romanizations);
- }
- }
- @regular_alt_romanizations = sort @regular_romanizations;
- foreach $alt_romanization (sort @alt_romanizations) {
- push(@regular_alt_romanizations, $alt_romanization) unless $util->member($alt_romanization, @regular_alt_romanizations);
- }
- return @regular_alt_romanizations if @regular_alt_romanizations;
- return sort @backup_romanizations;
-}
-
-sub join_alt_romanizations_for_viz {
- local($this, @list) = @_;
-
- my @viz_romanizations = ();
-
- foreach $alt_rom (@list) {
- if ($alt_rom eq "") {
- push(@viz_romanizations, "-");
- } else {
- push(@viz_romanizations, $alt_rom);
- }
- }
- return join(", ", @viz_romanizations);
-}
-
-sub markup_orig_rom_strings {
- local($this, $chart_start, $chart_end, *ht, *chart_ht, *pinyin_ht, $last_group_id_index) = @_;
-
- my $marked_up_rom = "";
- my $marked_up_orig = "";
- my $start = $chart_start;
- my $end;
- while ($start < $chart_end) {
- my $segment_start = $start;
- my $segment_end = $start+1;
- my $end = $this->find_end_of_rom_segment($start, $chart_end, *chart_ht);
- my $rom_segment = "";
- my $orig_segment = "";
- my $rom_title = "";
- my $orig_title = "";
- my $contains_alt_romanizations = 0;
- if ($end) {
- $segment_end = $end;
- my @best_romanizations = $this->best_romanizations($start, $end, *chart_ht);
- my $best_romanization = (@best_romanizations) ? $best_romanizations[0] : undef;
- if (defined($best_romanization)) {
- $rom_segment .= $best_romanization;
- $orig_segment .= $this->orig_string_at_span($start, $end, *chart_ht);
- $segment_end = $end;
- if ($#best_romanizations >= 1) {
- $rom_title .= $util->guard_html("Alternative romanizations: " . $this->join_alt_romanizations_for_viz(@best_romanizations) . "\n");
- $contains_alt_romanizations = 1;
- }
- } else {
- my $segment = $this->orig_string_at_span($start, $start+1, *chart_ht);
- $rom_segment .= $segment;
- $orig_segment .= $segment;
- $segment_end = $start+1;
- }
- $start = $segment_end;
- } else {
- $rom_segment .= $chart_ht{ORIG_CHAR}->{$start};
- $orig_segment .= $this->orig_string_at_span($start, $start+1, *chart_ht);
- $segment_end = $start+1;
- $start = $segment_end;
- }
- my $next_char = $chart_ht{ORIG_CHAR}->{$segment_end};
- my $next_char_is_combining_p = $this->char_is_combining_char($next_char, *ht);
- while ($next_char_is_combining_p
- && ($segment_end < $chart_end)
- && ($end = $this->find_end_of_rom_segment($segment_end, $chart_end, *chart_ht))
- && ($end > $segment_end)
- && (@best_romanizations = $this->best_romanizations($segment_end, $end, *chart_ht))
- && defined($best_romanization = $best_romanizations[0])) {
- $orig_segment .= $this->orig_string_at_span($segment_end, $end, *chart_ht);
- $rom_segment .= $best_romanization;
- if ($#best_romanizations >= 1) {
- $rom_title .= $util->guard_html("Alternative romanizations: " . $this->join_alt_romanizations_for_viz(@best_romanizations) . "\n");
- $contains_alt_romanizations = 1;
- }
- $segment_end = $end;
- $start = $segment_end;
- $next_char = $chart_ht{ORIG_CHAR}->{$segment_end};
- $next_char_is_combining_p = $this->char_is_combining_char($next_char, *ht);
- }
- foreach $i (($segment_start .. ($segment_end-1))) {
- $orig_title .= "+ " unless $orig_title eq "";
- my $char = $chart_ht{ORIG_CHAR}->{$i};
- my $numeric = $ht{UTF_TO_NUMERIC}->{$char};
- $numeric = "" unless defined($numeric);
- my $pic_descr = $ht{UTF_TO_PICTURE_DESCR}->{$char};
- $pic_descr = "" unless defined($pic_descr);
- if (($char =~ /^\xE4\xB7[\x80-\xBF]$/) && ($char_name = $ht{UTF_TO_CHAR_NAME}->{$char})) {
- $orig_title .= "$char_name\n";
- } elsif (($char =~ /^[\xE3-\xE9][\x80-\xBF]{2,2}$/) && $chinesePM->string_contains_utf8_cjk_unified_ideograph_p($char)) {
- my $unicode = $utf8->utf8_to_unicode($char);
- $orig_title .= "CJK Unified Ideograph U+" . (uc sprintf("%04x", $unicode)) . "\n";
- $orig_title .= "Chinese: $tonal_translit\n" if $tonal_translit = $chinesePM->tonal_pinyin($char, *pinyin_ht, "");
- $orig_title .= "Number: $numeric\n" if $numeric =~ /\d/;
- } elsif ($char_name = $ht{UTF_TO_CHAR_NAME}->{$char}) {
- $orig_title .= "$char_name\n";
- $orig_title .= "Number: $numeric\n" if $numeric =~ /\d/;
- $orig_title .= "Picture: $pic_descr\n" if $pic_descr =~ /\S/;
- } else {
- my $unicode = $utf8->utf8_to_unicode($char);
- if (($unicode >= 0xAC00) && ($unicode <= 0xD7A3)) {
- $orig_title .= "Hangul syllable U+" . (uc sprintf("%04x", $unicode)) . "\n";
- } else {
- $orig_title .= "Unicode character U+" . (uc sprintf("%04x", $unicode)) . "\n";
- }
- }
- }
- (@non_ascii_roms) = ($rom_segment =~ /([\xC0-\xFF][\x80-\xBF]*)/g);
- foreach $char (@non_ascii_roms) {
- my $char_name = $ht{UTF_TO_CHAR_NAME}->{$char};
- my $unicode = $utf8->utf8_to_unicode($char);
- my $unicode_s = "U+" . (uc sprintf("%04x", $unicode));
- if ($char_name) {
- $rom_title .= "$char_name\n";
- } else {
- $rom_title .= "$unicode_s\n";
- }
- }
- $last_group_id_index++;
- $rom_title =~ s/\s*$//;
- $rom_title =~ s/\n/&#xA;/g;
- $orig_title =~ s/\s*$//;
- $orig_title =~ s/\n/&#xA;/g;
- $orig_title = "" . $orig_title . "";
- my $rom_title_clause = ($rom_title eq "") ? "" : " title=\"$rom_title\"";
- my $orig_title_clause = ($orig_title eq "") ? "" : " title=\"$orig_title\"";
- my $alt_rom_clause = ($contains_alt_romanizations) ? "border-bottom:1px dotted;" : "";
- $marked_up_rom .= "<span$rom_title_clause style=\"$alt_rom_clause\">" . $util->guard_html($rom_segment) . "<\/span>";
- $marked_up_orig .= "<span$orig_title_clause>" . $util->guard_html($orig_segment) . "<\/span>";
- if (($last_char = $chart_ht{ORIG_CHAR}->{($segment_end-1)})
- && ($last_char_name = $ht{UTF_TO_CHAR_NAME}->{$last_char})
- && ($last_char_name =~ /^(FULLWIDTH COLON|FULLWIDTH COMMA|FULLWIDTH RIGHT PARENTHESIS|IDEOGRAPHIC COMMA|IDEOGRAPHIC FULL STOP|RIGHT CORNER BRACKET|BRAILLE PATTERN BLANK|TIBETAN MARK .*)$/)) {
- $marked_up_orig .= "";
- $marked_up_rom .= "";
- }
- }
- return ($marked_up_rom, $marked_up_orig, $last_group_id_index);
-}
-
-sub romanizations_with_alternatives {
- local($this, *ht, *chart_ht, *pinyin_ht, $chart_start, $chart_end) = @_;
-
- $chart_start = 0 unless defined($chart_start);
- $chart_end = $chart_ht{N_CHARS} unless defined($chart_end);
- my $result = "";
- my $start = $chart_start;
- my $end;
- # print STDOUT "romanizations_with_alternatives $chart_start-$chart_end\n";
- while ($start < $chart_end) {
- my $segment_start = $start;
- my $segment_end = $start+1;
- my $end = $this->find_end_of_rom_segment($start, $chart_end, *chart_ht);
- my $rom_segment = "";
- # print STDOUT " $start-$end\n";
- if ($end) {
- $segment_end = $end;
- my @best_romanizations = $this->best_romanizations($start, $end, *chart_ht);
- # print STDOUT " $start-$end @best_romanizations\n";
- if (@best_romanizations) {
- if ($#best_romanizations == 0) {
- $rom_segment .= $best_romanizations[0];
- } else {
- $rom_segment .= "{" . join("|", @best_romanizations) . "}";
- }
- $segment_end = $end;
- } else {
- my $segment = $this->orig_string_at_span($start, $start+1, *chart_ht);
- $rom_segment .= $segment;
- $segment_end = $start+1;
- }
- $start = $segment_end;
- } else {
- $rom_segment .= $chart_ht{ORIG_CHAR}->{$start};
- $segment_end = $start+1;
- $start = $segment_end;
- }
- # print STDOUT " $start-$end ** $rom_segment\n";
- $result .= $rom_segment;
- }
- return $result;
-}
-
-sub quick_romanize {
- local($this, $s, $lang_code, *ht) = @_;
-
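- # Greedy longest-match romanization: try substrings of up to 4 characters against the language-specific and general mapping tables; characters without any mapping pass through unchanged.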
- my $result = "";
- my @chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht);
- while (@chars) {
- my $found_match_in_table_p = 0;
- foreach $string_length (reverse(1..4)) {
- next if ($string_length-1) > $#chars;
- $multi_char_substring = join("", @chars[0..($string_length-1)]);
- my @mappings = keys %{$ht{UTF_CHAR_MAPPING_LANG_SPEC}->{$lang_code}->{$multi_char_substring}};
- @mappings = keys %{$ht{UTF_CHAR_MAPPING}->{$multi_char_substring}} unless @mappings;
- if (@mappings) {
- my $mapping = $mappings[0];
- $result .= $mapping;
- foreach $_ ((1 .. $string_length)) {
- shift @chars;
- }
- $found_match_in_table_p = 1;
- last;
- }
- }
- unless ($found_match_in_table_p) {
- $result .= $chars[0];
- shift @chars;
- }
- }
- return $result;
-}
-
-sub char_is_combining_char {
- local($this, $c, *ht) = @_;
-
- return 0 unless $c;
- my $category = $ht{UTF_TO_CAT}->{$c};
- return 0 unless $category;
- return $category =~ /^M/;
-}
-
-sub mark_up_string_for_mouse_over {
- local($this, $s, *ht, $control, *pinyin_ht) = @_;
-
- $control = "" unless defined($control);
- $no_ascii_p = ($control =~ /NO-ASCII/);
- my $result = "";
- @chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht);
- while (@chars) {
- $char = shift @chars;
- $numeric = $ht{UTF_TO_NUMERIC}->{$char};
- $numeric = "" unless defined($numeric);
- $pic_descr = $ht{UTF_TO_PICTURE_DESCR}->{$char};
- $pic_descr = "" unless defined($pic_descr);
- $next_char = ($#chars >= 0) ? $chars[0] : "";
- $next_char_is_combining_p = $this->char_is_combining_char($next_char, *ht);
- if ($no_ascii_p
- && ($char =~ /^[\x00-\x7F]*$/)
- && ! $next_char_is_combining_p) {
- $result .= $util->guard_html($char);
- } elsif (($char =~ /^[\xE3-\xE9][\x80-\xBF]{2,2}$/) && $chinesePM->string_contains_utf8_cjk_unified_ideograph_p($char)) {
- $unicode = $utf8->utf8_to_unicode($char);
- $title = "CJK Unified Ideograph U+" . (uc sprintf("%04x", $unicode));
- $title .= "&#xA;Chinese: $tonal_translit" if $tonal_translit = $chinesePM->tonal_pinyin($char, *pinyin_ht, "");
- $title .= "&#xA;Number: $numeric" if $numeric =~ /\d/;
- $result .= "<span title=\"$title\">" . $util->guard_html($char) . "<\/span>";
- } elsif ($char_name = $ht{UTF_TO_CHAR_NAME}->{$char}) {
- $title = $char_name;
- $title .= "&#xA;Number: $numeric" if $numeric =~ /\d/;
- $title .= "&#xA;Picture: $pic_descr" if $pic_descr =~ /\S/;
- $char_plus = $char;
- while ($next_char_is_combining_p) {
- # combining marks (Mn: non-spacing, Mc: spacing combining, Me: enclosing)
- $next_char_name = $ht{UTF_TO_CHAR_NAME}->{$next_char};
- $title .= "&#xA; + $next_char_name";
- $char = shift @chars;
- $char_plus .= $char;
- $next_char = ($#chars >= 0) ? $chars[0] : "";
- $next_char_is_combining_p = $this->char_is_combining_char($next_char, *ht);
- }
- $result .= "<span title=\"$title\">" . $util->guard_html($char_plus) . "<\/span>";
- $result .= "" if $char_name =~ /^(FULLWIDTH COLON|FULLWIDTH COMMA|FULLWIDTH RIGHT PARENTHESIS|IDEOGRAPHIC COMMA|IDEOGRAPHIC FULL STOP|RIGHT CORNER BRACKET)$/;
- } elsif (($unicode = $utf8->utf8_to_unicode($char))
- && ($unicode >= 0xAC00) && ($unicode <= 0xD7A3)) {
- $title = "Hangul syllable U+" . (uc sprintf("%04x", $unicode));
- $result .= "<span title=\"$title\">" . $util->guard_html($char) . "<\/span>";
- } else {
- $result .= $util->guard_html($char);
- }
- }
- return $result;
-}
-
-sub romanize_char_at_position_incl_multi {
- local($this, $i, $lang_code, $output_style, *ht, *chart_ht) = @_;
-
- my $char = $chart_ht{ORIG_CHAR}->{$i};
- return "" unless defined($char);
- my @mappings = keys %{$ht{UTF_CHAR_MAPPING_LANG_SPEC}->{$lang_code}->{$char}};
- return $mappings[0] if @mappings;
- @mappings = keys %{$ht{UTF_CHAR_MAPPING}->{$char}};
- return $mappings[0] if @mappings;
- return $this->romanize_char_at_position($i, $lang_code, $output_style, *ht, *chart_ht);
-}
-
-sub romanize_char_at_position {
- local($this, $i, $lang_code, $output_style, *ht, *chart_ht) = @_;
-
- my $char = $chart_ht{ORIG_CHAR}->{$i};
- return "" unless defined($char);
- return $char if $char =~ /^[\x00-\x7F]$/; # ASCII
- my $romanization = $ht{UTF_TO_CHAR_ROMANIZATION}->{$char};
- return $romanization if $romanization;
- my $char_name = $chart_ht{CHAR_NAME}->{$i};
- $romanization = $this->romanize_charname($char_name, $lang_code, $output_style, *ht, $char);
- $ht{SUSPICIOUS_ROMANIZATION}->{$char_name}->{$romanization}
- = ($ht{SUSPICIOUS_ROMANIZATION}->{$char_name}->{$romanization} || 0) + 1
- unless (length($romanization) < 4)
- || ($romanization =~ /\s/)
- || ($romanization =~ /^[bcdfghjklmnpqrstvwxyz]{2,3}[aeiou]-$/) # Khmer ngo-/nyo-/pho- OK
- || ($romanization =~ /^[bcdfghjklmnpqrstvwxyz]{2,2}[aeiougw][aeiou]{1,2}$/) # Canadian, Ethiopic syllable OK
- || ($romanization =~ /^(allah|bbux|nyaa|nnya|quuv|rrep|shch|shur|syrx)$/i) # Arabic; Yi; Ethiopic syllable nyaa; Cyrillic letter shcha
- || (($char_name =~ /^(YI SYLLABLE|VAI SYLLABLE|ETHIOPIC SYLLABLE|CANADIAN SYLLABICS|CANADIAN SYLLABICS CARRIER)\s+(\S+)$/) && (length($romanization) <= 5));
- # print STDERR "romanize_char_at_position $i $char_name :: $romanization\n" if $char_name =~ /middle/i;
- return $romanization;
-}
-
-sub romanize_charname {
- local($this, $char_name, $lang_code, $output_style, *ht, $char) = @_;
-
- my $cached_result = $ht{ROMANIZE_CHARNAME}->{$char_name}->{$lang_code}->{$output_style};
- # print STDERR "(C) romanize_charname($char_name): $cached_result\n" if $cached_result && ($char_name =~ /middle/i);
- return $cached_result if defined($cached_result);
- $orig_char_name = $char_name;
- $char_name =~ s/^.* LETTER\s+([A-Z]+)-\d+$/$1/; # HENTAIGANA LETTER A-3
- $char_name =~ s/^.* LETTER\s+//;
- $char_name =~ s/^.* SYLLABLE\s+B\d\d\d\s+//; # Linear B syllables
- $char_name =~ s/^.* SYLLABLE\s+//;
- $char_name =~ s/^.* SYLLABICS\s+//;
- $char_name =~ s/^.* LIGATURE\s+//;
- $char_name =~ s/^.* VOWEL SIGN\s+//;
- $char_name =~ s/^.* CONSONANT SIGN\s+//;
- $char_name =~ s/^.* CONSONANT\s+//;
- $char_name =~ s/^.* VOWEL\s+//;
- $char_name =~ s/ WITH .*$//;
- $char_name =~ s/ WITHOUT .*$//;
- $char_name =~ s/\s+(ABOVE|AGUNG|BAR|BARREE|BELOW|CEDILLA|CEREK|DIGRAPH|DOACHASHMEE|FINAL FORM|GHUNNA|GOAL|INITIAL FORM|ISOLATED FORM|KAWI|LELET|LELET RASWADI|LONSUM|MAHAPRANA|MEDIAL FORM|MURDA|MURDA MAHAPRANA|REVERSED|ROTUNDA|SASAK|SUNG|TAM|TEDUNG|TYPE ONE|TYPE TWO|WOLOSO)\s*$//;
- $char_name =~ s/^([A-Z]+)\d+$/$1/; # Linear B syllables etc.
- foreach $_ ((1 .. 3)) {
- $char_name =~ s/^.*\b(?:ABKHASIAN|ACADEMY|AFRICAN|AIVILIK|AITON|AKHMIMIC|ALEUT|ALI GALI|ALPAPRAANA|ALTERNATE|ALTERNATIVE|AMBA|ARABIC|ARCHAIC|ASPIRATED|ATHAPASCAN|BASELINE|BLACKLETTER|BARRED|BASHKIR|BERBER|BHATTIPROLU|BIBLE-CREE|BIG|BINOCULAR|BLACKFOOT|BLENDED|BOTTOM|BROAD|BROKEN|CANDRA|CAPITAL|CARRIER|CHILLU|CLOSE|CLOSED|COPTIC|CROSSED|CRYPTOGRAMMIC|CURLED|CURLY|CYRILLIC|DANTAJA|DENTAL|DIALECT-P|DIAERESIZED|DOTLESS|DOUBLE|DOUBLE-STRUCK|EASTERN PWO KAREN|EGYPTOLOGICAL|FARSI|FINAL|FLATTENED|GLOTTAL|GREAT|GREEK|HALF|HIGH|INITIAL|INSULAR|INVERTED|IOTIFIED|JONA|KANTAJA|KASHMIRI|KHAKASSIAN|KHAMTI|KHANDA|KINNA|KIRGHIZ|KOMI|L-SHAPED|LATINATE|LITTLE|LONG|LONG-LEGGED|LOOPED|LOW|MAHAAPRAANA|MALAYALAM|MANCHU|MANDAILING|MATHEMATICAL|MEDIAL|MIDDLE-WELSH|MON|MONOCULAR|MOOSE-CREE|MULTIOCULAR|MUURDHAJA|N-CREE|NARROW|NASKAPI|NDOLE|NEUTRAL|NIKOLSBURG|NORTHERN|NUBIAN|NUNAVIK|NUNAVUT|OJIBWAY|OLD|OPEN|ORKHON|OVERLONG|PALI|PERSIAN|PHARYNGEAL|PRISHTHAMATRA|R-CREE|REDUPLICATION|REVERSED|ROMANIAN|ROUND|ROUNDED|RUDIMENTA|RUMAI PALAUNG|SANSKRIT|SANYAKA|SARA|SAYISI|SCRIPT|SEBATBEIT|SEMISOFT|SGAW KAREN|SHAN|SHARP|SHWE PALAUNG|SHORT|SIBE|SIDEWAYS|SIMALUNGUN|SMALL|SOGDIAN|SOFT|SOUTH-SLAVEY|SOUTHERN|SPIDERY|STIRRUP|STRAIGHT|STRETCHED|SUBSCRIPT|SWASH|TAI LAING|TAILED|TAILLESS|TAALUJA|TH-CREE|TALL|THREE-LEGGED|TURNED|TODO|TOP|TROKUTASTI|TUAREG|UKRAINIAN|UNBLENDED|VISIGOTHIC|VOCALIC|VOICED|VOICELESS|VOLAPUK|WAVY|WESTERN PWO KAREN|WEST-CREE|WESTERN|WIDE|WOODS-CREE|Y-CREE|YENISEI|YIDDISH)\s+//;
- }
- $char_name =~ s/\s+(ABOVE|AGUNG|BAR|BARREE|BELOW|CEDILLA|CEREK|DIGRAPH|DOACHASHMEE|FINAL FORM|GHUNNA|GOAL|INITIAL FORM|ISOLATED FORM|KAWI|LELET|LELET RASWADI|LONSUM|MAHAPRANA|MEDIAL FORM|MURDA|MURDA MAHAPRANA|REVERSED|ROTUNDA|SASAK|SUNG|TAM|TEDUNG|TYPE ONE|TYPE TWO|WOLOSO)\s*$//;
- if ($char_name =~ /THAI CHARACTER/) {
- $char_name =~ s/^THAI CHARACTER\s+//;
- if ($char =~ /^\xE0\xB8[\x81-\xAE]/) {
- # Thai consonants
- $char_name =~ s/^([^AEIOU]*).*/$1/i;
- } elsif ($char_name =~ /^SARA [AEIOU]/) {
- # Thai vowels
- $char_name =~ s/^SARA\s+//;
- } else {
- $char_name = $char;
- }
- }
- if ($orig_char_name =~ /(HIRAGANA LETTER|KATAKANA LETTER|SYLLABLE|LIGATURE)/) {
- $char_name = lc $char_name;
- } elsif ($char_name =~ /\b(ANUSVARA|ANUSVARAYA|NIKAHIT|SIGN BINDI|TIPPI)\b/) {
- $char_name = "+m";
- } elsif ($char_name =~ /\bSCHWA\b/) {
- $char_name = "e";
- } elsif ($char_name =~ /\bIOTA\b/) {
- $char_name = "i";
- } elsif ($char_name =~ /\s/) {
- } elsif ($orig_char_name =~ /KHMER LETTER/) {
- $char_name .= "-";
- } elsif ($orig_char_name =~ /CHEROKEE LETTER/) {
- # use whole letter as is
- } elsif ($orig_char_name =~ /KHMER INDEPENDENT VOWEL/) {
- $char_name =~ s/q//;
- } elsif ($orig_char_name =~ /LETTER/) {
- $char_name =~ s/^[AEIOU]+([^AEIOU]+)$/$1/i;
- $char_name =~ s/^([^-AEIOUY]+)[AEIOU].*/$1/i;
- $char_name =~ s/^(Y)[AEIOU].*/$1/i if $orig_char_name =~ /\b(?:BENGALI|DEVANAGARI|GURMUKHI|GUJARATI|KANNADA|MALAYALAM|MODI|MYANMAR|ORIYA|TAMIL|TELUGU|TIBETAN)\b.*\bLETTER YA\b/;
- $char_name =~ s/^(Y[AEIOU]+)[^AEIOU].*$/$1/i;
- $char_name =~ s/^([AEIOU]+)[^AEIOU]+[AEIOU].*/$1/i;
- }
-
- my $result = ($orig_char_name =~ /\bCAPITAL\b/) ? (uc $char_name) : (lc $char_name);
- # print STDERR "(R) romanize_charname($orig_char_name): $result\n" if $orig_char_name =~ /middle/i;
- $ht{ROMANIZE_CHARNAME}->{$orig_char_name}->{$lang_code}->{$output_style} = $result;
- return $result;
-}
-
-sub assemble_numbers_in_chart {
- local($this, *chart_ht, $line_number) = @_;
-
- foreach $start (sort { $a <=> $b } keys %{$chart_ht{COMPLEX_NUMERIC_START_END}}) {
- my $end = $chart_ht{COMPLEX_NUMERIC_START_END}->{$start};
- my @numbers = ();
- foreach $i (($start .. ($end-1))) {
- my $orig_char = $chart_ht{ORIG_CHAR}->{$i};
- my $node_id = $this->get_node_for_span_with_slot($i, $i+1, "numeric-value", *chart_ht);
- if (defined($node_id)) {
- my $number = $chart_ht{NODE_ROMAN}->{$node_id};
- if (defined($number)) {
- push(@numbers, $number);
- } elsif ($orig_char =~ /^[.,]$/) { # decimal point, comma separator
- push(@numbers, $orig_char);
- } else {
- print STDERR "Found no romanization for node_id $node_id ($i-" . ($i+1) . ") in assemble_numbers_in_chart\n" if $verbosePM;
- }
- } else {
- print STDERR "Found no node_id for span $i-" . ($i+1) . " in assemble_numbers_in_chart\n" if $verbosePM;
- }
- }
- my $complex_number = $this->assemble_number(join("\xC2\xB7", @numbers), $line_number);
- # print STDERR "assemble_numbers_in_chart l.$line_number $start-$end $complex_number (@numbers)\n";
- $this->add_node($complex_number, $start, $end, *chart_ht, "", "complex-number");
- }
-}
-
-sub assemble_number {
- local($this, $s, $line_number) = @_;
- # e.g. 10 9 100 7 10 8 = 1978
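- # Worked trace for the example above: adjacent single digits are first concatenated (so 1·9·7·8 -> 1978); then each power of ten multiplies a smaller left neighbor and absorbs a smaller right neighbor by addition (10·9·100·7·10·8 -> 19·100·78 -> 1978).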
-
- my $middot = "\xC2\xB7";
- my @tokens = split(/$middot/, $s); # middle dot U+00B7
- my $i = 0;
- my @orig_tokens = @tokens;
-
- # assemble single digit numbers, e.g. 1 7 5 -> 175
- while ($i < $#tokens) {
- if ($tokens[$i] =~ /^\d$/) {
- my $j = $i+1;
- while (($j <= $#tokens) && ($tokens[$j] =~ /^[0-9.,]$/)) {
- $j++;
- }
- $j--;
- if ($j>$i) {
- my $new_token = join("", @tokens[$i .. $j]);
- $new_token =~ s/,//g;
- splice(@tokens, $i, $j-$i+1, $new_token);
- }
- }
- $i++;
- }
-
- foreach $power ((10, 100, 1000, 10000, 100000, 1000000, 100000000, 1000000000, 1000000000000)) {
- for (my $i=0; $i <= $#tokens; $i++) {
- if ($tokens[$i] == $power) {
- if (($i > 0) && ($tokens[($i-1)] < $power)) {
- splice(@tokens, $i-1, 2, ($tokens[($i-1)] * $tokens[$i]));
- $i--;
- if (($i < $#tokens) && ($tokens[($i+1)] < $power)) {
- splice(@tokens, $i, 2, ($tokens[$i] + $tokens[($i+1)]));
- $i--;
- }
- }
- }
- # 400 30 (e.g. Egyptian)
- my $gen_pattern = $power;
- $gen_pattern =~ s/^1/\[1-9\]/;
- if (($tokens[$i] =~ /^$gen_pattern$/) && ($i < $#tokens) && ($tokens[($i+1)] < $power)) {
- splice(@tokens, $i, 2, ($tokens[$i] + $tokens[($i+1)]));
- $i--;
- }
- }
- last if $#tokens == 0;
- }
- my $result = join($middot, @tokens);
- if ($verbosePM) {
- my $logfile = "/nfs/isd/ulf/cgi-mt/amr-tmp/uroman-number-log.txt";
- $util->append_to_file($logfile, "$s -> $result\n") if -r $logfile;
- # print STDERR " assemble number l.$line_number @orig_tokens -> $result\n" if $line_number == 43;
- }
- return $result;
-}
-
-1;
-
diff --git a/spaces/katanaml-org/sparrow-ml/routers/donut_training.py b/spaces/katanaml-org/sparrow-ml/routers/donut_training.py
deleted file mode 100644
index 91085d82ea34f21f7f2a2117bad8cee378360174..0000000000000000000000000000000000000000
--- a/spaces/katanaml-org/sparrow-ml/routers/donut_training.py
+++ /dev/null
@@ -1,393 +0,0 @@
-# !pip install -q git+https://github.com/huggingface/transformers.git datasets sentencepiece
-# !pip install -q pytorch-lightning==1.9.5 wandb
-
-from config import settings
-from datasets import load_dataset
-from transformers import VisionEncoderDecoderConfig
-from transformers import DonutProcessor, VisionEncoderDecoderModel
-
-import json
-import random
-from typing import Any, List, Tuple
-
-import torch
-from torch.utils.data import Dataset
-
-from torch.utils.data import DataLoader
-
-import re
-from nltk import edit_distance
-import numpy as np
-import os
-import time
-
-import pytorch_lightning as pl
-from functools import lru_cache
-
-from pytorch_lightning.loggers import WandbLogger
-from pytorch_lightning.callbacks import Callback
-from config import settings
-
-added_tokens = []
-
-dataset_name = settings.dataset
-base_config_name = settings.base_config
-base_processor_name = settings.base_processor
-base_model_name = settings.base_model
-model_name = settings.model
-
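-# prepare_job() is wrapped in lru_cache(maxsize=1) so the dataset, processor, and model are loaded only once per worker process and reused by subsequent calls.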
-@lru_cache(maxsize=1)
-def prepare_job():
- print("Preparing job...")
-
- dataset = load_dataset(dataset_name)
-
- max_length = 768
- image_size = [1280, 960]
-
- # update image_size of the encoder
- # during pre-training, a larger image size was used
- config = VisionEncoderDecoderConfig.from_pretrained(base_config_name)
- config.encoder.image_size = image_size # (height, width)
- # update max_length of the decoder (for generation)
- config.decoder.max_length = max_length
- # TODO we should actually update max_position_embeddings and interpolate the pre-trained ones:
- # https://github.com/clovaai/donut/blob/0acc65a85d140852b8d9928565f0f6b2d98dc088/donut/model.py#L602
-
- processor = DonutProcessor.from_pretrained(base_processor_name)
- model = VisionEncoderDecoderModel.from_pretrained(base_model_name, config=config)
-
- return model, processor, dataset, config, image_size, max_length
-
-
-class DonutDataset(Dataset):
- """
- DonutDataset which is saved in huggingface datasets format. (see details in https://huggingface.co/docs/datasets)
- Each row consists of an image path (png/jpg/jpeg) and ground-truth data (json/jsonl/txt),
- which are converted into input_tensor (vectorized image) and input_ids (tokenized string).
- Args:
- dataset_name_or_path: name of dataset (available at huggingface.co/datasets) or the path containing image files and metadata.jsonl
- max_length: the max number of tokens for the target sequences
- split: whether to load "train", "validation" or "test" split
- ignore_id: ignore_index for torch.nn.CrossEntropyLoss
- task_start_token: the special token to be fed to the decoder to conduct the target task
- prompt_end_token: the special token at the end of the sequences
- sort_json_key: whether or not to sort the JSON keys
- """
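- # Illustrative usage (hypothetical values): DonutDataset("naver-clova-ix/cord-v2", max_length=768, split="train", task_start_token="<s_cord-v2>", prompt_end_token="<s_cord-v2>", sort_json_key=False)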
-
- def __init__(
- self,
- dataset_name_or_path: str,
- max_length: int,
- split: str = "train",
- ignore_id: int = -100,
- task_start_token: str = "<s>",
- prompt_end_token: str = None,
- sort_json_key: bool = True,
- ):
- super().__init__()
-
- model, processor, dataset, config, image_size, p1 = prepare_job()
-
- self.max_length = max_length
- self.split = split
- self.ignore_id = ignore_id
- self.task_start_token = task_start_token
- self.prompt_end_token = prompt_end_token if prompt_end_token else task_start_token
- self.sort_json_key = sort_json_key
-
- self.dataset = load_dataset(dataset_name_or_path, split=self.split)
- self.dataset_length = len(self.dataset)
-
- self.gt_token_sequences = []
- for sample in self.dataset:
- ground_truth = json.loads(sample["ground_truth"])
- if "gt_parses" in ground_truth: # when multiple ground truths are available, e.g., docvqa
- assert isinstance(ground_truth["gt_parses"], list)
- gt_jsons = ground_truth["gt_parses"]
- else:
- assert "gt_parse" in ground_truth and isinstance(ground_truth["gt_parse"], dict)
- gt_jsons = [ground_truth["gt_parse"]]
-
- self.gt_token_sequences.append(
- [
- self.json2token(
- gt_json,
- update_special_tokens_for_json_key=self.split == "train",
- sort_json_key=self.sort_json_key,
- )
- + processor.tokenizer.eos_token
- for gt_json in gt_jsons # load json from list of json
- ]
- )
-
- self.add_tokens([self.task_start_token, self.prompt_end_token])
- self.prompt_end_token_id = processor.tokenizer.convert_tokens_to_ids(self.prompt_end_token)
-
- def json2token(self, obj: Any, update_special_tokens_for_json_key: bool = True, sort_json_key: bool = True):
- """
- Convert an ordered JSON object into a token sequence
- """
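- # e.g. {"total": "100"} becomes "<s_total>100</s_total>"; list values are joined with "<sep/>"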
- if type(obj) == dict:
- if len(obj) == 1 and "text_sequence" in obj:
- return obj["text_sequence"]
- else:
- output = ""
- if sort_json_key:
- keys = sorted(obj.keys(), reverse=True)
- else:
- keys = obj.keys()
- for k in keys:
- if update_special_tokens_for_json_key:
- self.add_tokens([fr"<s_{k}>", fr"</s_{k}>"])
- output += (
- fr"<s_{k}>"
- + self.json2token(obj[k], update_special_tokens_for_json_key, sort_json_key)
- + fr"</s_{k}>"
- )
- return output
- elif type(obj) == list:
- return r"<sep/>".join(
- [self.json2token(item, update_special_tokens_for_json_key, sort_json_key) for item in obj]
- )
- else:
- obj = str(obj)
- if f"<{obj}/>" in added_tokens:
- obj = f"<{obj}/>" # for categorical special tokens
- return obj
-
- def add_tokens(self, list_of_tokens: List[str]):
- """
- Add special tokens to tokenizer and resize the token embeddings of the decoder
- """
- model, processor, dataset, config, image_size, p1 = prepare_job()
-
- newly_added_num = processor.tokenizer.add_tokens(list_of_tokens)
- if newly_added_num > 0:
- model.decoder.resize_token_embeddings(len(processor.tokenizer))
- added_tokens.extend(list_of_tokens)
-
- def __len__(self) -> int:
- return self.dataset_length
-
- def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
- """
- Load image from image_path of given dataset_path and convert into input_tensor and labels
- Convert gt data into input_ids (tokenized string)
- Returns:
- input_tensor : preprocessed image
- input_ids : tokenized gt_data
- labels : masked labels (model doesn't need to predict prompt and pad token)
- """
-
- model, processor, dataset, config, image_size, p1 = prepare_job()
-
- sample = self.dataset[idx]
-
- # inputs
- pixel_values = processor(sample["image"], random_padding=self.split == "train",
- return_tensors="pt").pixel_values
- pixel_values = pixel_values.squeeze()
-
- # targets
- target_sequence = random.choice(self.gt_token_sequences[idx]) # can be more than one, e.g., DocVQA Task 1
- input_ids = processor.tokenizer(
- target_sequence,
- add_special_tokens=False,
- max_length=self.max_length,
- padding="max_length",
- truncation=True,
- return_tensors="pt",
- )["input_ids"].squeeze(0)
-
- labels = input_ids.clone()
- labels[labels == processor.tokenizer.pad_token_id] = self.ignore_id # model doesn't need to predict pad token
- # labels[: torch.nonzero(labels == self.prompt_end_token_id).sum() + 1] = self.ignore_id # model doesn't need to predict prompt (for VQA)
- return pixel_values, labels, target_sequence
-
-
-def build_data_loaders():
- print("Building data loaders...")
-
- model, processor, dataset, config, image_size, max_length = prepare_job()
-
- # we update some settings which differ from pretraining; namely the size of the images + no rotation required
- # source: https://github.com/clovaai/donut/blob/master/config/train_cord.yaml
- processor.feature_extractor.size = image_size[::-1] # should be (width, height)
- processor.feature_extractor.do_align_long_axis = False
-
- train_dataset = DonutDataset(dataset_name, max_length=max_length,
- split="train", task_start_token="", prompt_end_token="",
- split="train", task_start_token="<s_cord-v2>", prompt_end_token="<s_cord-v2>",
- )
-
- val_dataset = DonutDataset(dataset_name, max_length=max_length,
- split="validation", task_start_token="<s_cord-v2>", prompt_end_token="<s_cord-v2>",
- sort_json_key=False, # cord dataset is preprocessed, so no need for this
- )
-
- model.config.pad_token_id = processor.tokenizer.pad_token_id
- model.config.decoder_start_token_id = processor.tokenizer.convert_tokens_to_ids(['<s_cord-v2>'])[0]
-
- # feel free to increase the batch size if you have a lot of memory
- # I'm fine-tuning on Colab and given the large image size, batch size > 1 is not feasible
- # Set num_workers=4
- train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=4)
- val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=4)
-
- return train_dataloader, val_dataloader, max_length
-
-
-class DonutModelPLModule(pl.LightningModule):
- def __init__(self, config, processor, model):
- super().__init__()
- self.config = config
- self.processor = processor
- self.model = model
-
- self._train_dataloader, self._val_dataloader, self.max_length = build_data_loaders()  # underscore names avoid shadowing the train_dataloader()/val_dataloader() hook methods below
-
- def training_step(self, batch, batch_idx):
- pixel_values, labels, _ = batch
-
- outputs = self.model(pixel_values, labels=labels)
- loss = outputs.loss
- self.log_dict({"train_loss": loss}, sync_dist=True)
- return loss
-
- def validation_step(self, batch, batch_idx, dataset_idx=0):
- pixel_values, labels, answers = batch
- batch_size = pixel_values.shape[0]
- # we feed the prompt to the model
- decoder_input_ids = torch.full((batch_size, 1), self.model.config.decoder_start_token_id, device=self.device)
-
- outputs = self.model.generate(pixel_values,
- decoder_input_ids=decoder_input_ids,
- max_length=self.max_length,
- early_stopping=True,
- pad_token_id=self.processor.tokenizer.pad_token_id,
- eos_token_id=self.processor.tokenizer.eos_token_id,
- use_cache=True,
- num_beams=1,
- bad_words_ids=[[self.processor.tokenizer.unk_token_id]],
- return_dict_in_generate=True, )
-
- predictions = []
- for seq in self.processor.tokenizer.batch_decode(outputs.sequences):
- seq = seq.replace(self.processor.tokenizer.eos_token, "").replace(self.processor.tokenizer.pad_token, "")
- seq = re.sub(r"<.*?>", "", seq, count=1).strip() # remove first task start token
- predictions.append(seq)
-
- scores = list()
- for pred, answer in zip(predictions, answers):
-            pred = re.sub(r"(?:(?<=>) | (?=</s_))", "", pred)
-            answer = re.sub(r"<.*?>", "", answer, count=1)
- answer = answer.replace(self.processor.tokenizer.eos_token, "")
- scores.append(edit_distance(pred, answer) / max(len(pred), len(answer)))
-
- if self.config.get("verbose", False) and len(scores) == 1:
- print(f"Prediction: {pred}")
- print(f" Answer: {answer}")
- print(f" Normed ED: {scores[0]}")
-
- return scores
-
- def validation_epoch_end(self, validation_step_outputs):
- # I set this to 1 manually
- # (previously set to len(self.config.dataset_name_or_paths))
- num_of_loaders = 1
- if num_of_loaders == 1:
- validation_step_outputs = [validation_step_outputs]
- assert len(validation_step_outputs) == num_of_loaders
- cnt = [0] * num_of_loaders
- total_metric = [0] * num_of_loaders
- val_metric = [0] * num_of_loaders
- for i, results in enumerate(validation_step_outputs):
- for scores in results:
- cnt[i] += len(scores)
- total_metric[i] += np.sum(scores)
- val_metric[i] = total_metric[i] / cnt[i]
- val_metric_name = f"val_metric_{i}th_dataset"
- self.log_dict({val_metric_name: val_metric[i]}, sync_dist=True)
- self.log_dict({"val_metric": np.sum(total_metric) / np.sum(cnt)}, sync_dist=True)
-
- def configure_optimizers(self):
- # TODO add scheduler
- optimizer = torch.optim.Adam(self.parameters(), lr=self.config.get("lr"))
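-        # Sketch for the TODO above (not part of the original script): a linear warmup
-        # schedule stepped once per batch could look like this, with warmup_steps taken
-        # from the training config (e.g. the value computed in run_training_donut below):
-        #
-        #   scheduler = torch.optim.lr_scheduler.LambdaLR(
-        #       optimizer, lambda step: min(1.0, (step + 1) / max(1, warmup_steps)))
-        #   return [optimizer], [{"scheduler": scheduler, "interval": "step"}]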
-
- return optimizer
-
-    def train_dataloader(self):
-        return self._train_dataloader
-
-    def val_dataloader(self):
-        return self._val_dataloader
-
-
-class PushToHubCallback(Callback):
- def on_train_epoch_end(self, trainer, pl_module):
- print(f"Pushing model to the hub, epoch {trainer.current_epoch}")
- pl_module.model.push_to_hub(model_name,
- commit_message=f"Training in progress, epoch {trainer.current_epoch}")
-
- def on_train_end(self, trainer, pl_module):
- print(f"Pushing model to the hub after training")
- pl_module.processor.push_to_hub(model_name,
- commit_message=f"Training done")
- pl_module.model.push_to_hub(model_name,
- commit_message=f"Training done")
-
-
-def run_training_donut(max_epochs_param, val_check_interval_param, warmup_steps_param):
- worker_pid = os.getpid()
- print(f"Handling training request with worker PID: {worker_pid}")
-
- start_time = time.time()
-
- # Set epochs = 30
- # Set num_training_samples_per_epoch = training set size
- # Set val_check_interval = 0.4
-    # Set warmup_steps: 425 samples / batch size 8 ≈ 54 steps per epoch; 54 * 10 = 540 steps; 540 * 0.15 = 81 warmup steps
- config_params = {"max_epochs": max_epochs_param,
- "val_check_interval": val_check_interval_param, # how many times we want to validate during an epoch
- "check_val_every_n_epoch": 1,
- "gradient_clip_val": 1.0,
- "num_training_samples_per_epoch": 425,
- "lr": 3e-5,
- "train_batch_sizes": [8],
- "val_batch_sizes": [1],
- # "seed":2022,
- "num_nodes": 1,
- "warmup_steps": warmup_steps_param, # 425 / 8 = 54, 54 * 10 = 540, 540 * 0.15 = 81
- "result_path": "./result",
- "verbose": False,
- }
-
- model, processor, dataset, config, image_size, p1 = prepare_job()
-
- model_module = DonutModelPLModule(config, processor, model)
-
- # wandb_logger = WandbLogger(project="sparrow", name="invoices-donut-v5")
-
- # trainer = pl.Trainer(
- # accelerator="gpu",
- # devices=1,
- # max_epochs=config_params.get("max_epochs"),
- # val_check_interval=config_params.get("val_check_interval"),
- # check_val_every_n_epoch=config_params.get("check_val_every_n_epoch"),
- # gradient_clip_val=config_params.get("gradient_clip_val"),
- # precision=16, # we'll use mixed precision
- # num_sanity_val_steps=0,
- # # logger=wandb_logger,
- # callbacks=[PushToHubCallback()],
- # )
-
- # trainer.fit(model_module)
-
- end_time = time.time()
- processing_time = end_time - start_time
-
- print(f"Training done, worker PID: {worker_pid}")
-
- return processing_time
diff --git a/spaces/kcagle/AutoGPT/autogpt/config/config.py b/spaces/kcagle/AutoGPT/autogpt/config/config.py
deleted file mode 100644
index 4b53df10e8d2832be7ffb321d9036aec5a47a79d..0000000000000000000000000000000000000000
--- a/spaces/kcagle/AutoGPT/autogpt/config/config.py
+++ /dev/null
@@ -1,251 +0,0 @@
-"""Configuration class to store the state of bools for different scripts access."""
-import os
-
-import openai
-import yaml
-from colorama import Fore
-from dotenv import load_dotenv
-
-from autogpt.config.singleton import Singleton
-
-load_dotenv(verbose=True)
-
-
-class Config(metaclass=Singleton):
- """
- Configuration class to store the state of bools for different scripts access.
- """
-
- def __init__(self) -> None:
- """Initialize the Config class"""
- self.debug_mode = False
- self.continuous_mode = False
- self.continuous_limit = 0
- self.speak_mode = False
- self.skip_reprompt = False
- self.allow_downloads = False
- self.skip_news = False
-
- self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
- self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
- self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
- self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
- self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
- self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
-
- self.openai_api_key = os.getenv("OPENAI_API_KEY")
- self.temperature = float(os.getenv("TEMPERATURE", "1"))
- self.use_azure = os.getenv("USE_AZURE") == "True"
- self.execute_local_commands = (
- os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
- )
- self.restrict_to_workspace = (
- os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
- )
-
- if self.use_azure:
- self.load_azure_config()
- openai.api_type = self.openai_api_type
- openai.api_base = self.openai_api_base
- openai.api_version = self.openai_api_version
-
- self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
- self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
- self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
-
- self.use_mac_os_tts = False
- self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
-
- self.use_brian_tts = False
- self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
-
- self.github_api_key = os.getenv("GITHUB_API_KEY")
- self.github_username = os.getenv("GITHUB_USERNAME")
-
- self.google_api_key = os.getenv("GOOGLE_API_KEY")
- self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
-
- self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
- self.pinecone_region = os.getenv("PINECONE_ENV")
-
- self.weaviate_host = os.getenv("WEAVIATE_HOST")
- self.weaviate_port = os.getenv("WEAVIATE_PORT")
- self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
- self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
- self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
- self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
- self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
- self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
- self.use_weaviate_embedded = (
- os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
- )
-
- # milvus configuration, e.g., localhost:19530.
- self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
- self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
-
- self.image_provider = os.getenv("IMAGE_PROVIDER")
- self.image_size = int(os.getenv("IMAGE_SIZE", 256))
- self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
- self.huggingface_image_model = os.getenv(
- "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
- )
- self.huggingface_audio_to_text_model = os.getenv(
- "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
- )
- self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
- self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")
-
- # Selenium browser settings
- self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
- self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"
-
- # User agent header to use when making HTTP requests
- # Some websites might just completely deny request with an error code if
- # no user agent was found.
- self.user_agent = os.getenv(
- "USER_AGENT",
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
- " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
- )
-
- self.redis_host = os.getenv("REDIS_HOST", "localhost")
- self.redis_port = os.getenv("REDIS_PORT", "6379")
- self.redis_password = os.getenv("REDIS_PASSWORD", "")
- self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
- self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
- # Note that indexes must be created on db 0 in redis, this is not configurable.
-
- self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
- # Initialize the OpenAI API client
- openai.api_key = self.openai_api_key
-
- def get_azure_deployment_id_for_model(self, model: str) -> str:
- """
- Returns the relevant deployment id for the model specified.
-
- Parameters:
- model(str): The model to map to the deployment id.
-
- Returns:
- The matching deployment id if found, otherwise an empty string.
- """
- if model == self.fast_llm_model:
- return self.azure_model_to_deployment_id_map[
- "fast_llm_model_deployment_id"
- ] # type: ignore
- elif model == self.smart_llm_model:
- return self.azure_model_to_deployment_id_map[
- "smart_llm_model_deployment_id"
- ] # type: ignore
- elif model == "text-embedding-ada-002":
- return self.azure_model_to_deployment_id_map[
- "embedding_model_deployment_id"
- ] # type: ignore
- else:
- return ""
-
- AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml")
-
- def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
- """
- Loads the configuration parameters for Azure hosting from the specified file
- path as a yaml file.
-
- Parameters:
- config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
-
- Returns:
- None
- """
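-        # Example azure.yaml layout implied by the keys read below (illustrative only;
-        # the deployment names depend on your Azure OpenAI resource):
-        #
-        #   azure_api_type: azure
-        #   azure_api_base: https://<your-resource>.openai.azure.com
-        #   azure_api_version: 2023-03-15-preview
-        #   azure_model_map:
-        #     fast_llm_model_deployment_id: <deployment-name>
-        #     smart_llm_model_deployment_id: <deployment-name>
-        #     embedding_model_deployment_id: <deployment-name>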
- try:
- with open(config_file) as file:
- config_params = yaml.load(file, Loader=yaml.FullLoader)
- except FileNotFoundError:
- config_params = {}
- self.openai_api_type = config_params.get("azure_api_type") or "azure"
- self.openai_api_base = config_params.get("azure_api_base") or ""
- self.openai_api_version = (
- config_params.get("azure_api_version") or "2023-03-15-preview"
- )
-        self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})
-
- def set_continuous_mode(self, value: bool) -> None:
- """Set the continuous mode value."""
- self.continuous_mode = value
-
- def set_continuous_limit(self, value: int) -> None:
- """Set the continuous limit value."""
- self.continuous_limit = value
-
- def set_speak_mode(self, value: bool) -> None:
- """Set the speak mode value."""
- self.speak_mode = value
-
- def set_fast_llm_model(self, value: str) -> None:
- """Set the fast LLM model value."""
- self.fast_llm_model = value
-
- def set_smart_llm_model(self, value: str) -> None:
- """Set the smart LLM model value."""
- self.smart_llm_model = value
-
- def set_fast_token_limit(self, value: int) -> None:
- """Set the fast token limit value."""
- self.fast_token_limit = value
-
- def set_smart_token_limit(self, value: int) -> None:
- """Set the smart token limit value."""
- self.smart_token_limit = value
-
- def set_browse_chunk_max_length(self, value: int) -> None:
- """Set the browse_website command chunk max length value."""
- self.browse_chunk_max_length = value
-
- def set_openai_api_key(self, value: str) -> None:
- """Set the OpenAI API key value."""
- self.openai_api_key = value
-
- def set_elevenlabs_api_key(self, value: str) -> None:
- """Set the ElevenLabs API key value."""
- self.elevenlabs_api_key = value
-
- def set_elevenlabs_voice_1_id(self, value: str) -> None:
- """Set the ElevenLabs Voice 1 ID value."""
- self.elevenlabs_voice_1_id = value
-
- def set_elevenlabs_voice_2_id(self, value: str) -> None:
- """Set the ElevenLabs Voice 2 ID value."""
- self.elevenlabs_voice_2_id = value
-
- def set_google_api_key(self, value: str) -> None:
- """Set the Google API key value."""
- self.google_api_key = value
-
- def set_custom_search_engine_id(self, value: str) -> None:
- """Set the custom search engine id value."""
- self.custom_search_engine_id = value
-
- def set_pinecone_api_key(self, value: str) -> None:
- """Set the Pinecone API key value."""
- self.pinecone_api_key = value
-
- def set_pinecone_region(self, value: str) -> None:
- """Set the Pinecone region value."""
- self.pinecone_region = value
-
- def set_debug_mode(self, value: bool) -> None:
- """Set the debug mode value."""
- self.debug_mode = value
-
-
-def check_openai_api_key() -> None:
- """Check if the OpenAI API key is set in config.py or as an environment variable."""
- cfg = Config()
- if not cfg.openai_api_key:
- print(
- Fore.RED
- + "Please set your OpenAI API key in .env or as an environment variable."
- )
- print("You can get your key from https://platform.openai.com/account/api-keys")
- exit(1)
diff --git a/spaces/keivalya/alternovation/app.py b/spaces/keivalya/alternovation/app.py
deleted file mode 100644
index 85d8e7bed5f8c324c619ae5746101e263983994a..0000000000000000000000000000000000000000
--- a/spaces/keivalya/alternovation/app.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import gradio as gr
-import requests
-import io
-from PIL import Image
-from catboost import CatBoostRegressor
-from web import HTMLCode, CSSCode, footCode
-import numpy as np
-import joblib
-
-API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
-headers = {"Authorization": "Bearer "}
-defaultprompt = "round table with blue color and smooth edges, placed in the living room"
-
-main_model = CatBoostRegressor()
-main_model.load_model("model.cbm")
-
-pred_vol = joblib.load('model_predvol.pkl')
-pred_wei = joblib.load('model_predwei.pkl')
-
-def query(payload):
- response = requests.post(API_URL, headers=headers, json=payload)
- return response.content
-
-def stablefurniture(originalprompt, ftype, purpose, texture, length, width, height):
-    prompt = f"full {ftype} placed in the living room {originalprompt} {texture} texture for {purpose}, having {int(length)} millimeter length, {int(width)} millimeter width, and {int(height)} millimeter height, hd quality, full furniture, hyperrealistic, highly detailed, sharp focus, cinematic lighting, for commercial website"
- print(prompt)
- image_bytes = query(
- {
- "inputs": prompt,
- }
- )
- image = Image.open(io.BytesIO(image_bytes)).convert("RGBA")
- # print(f"length: {length},width: {width},height: {height}")
- vol_pred = pred_vol.predict([[length, width, height]])
- wei_pred = pred_wei.predict([[length, width, height, vol_pred[0]]])
- prediction = main_model.predict([length, width, height, vol_pred[0], wei_pred[0]])
- rubles = "₽ " + str(np.round(prediction))
- return image, rubles
-
-with gr.Blocks(theme=gr.themes.Soft(), css=CSSCode) as demo:
- gr.HTML(HTMLCode)
- with gr.Row():
- with gr.Column(scale=1, min_width=600):
- with gr.Row():
-                originalprompt = gr.Textbox(label="Prompt", value=defaultprompt)
- with gr.Row():
- ftype = gr.Dropdown(
- [
- "Table",
- "Rack",
- "Closet",
- "Cabinet",
- "Roll-out stand",
- "Pedestal",
- "Screen",
- "Console",
- "Reception Desk",
- "Mezzanine",
- "Penalty",
- "Classical",
- ],
- label="Type",
- info="Which type of furniture are you looking for?",
- )
- purpose = gr.Dropdown(
- [
- "computer",
- "for clothes",
- "for documents",
- "for negotiations",
- "for office",
- "for office equipment",
- "for receptionists",
- "for magazine",
- "roll-out stand",
- "writing",
- ],
- label="Purpose",
-                    info="How may your furniture help you?",
- )
- texture = gr.Dropdown(
- [
- "Beech",
- "Oak",
- "Kraft white",
- "Sonoma oak Light",
- "Craft Golden",
- "Wenge/Oak",
- "Nut",
- "Wine",
- "Grey",
- "Oak Cronberg",
- "Cherry",
- ],
- label="Texture",
- info="How would you like it to be?",
- )
- with gr.Row():
- length = gr.Number(label="Length")
- width = gr.Number(label="Width")
- height = gr.Number(label="Height")
- btn = gr.Button("Dream")
- prediction = gr.Textbox(label="Estimated Cost")
- with gr.Column(scale=2, min_width=600):
- furniture = gr.Image().style(height=580)
- btn.click(
- stablefurniture,
-        inputs=[originalprompt, ftype, purpose, texture, length, width, height],
- outputs=[furniture, prediction],
- )
- with gr.Row():
- gr.HTML(footCode)
-
-demo.launch()
-
-
-# gr.Interface(fn=stablefurniture, inputs=[
-# gr.Textbox(),
-# gr.Dropdown(
-# ["Table","Rack","Closet","Cabinet","Roll-out stand","Pedestal","Screen","Console","Reception Desk","Mezzanine","Penalty","Classical"], label="Type", info="Which type of furniture are you looking for?"
-# ),
-# gr.Dropdown(
-# ["computer","for clothes","for documents","for negotiations","for office","for office equipment","for receptionists","for magazine","roll-out stand","writing"], label="Purpose", info="Let us know why are you looking for this furniture.|"
-# ),
-# "number",
-# "number",
-# "number"],
-# outputs=["image","number"],
-# theme=gr.themes.Soft()).launch()
\ No newline at end of file
diff --git a/spaces/kepajide/keyiwei/text/symbols.py b/spaces/kepajide/keyiwei/text/symbols.py
deleted file mode 100644
index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000
--- a/spaces/kepajide/keyiwei/text/symbols.py
+++ /dev/null
@@ -1,39 +0,0 @@
-'''
-Defines the set of symbols used in text input to the model.
-'''
-
-'''# japanese_cleaners
-_pad = '_'
-_punctuation = ',.!?-'
-_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
-'''
-
-'''# japanese_cleaners2
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
-'''
-
-'''# korean_cleaners
-_pad = '_'
-_punctuation = ',.!?…~'
-_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
-'''
-
-'''# chinese_cleaners
-_pad = '_'
-_punctuation = ',。!?—…'
-_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
-'''
-
-# zh_ja_mixture_cleaners
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
-
-
-# Export all symbols:
-symbols = [_pad] + list(_punctuation) + list(_letters)
-
-# Special symbol ids
-SPACE_ID = symbols.index(" ")
\ No newline at end of file
diff --git a/spaces/khizon/emotion-classifier-demo/test.py b/spaces/khizon/emotion-classifier-demo/test.py
deleted file mode 100644
index 22f31211f32051a8b0bba931eed25a1c246853ed..0000000000000000000000000000000000000000
--- a/spaces/khizon/emotion-classifier-demo/test.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from main import *
-
-from sklearn.metrics import classification_report
-
-def speech_file_to_array_fn(batch):
- speech_array, sampling_rate = torchaudio.load(batch["path"])
- resampler = torchaudio.transforms.Resample(sampling_rate, 16_000)
- speech_array = resampler(speech_array).squeeze().numpy()
-
- batch["speech"] = speech_array
- return batch
-
-
-def predict(batch):
- features = processor(batch["speech"], sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", padding=True)
-
- input_values = features.input_values.to(device)
- attention_mask = features.attention_mask.to(device)
-
- with torch.no_grad():
- logits = model(input_values, attention_mask=attention_mask).logits
-
- pred_ids = torch.argmax(logits, dim=-1).detach().cpu().numpy()
- batch["predicted"] = pred_ids
- return batch
-
-if __name__ == '__main__':
-
- data_files = {
- "test" : 'data/test.csv'
- }
- test_dataset = load_dataset('csv', data_files = data_files, delimiter = "\t")["test"]
- print(test_dataset)
-
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- print(f"Device: {device}")
-
- # model_name_or_path = "m3hrdadfi/wav2vec2-xlsr-greek-speech-emotion-recognition"
- model_name_or_path2 = "lighteternal/wav2vec2-large-xlsr-53-greek"
- # model_name_or_path = "data/wav2vec2-xlsr-greek-speech-emotion-recognition/checkpoint-180"
- model_name_or_path = 'artifacts/aesdd_classifier:v0'
- config = AutoConfig.from_pretrained(model_name_or_path)
- processor = Wav2Vec2Processor.from_pretrained(model_name_or_path2)
- model = Wav2Vec2ForSpeechClassification.from_pretrained(model_name_or_path).to(device)
-
- test_dataset = test_dataset.map(speech_file_to_array_fn)
-
- result = test_dataset.map(predict, batched=True, batch_size=8)
-
- label_names = [config.id2label[i] for i in range(config.num_labels)]
-
- print(f'Labels: {label_names}')
-
- y_true = [config.label2id[name] for name in result["emotion"]]
- y_pred = result["predicted"]
-
- print(y_true[:5])
- print(y_pred[:5])
-
- print(classification_report(y_true, y_pred, target_names=label_names))
\ No newline at end of file
diff --git a/spaces/kirch/Text2Video-Zero/style.css b/spaces/kirch/Text2Video-Zero/style.css
deleted file mode 100644
index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/style.css
+++ /dev/null
@@ -1,3 +0,0 @@
-h1 {
- text-align: center;
-}
diff --git a/spaces/kiroiineko/rvc-models-tragamundos/infer_pack/attentions.py b/spaces/kiroiineko/rvc-models-tragamundos/infer_pack/attentions.py
deleted file mode 100644
index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000
--- a/spaces/kiroiineko/rvc-models-tragamundos/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from infer_pack import commons
-from infer_pack import modules
-from infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
-        # pad along column
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
diff --git a/spaces/kleinay/qasem-demo/app.py b/spaces/kleinay/qasem-demo/app.py
deleted file mode 100644
index ca05b9a87b6f035ec8760fb798442634fb1b6185..0000000000000000000000000000000000000000
--- a/spaces/kleinay/qasem-demo/app.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import gradio as gr
-from typing import List
-
-from qasem.end_to_end_pipeline import QASemEndToEndPipeline
-pipeline = QASemEndToEndPipeline(spacy_model="en_core_web_lg")
-
-
-description = f"""This is a demo of the QASem Parsing pipeline. It wraps models of three QA-based semantic tasks, composing a comprehensive semi-structured representation of sentence meaning - covering verbal and nominal semantic role labeling together with discourse relations."""
-title="QASem Parsing Demo"
-
-all_layers = ["qasrl", "qanom", "qadiscourse"]
-examples = [["Both were shot in the confrontation with police and have been recovering in hospital since the attack .", all_layers, False, 0.75],
- ["the construction of the officer 's building was delayed by the lockdown and is expected to continue for at least 10 more months.", all_layers, False, 0.75],
- ["While President Obama expressed condolences regarding the death of Margaret Thatcher upon her death earlier this year , he did not issue an executive order that flags be lowered in her honor .", all_layers, False, 0.75],
- ["We made a very clear commitment : if there is any proposal in the next parliament for a transfer of powers to Brussels ( the EU ) we will have an in/out referendum .", all_layers, False, 0.75],
- ["The doctor asked about the progress in Luke 's treatment .", all_layers, False, 0.75],
- ["The Veterinary student was interested in Luke 's treatment of sea animals .", all_layers, False, 0.7],
- ["Some reviewers agreed that the criticism raised by the AC is mostly justified .", all_layers, False, 0.6]]
-
-
-input_sent_box_label = "Insert sentence here, or select from the examples below"
-links = """
"""
-
-
-def call(sentence, layers, show_openie: bool, detection_threshold: float):
- outputs = pipeline([sentence], nominalization_detection_threshold=detection_threshold, output_openie=show_openie)
- if show_openie:
- openie_outputs = outputs["openie"][0] # list of OpenIE tuples
- outputs = outputs["qasem"]
- outputs = outputs[0] # only one sentence in input batch
- def pretty_qadisc_qas(qa_infos) -> List[str]:
- if not qa_infos: return []
- return ["- " + f"{qa['question']} --- {qa['answer']}".lstrip()
- for qa in qa_infos if qa is not None]
- def pretty_qasrl_qas(pred_info) -> List[str]:
- if not pred_info or not pred_info['QAs']: return []
- return ["- " + f"{qa['question']} --- {';'.join(qa['answers'])}".lstrip()
- for qa in pred_info['QAs'] if qa is not None]
- # filter outputs by requested `layers`
- outputs = {layer: qas if layer in layers else []
- for layer, qas in outputs.items()}
- # Prettify outputs
- qasrl_qas = [qa for pred_info in outputs['qasrl'] for qa in pretty_qasrl_qas(pred_info)]
- qanom_qas = [qa for pred_info in outputs['qanom'] for qa in pretty_qasrl_qas(pred_info)]
- qadisc_qas= pretty_qadisc_qas(outputs['qadiscourse'])
- all_qas = []
- if "qasrl" in layers: all_qas += ['\nQASRL:'] + qasrl_qas
- if "qanom" in layers: all_qas += ['\nQANom:'] + qanom_qas
- if "qadiscourse" in layers: all_qas += ['\nQADiscourse:'] + qadisc_qas
-
- if not qasrl_qas + qanom_qas + qadisc_qas:
- pretty_qa_output = "NO QA GENERATED"
- else:
- pretty_qa_output = "\n".join(all_qas)
-
- # also present highlighted predicates
- qasrl_predicates = [pred_info['predicate_idx'] for pred_info in outputs['qasrl']]
- qanom_predicates = [pred_info['predicate_idx'] for pred_info in outputs['qanom']]
-    def color(idx):
-        if idx in qasrl_predicates: return "aquamarine"
-        if idx in qanom_predicates: return "aqua"
-        return "transparent"
-    def word_span(word, idx):
-        return f'<span style="background-color: {color(idx)}">{word}</span>'
-    html = '<p>' + ' '.join(word_span(word, idx) for idx, word in enumerate(sentence.split(" "))) + '</p>'
- # show openie_outputs
- if show_openie:
- repr_oie = lambda tup: f"({','.join(e for e in tup)})"
-        openie_html = '<p>Open Information Extraction: ' + ' '.join([repr_oie(tup) for tup in openie_outputs]) + '</p>'
- else:
- openie_html = ''
-
- return html, pretty_qa_output, openie_html, outputs
-
-iface = gr.Interface(fn=call,
- inputs=[gr.components.Textbox(placeholder=input_sent_box_label, label="Sentence", lines=4),
- gr.components.CheckboxGroup(all_layers, value=all_layers, label="Annotation Layers"),
- gr.components.Checkbox(value=False, label="Show OpenIE format (converted from verbal QASRL only)"),
- gr.components.Slider(minimum=0., maximum=1., step=0.01, value=0.75, label="Nominalization Detection Threshold")],
- outputs=[gr.components.HTML(label="Detected Predicates"),
- gr.components.Textbox(label="Generated QAs"),
- gr.components.HTML(label="OpenIE Output"),
- gr.components.JSON(label="Raw QASemEndToEndPipeline Output")],
- title=title,
- description=description,
- article=links,
- examples=examples)
-iface.launch()
\ No newline at end of file
diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/speech_recognition/criterions/__init__.py b/spaces/koajoel/PolyFormer/fairseq/examples/speech_recognition/criterions/__init__.py
deleted file mode 100644
index 579abd2ace1b14b80f5e53e5c96583e4d5b14c52..0000000000000000000000000000000000000000
--- a/spaces/koajoel/PolyFormer/fairseq/examples/speech_recognition/criterions/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import importlib
-import os
-
-
-# ASG loss requires flashlight bindings
-files_to_skip = set()
-try:
- import flashlight.lib.sequence.criterion
-except ImportError:
- files_to_skip.add("ASG_loss.py")
-
-for file in sorted(os.listdir(os.path.dirname(__file__))):
- if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
- criterion_name = file[: file.find(".py")]
- importlib.import_module(
- "examples.speech_recognition.criterions." + criterion_name
- )
diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/segmentation.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/segmentation.py
deleted file mode 100644
index 3d4a9f94eaae84722db584277dbbf9bc41ede357..0000000000000000000000000000000000000000
--- a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/segmentation.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .constants import weights as constant_weights
-
-
-class CrossEntropy2d(nn.Module):
- def __init__(self, reduction="mean", ignore_label=255, weights=None, *args, **kwargs):
- """
- weight (Tensor, optional): a manual rescaling weight given to each class.
- If given, has to be a Tensor of size "nclasses"
- """
- super(CrossEntropy2d, self).__init__()
- self.reduction = reduction
- self.ignore_label = ignore_label
- self.weights = weights
- if self.weights is not None:
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- self.weights = torch.FloatTensor(constant_weights[weights]).to(device)
-
- def forward(self, predict, target):
- """
- Args:
- predict:(n, c, h, w)
- target:(n, 1, h, w)
- """
- target = target.long()
- assert not target.requires_grad
- assert predict.dim() == 4, "{0}".format(predict.size())
- assert target.dim() == 4, "{0}".format(target.size())
- assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
- assert target.size(1) == 1, "{0}".format(target.size(1))
- assert predict.size(2) == target.size(2), "{0} vs {1} ".format(predict.size(2), target.size(2))
- assert predict.size(3) == target.size(3), "{0} vs {1} ".format(predict.size(3), target.size(3))
- target = target.squeeze(1)
- n, c, h, w = predict.size()
- target_mask = (target >= 0) * (target != self.ignore_label)
- target = target[target_mask]
- predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
- predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
- loss = F.cross_entropy(predict, target, weight=self.weights, reduction=self.reduction)
- return loss
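-
-# Minimal usage sketch (shapes follow the forward() docstring above; the tensors
-# below are made-up illustrations, not part of the original module):
-#
-#   criterion = CrossEntropy2d(reduction="mean", ignore_label=255)
-#   logits = torch.randn(2, 5, 64, 64)            # (n, c, h, w) raw class scores
-#   target = torch.randint(0, 5, (2, 1, 64, 64))  # (n, 1, h, w) integer labels
-#   loss = criterion(logits, target)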
diff --git a/spaces/kurianbenoy/Pallakku/vegam-whisper-medium-ml-fp16/README.md b/spaces/kurianbenoy/Pallakku/vegam-whisper-medium-ml-fp16/README.md
deleted file mode 100644
index a875d9de2242786cbf359faf117f43cca228113d..0000000000000000000000000000000000000000
--- a/spaces/kurianbenoy/Pallakku/vegam-whisper-medium-ml-fp16/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
----
-language:
-- ml
-tags:
-- audio
-- automatic-speech-recognition
-license: mit
-datasets:
-- google/fleurs
-- thennal/IMaSC
-- mozilla-foundation/common_voice_11_0
-library_name: ctranslate2
----
-
-# vegam-whisper-medium-ml
-
-This is a conversion of [thennal/whisper-medium-ml](https://huggingface.co/thennal/whisper-medium-ml) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format.
-
-This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/guillaumekln/faster-whisper).
-
-## Installation
-
-- Install [faster-whisper](https://github.com/guillaumekln/faster-whisper). More details on installation can be [found in the faster-whisper repository](https://github.com/guillaumekln/faster-whisper/tree/master#installation).
-
-```
-pip install faster-whisper
-```
-
-- Install [git-lfs](https://git-lfs.com/). Note that git-lfs is only needed to download the model weights from Hugging Face.
-
-```
-apt-get install git-lfs
-```
-
-- Download the model weights
-
-```
-git lfs install
-git clone https://huggingface.co/kurianbenoy/vegam-whisper-medium-ml-fp16
-```
-
-## Usage
-
-```
-from faster_whisper import WhisperModel
-
-model_path = "vegam-whisper-medium-ml-fp16"
-
-# Run on GPU with FP16
-model = WhisperModel(model_path, device="cuda", compute_type="float16")
-
-segments, info = model.transcribe("audio.mp3", beam_size=5)
-
-print("Detected language '%s' with probability %f" % (info.language, info.language_probability))
-
-for segment in segments:
- print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
-```
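-
-If a GPU is not available, faster-whisper can also load the same converted weights on CPU (a sketch; the `int8` compute type is an assumption for reasonable CPU speed, not something verified in this model card):
-
-```
-model = WhisperModel(model_path, device="cpu", compute_type="int8")
-```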
-
-## Example
-
-```
-from faster_whisper import WhisperModel
-
-model_path = "vegam-whisper-medium-ml-fp16"
-
-model = WhisperModel(model_path, device="cuda", compute_type="float16")
-
-
-segments, info = model.transcribe("00b38e80-80b8-4f70-babf-566e848879fc.webm", beam_size=5)
-
-print("Detected language '%s' with probability %f" % (info.language, info.language_probability))
-
-for segment in segments:
- print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
-```
-
-> Detected language 'ta' with probability 0.353516
-
-> [0.00s -> 4.74s] പാലം കടുക്കുവോളം നാരായണ പാലം കടന്നാലൊ കൂരായണ
-
-Note: The audio file [00b38e80-80b8-4f70-babf-566e848879fc.webm](https://huggingface.co/kurianbenoy/vegam-whisper-medium-ml/blob/main/00b38e80-80b8-4f70-babf-566e848879fc.webm) is from [Malayalam Speech Corpus](https://blog.smc.org.in/malayalam-speech-corpus/) and is stored along with model weights.
-
-## Conversion Details
-
-This conversion was made possible by the wonderful [CTranslate2 library](https://github.com/OpenNMT/CTranslate2), leveraging the [Transformers converter for OpenAI Whisper](https://opennmt.net/CTranslate2/guides/transformers.html#whisper). The original model was converted with the following command:
-
-```
-ct2-transformers-converter --model thennal/whisper-medium-ml --output_dir vegam-whisper-medium-ml-fp16 \
---quantization float16
-```
-
-## Many Thanks to
-
-- Creators of CTranslate2 and faster-whisper
-- Thennal D K
-- Santhosh Thottingal
-
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/security/open_id_connect_url.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/security/open_id_connect_url.py
deleted file mode 100644
index 4e65f1f6c486fa579554c61b9d137c7fda1f1b17..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/security/open_id_connect_url.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from typing import Optional
-
-from fastapi.openapi.models import OpenIdConnect as OpenIdConnectModel
-from fastapi.security.base import SecurityBase
-from starlette.exceptions import HTTPException
-from starlette.requests import Request
-from starlette.status import HTTP_403_FORBIDDEN
-
-
-class OpenIdConnect(SecurityBase):
- def __init__(
- self,
- *,
- openIdConnectUrl: str,
- scheme_name: Optional[str] = None,
- description: Optional[str] = None,
- auto_error: bool = True,
- ):
- self.model = OpenIdConnectModel(
- openIdConnectUrl=openIdConnectUrl, description=description
- )
- self.scheme_name = scheme_name or self.__class__.__name__
- self.auto_error = auto_error
-
- async def __call__(self, request: Request) -> Optional[str]:
- authorization = request.headers.get("Authorization")
- if not authorization:
- if self.auto_error:
- raise HTTPException(
- status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
- )
- else:
- return None
- return authorization
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Button-661a0701.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Button-661a0701.js
deleted file mode 100644
index 3935ccb926db91896614bdda82388f7024685e85..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Button-661a0701.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as q,i as E,s as I,p as k,t as z,U as T,V as K,x as U,G as L,W as C,M as m,D as y,g as O,X as S,Y,Z as D,h as V,q as G,C as c,J as W,y as X}from"./index-7c0e54a6.js";const Z=(l,e)=>J[e](l[e]);function j(l,e){const t=e.reduce((i,n)=>(l[n]===void 0||!J[n]?i[n]=" ":i[n]=` ${Z(l,n)} `,i),{});return t.styles=` ${Object.values(t).join(" ").replace(/\s+/g," ").trim()} `,t}const J={container(l){return l?"":"padding: 0; margin: 0; border-width: 0; box-shadow: none; overflow: visible; background: transparent;"},label_container(l){return l?"":"border-width: 0; box-shadow: none; overflow: visible; background: transparent;"},grid_cols(l){let e=["","sm-","md-","lg-","xl-","2xl-"],t=Array.isArray(l)?l:[l];return[0,0,0,0,0,0].map((i,n)=>`--${e[n]}grid-cols: var(--grid-${t?.[n]||t?.[t?.length-1]});`).join(" ")},grid_rows(l){let e=["","sm-","md-","lg-","xl-","2xl-"],t=Array.isArray(l)?l:[l];return[0,0,0,0,0,0].map((i,n)=>`--${e[n]}grid-rows: var(--grid-${t?.[n]||t?.[t?.length-1]});`).join(" ")},height(l){return l==="auto"?"height: auto;":""},full_width(l){return l?"width: var(--size-full); flex-grow: 1;":"flex-grow: 0; width: fit-content;"},equal_height(l){return l?"align-items: stretch;":"align-items: flex-start;"},visible(l){return l?"":"display:hidden;"},item_container(l){return l?"":"border-width:0;"},object_fit(l){return`--object-fit: ${l};`}};function F(l){let e,t,i,n;const o=l[16].default,f=K(o,l,l[15],null);let _=[{"data-testid":l[5]},{id:l[0]},{class:t="block "+l[1].join(" ")+" svelte-mppz8v"},{style:i=l[9]+" "+(l[8]||null)}],a={};for(let s=0;s<_.length;s+=1)a=U(a,_[s]);return{c(){e=L(l[10]),f&&f.c(),C(l[10])(e,a),m(e,"hidden",l[6]===!1),m(e,"padded",l[4]),m(e,"border_focus",l[3]==="focus"),y(e,"border-style",l[2]),y(e,"overflow",l[7]?"visible":"hidden")},m(s,u){O(s,e,u),f&&f.m(e,null),n=!0},p(s,u){f&&f.p&&(!n||u&32768)&&S(f,o,s,s[15],n?D(o,s[15],u,null):Y(s[15]),null),C(s[10])(e,a=V(_,[(!n||u&32)&&{"data-testid":s[5]},(!n||u&1)&&{id:s[0]},(!n||u&2&&t!==(t="block "+s[1].join(" ")+" svelte-mppz8v"))&&{class:t},(!n||u&768&&i!==(i=s[9]+" "+(s[8]||null)))&&{style:i}])),m(e,"hidden",s[6]===!1),m(e,"padded",s[4]),m(e,"border_focus",s[3]==="focus"),u&4&&y(e,"border-style",s[2]),u&128&&y(e,"overflow",s[7]?"visible":"hidden")},i(s){n||(k(f,s),n=!0)},o(s){z(f,s),n=!1},d(s){s&&G(e),f&&f.d(s)}}}function H(l){let e,t=l[10]&&F(l);return{c(){t&&t.c()},m(i,n){t&&t.m(i,n),e=!0},p(i,[n]){i[10]&&t.p(i,n)},i(i){e||(k(t),e=!0)},o(i){z(t),e=!1},d(i){t&&t.d(i)}}}function N(l,e,t){let i,n,{$$slots:o={},$$scope:f}=e,{style:_={}}=e,{elem_id:a=""}=e,{elem_classes:s=[]}=e,{variant:u="solid"}=e,{border_mode:b="base"}=e,{padding:g=!0}=e,{type:v="normal"}=e,{test_id:r=void 0}=e,{disable:h=!1}=e,{explicit_call:w=!1}=e,{visible:B=!0}=e,{allow_overflow:A=!0}=e,M=v==="fieldset"?"fieldset":"div";return T("BLOCK_KEY"),l.$$set=d=>{"style"in d&&t(11,_=d.style),"elem_id"in d&&t(0,a=d.elem_id),"elem_classes"in d&&t(1,s=d.elem_classes),"variant"in d&&t(2,u=d.variant),"border_mode"in d&&t(3,b=d.border_mode),"padding"in d&&t(4,g=d.padding),"type"in d&&t(12,v=d.type),"test_id"in d&&t(5,r=d.test_id),"disable"in d&&t(13,h=d.disable),"explicit_call"in d&&t(14,w=d.explicit_call),"visible"in d&&t(6,B=d.visible),"allow_overflow"in d&&t(7,A=d.allow_overflow),"$$scope"in d&&t(15,f=d.$$scope)},l.$$.update=()=>{l.$$.dirty&26624&&t(9,{styles:i}=w?j(_,[]):h?j({container:!1},["container"]):{styles:""},i),l.$$.dirty&2048&&t(8,n=(typeof _.height=="number"?`height: ${_.height}px; `:"")+(typeof _.width=="number"?`width: 
${_.width}px;`:""))},[a,s,u,b,g,r,B,A,n,i,M,_,v,h,w,f,o]}class x extends q{constructor(e){super(),E(this,e,N,H,I,{style:11,elem_id:0,elem_classes:1,variant:2,border_mode:3,padding:4,type:12,test_id:5,disable:13,explicit_call:14,visible:6,allow_overflow:7})}}function P(l){let e,t,i,n,o;const f=l[9].default,_=K(f,l,l[8],null);return{c(){e=L("button"),_&&_.c(),c(e,"class",t=l[4]+" "+l[3]+" "+l[1].join(" ")+" svelte-1ipelgc"),c(e,"style",l[6]),c(e,"id",l[0]),e.disabled=l[5],m(e,"hide",!l[2])},m(a,s){O(a,e,s),_&&_.m(e,null),i=!0,n||(o=W(e,"click",l[10]),n=!0)},p(a,[s]){_&&_.p&&(!i||s&256)&&S(_,f,a,a[8],i?D(f,a[8],s,null):Y(a[8]),null),(!i||s&26&&t!==(t=a[4]+" "+a[3]+" "+a[1].join(" ")+" svelte-1ipelgc"))&&c(e,"class",t),(!i||s&64)&&c(e,"style",a[6]),(!i||s&1)&&c(e,"id",a[0]),(!i||s&32)&&(e.disabled=a[5]),(!i||s&30)&&m(e,"hide",!a[2])},i(a){i||(k(_,a),i=!0)},o(a){z(_,a),i=!1},d(a){a&&G(e),_&&_.d(a),n=!1,o()}}}function Q(l,e,t){let i,{$$slots:n={},$$scope:o}=e,{style:f={}}=e,{elem_id:_=""}=e,{elem_classes:a=[]}=e,{visible:s=!0}=e,{variant:u="secondary"}=e,{size:b=f.size||"lg"}=e,{disabled:g=!1}=e;function v(r){X.call(this,l,r)}return l.$$set=r=>{"style"in r&&t(7,f=r.style),"elem_id"in r&&t(0,_=r.elem_id),"elem_classes"in r&&t(1,a=r.elem_classes),"visible"in r&&t(2,s=r.visible),"variant"in r&&t(3,u=r.variant),"size"in r&&t(4,b=r.size),"disabled"in r&&t(5,g=r.disabled),"$$scope"in r&&t(8,o=r.$$scope)},l.$$.update=()=>{l.$$.dirty&128&&t(6,{styles:i}=j(f,["full_width"]),i)},[_,a,s,u,b,g,i,f,o,n,v]}class p extends q{constructor(e){super(),E(this,e,Q,P,I,{style:7,elem_id:0,elem_classes:1,visible:2,variant:3,size:4,disabled:5})}}export{x as B,p as a,j as g};
-//# sourceMappingURL=Button-661a0701.js.map
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-7c0e54a6.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-7c0e54a6.js
deleted file mode 100644
index 202759ba0ebbe6c26a62d07f6bc9085f7cf15891..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-7c0e54a6.js
+++ /dev/null
@@ -1,18 +0,0 @@
-const VERSION_RE = new RegExp("3.33.1/", "g");function import_fix(mod, base) {const url = new URL(mod, base); return import(`https://gradio.s3-us-west-2.amazonaws.com/3.33.1/${url.pathname?.startsWith('/') ? url.pathname.substring(1).replace(VERSION_RE, "") : url.pathname.replace(VERSION_RE, "")}`);}(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const o of document.querySelectorAll('link[rel="modulepreload"]'))n(o);new MutationObserver(o=>{for(const i of o)if(i.type==="childList")for(const s of i.addedNodes)s.tagName==="LINK"&&s.rel==="modulepreload"&&n(s)}).observe(document,{childList:!0,subtree:!0});function r(o){const i={};return o.integrity&&(i.integrity=o.integrity),o.referrerPolicy&&(i.referrerPolicy=o.referrerPolicy),o.crossOrigin==="use-credentials"?i.credentials="include":o.crossOrigin==="anonymous"?i.credentials="omit":i.credentials="same-origin",i}function n(o){if(o.ep)return;o.ep=!0;const i=r(o);fetch(o.href,i)}})();var Je={},Ne={},Te={},gr={get exports(){return Te},set exports(e){Te=e}},F=String,Wt=function(){return{isColorSupported:!1,reset:F,bold:F,dim:F,italic:F,underline:F,inverse:F,hidden:F,strikethrough:F,black:F,red:F,green:F,yellow:F,blue:F,magenta:F,cyan:F,white:F,gray:F,bgBlack:F,bgRed:F,bgGreen:F,bgYellow:F,bgBlue:F,bgMagenta:F,bgCyan:F,bgWhite:F}};gr.exports=Wt();Te.createColors=Wt;Object.defineProperty(Ne,"__esModule",{value:!0});Ne.dim=_r;Ne.default=void 0;var ce=mr(Te);function mr(e){return e&&e.__esModule?e:{default:e}}let mt=new Set;function Ke(e,t,r){typeof process<"u"&&{}.JEST_WORKER_ID||r&&mt.has(r)||(r&&mt.add(r),console.warn(""),t.forEach(n=>console.warn(e,"-",n)))}function _r(e){return ce.default.dim(e)}var hr={info(e,t){Ke(ce.default.bold(ce.default.cyan("info")),...Array.isArray(e)?[e]:[t,e])},warn(e,t){Ke(ce.default.bold(ce.default.yellow("warn")),...Array.isArray(e)?[e]:[t,e])},risk(e,t){Ke(ce.default.bold(ce.default.magenta("risk")),...Array.isArray(e)?[e]:[t,e])}};Ne.default=hr;Object.defineProperty(Je,"__esModule",{value:!0});Je.default=void 0;var br=wr(Ne);function wr(e){return e&&e.__esModule?e:{default:e}}function ke({version:e,from:t,to:r}){br.default.warn(`${t}-color-renamed`,[`As of Tailwind CSS ${e}, \`${t}\` has been renamed to \`${r}\`.`,"Update your configuration file to silence this warning."])}var 
yr={inherit:"inherit",current:"currentColor",transparent:"transparent",black:"#000",white:"#fff",slate:{50:"#f8fafc",100:"#f1f5f9",200:"#e2e8f0",300:"#cbd5e1",400:"#94a3b8",500:"#64748b",600:"#475569",700:"#334155",800:"#1e293b",900:"#0f172a"},gray:{50:"#f9fafb",100:"#f3f4f6",200:"#e5e7eb",300:"#d1d5db",400:"#9ca3af",500:"#6b7280",600:"#4b5563",700:"#374151",800:"#1f2937",900:"#111827"},zinc:{50:"#fafafa",100:"#f4f4f5",200:"#e4e4e7",300:"#d4d4d8",400:"#a1a1aa",500:"#71717a",600:"#52525b",700:"#3f3f46",800:"#27272a",900:"#18181b"},neutral:{50:"#fafafa",100:"#f5f5f5",200:"#e5e5e5",300:"#d4d4d4",400:"#a3a3a3",500:"#737373",600:"#525252",700:"#404040",800:"#262626",900:"#171717"},stone:{50:"#fafaf9",100:"#f5f5f4",200:"#e7e5e4",300:"#d6d3d1",400:"#a8a29e",500:"#78716c",600:"#57534e",700:"#44403c",800:"#292524",900:"#1c1917"},red:{50:"#fef2f2",100:"#fee2e2",200:"#fecaca",300:"#fca5a5",400:"#f87171",500:"#ef4444",600:"#dc2626",700:"#b91c1c",800:"#991b1b",900:"#7f1d1d"},orange:{50:"#fff7ed",100:"#ffedd5",200:"#fed7aa",300:"#fdba74",400:"#fb923c",500:"#f97316",600:"#ea580c",700:"#c2410c",800:"#9a3412",900:"#7c2d12"},amber:{50:"#fffbeb",100:"#fef3c7",200:"#fde68a",300:"#fcd34d",400:"#fbbf24",500:"#f59e0b",600:"#d97706",700:"#b45309",800:"#92400e",900:"#78350f"},yellow:{50:"#fefce8",100:"#fef9c3",200:"#fef08a",300:"#fde047",400:"#facc15",500:"#eab308",600:"#ca8a04",700:"#a16207",800:"#854d0e",900:"#713f12"},lime:{50:"#f7fee7",100:"#ecfccb",200:"#d9f99d",300:"#bef264",400:"#a3e635",500:"#84cc16",600:"#65a30d",700:"#4d7c0f",800:"#3f6212",900:"#365314"},green:{50:"#f0fdf4",100:"#dcfce7",200:"#bbf7d0",300:"#86efac",400:"#4ade80",500:"#22c55e",600:"#16a34a",700:"#15803d",800:"#166534",900:"#14532d"},emerald:{50:"#ecfdf5",100:"#d1fae5",200:"#a7f3d0",300:"#6ee7b7",400:"#34d399",500:"#10b981",600:"#059669",700:"#047857",800:"#065f46",900:"#064e3b"},teal:{50:"#f0fdfa",100:"#ccfbf1",200:"#99f6e4",300:"#5eead4",400:"#2dd4bf",500:"#14b8a6",600:"#0d9488",700:"#0f766e",800:"#115e59",900:"#134e4a"},cyan:{50:"#ecfeff",100:"#cffafe",200:"#a5f3fc",300:"#67e8f9",400:"#22d3ee",500:"#06b6d4",600:"#0891b2",700:"#0e7490",800:"#155e75",900:"#164e63"},sky:{50:"#f0f9ff",100:"#e0f2fe",200:"#bae6fd",300:"#7dd3fc",400:"#38bdf8",500:"#0ea5e9",600:"#0284c7",700:"#0369a1",800:"#075985",900:"#0c4a6e"},blue:{50:"#eff6ff",100:"#dbeafe",200:"#bfdbfe",300:"#93c5fd",400:"#60a5fa",500:"#3b82f6",600:"#2563eb",700:"#1d4ed8",800:"#1e40af",900:"#1e3a8a"},indigo:{50:"#eef2ff",100:"#e0e7ff",200:"#c7d2fe",300:"#a5b4fc",400:"#818cf8",500:"#6366f1",600:"#4f46e5",700:"#4338ca",800:"#3730a3",900:"#312e81"},violet:{50:"#f5f3ff",100:"#ede9fe",200:"#ddd6fe",300:"#c4b5fd",400:"#a78bfa",500:"#8b5cf6",600:"#7c3aed",700:"#6d28d9",800:"#5b21b6",900:"#4c1d95"},purple:{50:"#faf5ff",100:"#f3e8ff",200:"#e9d5ff",300:"#d8b4fe",400:"#c084fc",500:"#a855f7",600:"#9333ea",700:"#7e22ce",800:"#6b21a8",900:"#581c87"},fuchsia:{50:"#fdf4ff",100:"#fae8ff",200:"#f5d0fe",300:"#f0abfc",400:"#e879f9",500:"#d946ef",600:"#c026d3",700:"#a21caf",800:"#86198f",900:"#701a75"},pink:{50:"#fdf2f8",100:"#fce7f3",200:"#fbcfe8",300:"#f9a8d4",400:"#f472b6",500:"#ec4899",600:"#db2777",700:"#be185d",800:"#9d174d",900:"#831843"},rose:{50:"#fff1f2",100:"#ffe4e6",200:"#fecdd3",300:"#fda4af",400:"#fb7185",500:"#f43f5e",600:"#e11d48",700:"#be123c",800:"#9f1239",900:"#881337"},get lightBlue(){return ke({version:"v2.2",from:"lightBlue",to:"sky"}),this.sky},get warmGray(){return ke({version:"v3.0",from:"warmGray",to:"stone"}),this.stone},get trueGray(){return 
ke({version:"v3.0",from:"trueGray",to:"neutral"}),this.neutral},get coolGray(){return ke({version:"v3.0",from:"coolGray",to:"gray"}),this.gray},get blueGray(){return ke({version:"v3.0",from:"blueGray",to:"slate"}),this.slate}};Je.default=yr;let Xe=Je;var _t=(Xe.__esModule?Xe:{default:Xe}).default;const Zn=["red","green","blue","yellow","purple","teal","orange","cyan","lime","pink"],vr=[{color:"red",primary:600,secondary:100},{color:"green",primary:600,secondary:100},{color:"blue",primary:600,secondary:100},{color:"yellow",primary:500,secondary:100},{color:"purple",primary:600,secondary:100},{color:"teal",primary:600,secondary:100},{color:"orange",primary:600,secondary:100},{color:"cyan",primary:600,secondary:100},{color:"lime",primary:500,secondary:100},{color:"pink",primary:600,secondary:100}],Qn=vr.reduce((e,{color:t,primary:r,secondary:n})=>({...e,[t]:{primary:_t[t][r],secondary:_t[t][n]}}),{});function Be(e,t){if(document.querySelector(`link[href='${e}']`))return Promise.resolve();const n=document.createElement("link");return n.rel="stylesheet",n.href=e,t.appendChild(n),new Promise((o,i)=>{n.addEventListener("load",()=>o()),n.addEventListener("error",()=>{console.error(`Unable to preload CSS for ${e}`),o()})})}const kr="modulepreload",zr=function(e){return"https://gradio.s3-us-west-2.amazonaws.com/3.33.1/"+e},ht={},Fe=function(t,r,n){if(!r||r.length===0)return t();const o=document.getElementsByTagName("link");return Promise.all(r.map(i=>{if(i=zr(i),i in ht)return;ht[i]=!0;const s=i.endsWith(".css"),a=s?'[rel="stylesheet"]':"";if(!!n)for(let f=o.length-1;f>=0;f--){const u=o[f];if(u.href===i&&(!s||u.rel==="stylesheet"))return}else if(document.querySelector(`link[href="${i}"]${a}`))return;const l=document.createElement("link");if(l.rel=s?"stylesheet":kr,s||(l.as="script",l.crossOrigin=""),l.href=i,document.head.appendChild(l),s)return new Promise((f,u)=>{l.addEventListener("load",f),l.addEventListener("error",()=>u(new Error(`Unable to preload CSS for ${i}`)))})})).then(()=>t())};function V(){}const dt=e=>e;function Ht(e,t){for(const r in t)e[r]=t[r];return e}function Jt(e){return e()}function bt(){return Object.create(null)}function ie(e){e.forEach(Jt)}function we(e){return typeof e=="function"}function je(e,t){return e!=e?t==t:e!==t||e&&typeof e=="object"||typeof e=="function"}let Le;function xr(e,t){return Le||(Le=document.createElement("a")),Le.href=t,e===Le.href}function Ar(e){return Object.keys(e).length===0}function Zt(e,...t){if(e==null)return V;const r=e.subscribe(...t);return r.unsubscribe?()=>r.unsubscribe():r}function Re(e,t,r){e.$$.on_destroy.push(Zt(t,r))}function Qt(e,t,r,n){if(e){const o=Kt(e,t,r,n);return e[0](o)}}function Kt(e,t,r,n){return e[1]&&n?Ht(r.ctx.slice(),e[1](n(t))):r.ctx}function Xt(e,t,r,n){if(e[2]&&n){const o=e[2](n(r));if(t.dirty===void 0)return o;if(typeof o=="object"){const i=[],s=Math.max(t.dirty.length,o.length);for(let a=0;a32){const t=[],r=e.ctx.length/32;for(let n=0;nwindow.performance.now():()=>Date.now(),pt=er?e=>requestAnimationFrame(e):V;const _e=new Set;function tr(e){_e.forEach(t=>{t.c(e)||(_e.delete(t),t.f())}),_e.size!==0&&pt(tr)}function gt(e){let t;return _e.size===0&&pt(tr),{promise:new Promise(r=>{_e.add(t={c:e,f:r})}),abort(){_e.delete(t)}}}function x(e,t){e.appendChild(t)}function rr(e){if(!e)return document;const t=e.getRootNode?e.getRootNode():e.ownerDocument;return t&&t.host?t:e.ownerDocument}function Er(e){const t=L("style");return Sr(rr(e),t),t.sheet}function Sr(e,t){return x(e.head||e,t),t.sheet}function 
z(e,t,r){e.insertBefore(t,r||null)}function v(e){e.parentNode&&e.parentNode.removeChild(e)}function nr(e,t){for(let r=0;re.removeEventListener(t,r,n)}function $n(e){return function(t){return t.preventDefault(),e.call(this,t)}}function Nr(e){return function(t){return t.stopPropagation(),e.call(this,t)}}function _(e,t,r){r==null?e.removeAttribute(t):e.getAttribute(t)!==r&&e.setAttribute(t,r)}function jr(e,t){const r=Object.getOwnPropertyDescriptors(e.__proto__);for(const n in t)t[n]==null?e.removeAttribute(n):n==="style"?e.style.cssText=t[n]:n==="__value"?e.value=e[n]=t[n]:r[n]&&r[n].set?e[n]=t[n]:_(e,n,t[n])}function qr(e,t){Object.keys(t).forEach(r=>{Cr(e,r,t[r])})}function Cr(e,t,r){t in e?e[t]=typeof e[t]=="boolean"&&r===""?!0:r:_(e,t,r)}function eo(e){return/-/.test(e)?qr:jr}function to(e){let t;return{p(...r){t=r,t.forEach(n=>e.push(n))},r(){t.forEach(r=>e.splice(e.indexOf(r),1))}}}function ro(e){return e===""?null:+e}function Lr(e){return Array.from(e.childNodes)}function Y(e,t){t=""+t,e.wholeText!==t&&(e.data=t)}function no(e,t){e.value=t??""}function K(e,t,r,n){r===null?e.style.removeProperty(t):e.style.setProperty(t,r,n?"important":"")}let Me;function Mr(){if(Me===void 0){Me=!1;try{typeof window<"u"&&window.parent&&window.parent.document}catch{Me=!0}}return Me}function oo(e,t){getComputedStyle(e).position==="static"&&(e.style.position="relative");const n=L("iframe");n.setAttribute("style","display: block; position: absolute; top: 0; left: 0; width: 100%; height: 100%; overflow: hidden; border: 0; opacity: 0; pointer-events: none; z-index: -1;"),n.setAttribute("aria-hidden","true"),n.tabIndex=-1;const o=Mr();let i;return o?(n.src="data:text/html,
-
- """,
- language="html",
- )
-
-
-def show_dataframes_metrics(len_requests, len_interventions, len_solved_verified_requests, lang, show_col_3=False):
- if lang == "en":
- # with st.expander("📝 Nt3awnou Platform Description"):
- st.markdown(INTRO_TEXT_EN, unsafe_allow_html=True)
- if show_col_3:
- col1, col2, col3 = st.columns([1, 1, 1])
- else:
- col1, col2 = st.columns([1, 1])
- with col1:
- st.metric(
- "# Number of help requests",
- len_requests,
- )
- with col2:
- st.metric(
- "# Number of interventions",
- len_interventions + len_solved_verified_requests,
- )
- if show_col_3:
- with col3:
- st.metric(
- "# Number of solved requests",
- len_solved_verified_requests,
- )
- elif lang == "ar":
- # with st.expander("📝 شرح منصة نتعاونو"):
- st.markdown(INTRO_TEXT_AR, unsafe_allow_html=True)
- if show_col_3:
- col1, col2, col3 = st.columns([1, 1, 1])
- else:
- col1, col2 = st.columns([1, 1])
- with col1:
- st.metric(
- "# عدد طلبات المساعدة",
- len_requests,
- )
- with col2:
- st.metric(
- "# عدد التدخلات",
- len_interventions + len_solved_verified_requests,
- )
- if show_col_3:
- with col3:
- st.metric(
- "# عدد الطلبات المستجاب لها",
- len_solved_verified_requests,
- )
- elif lang == "fr":
- # with st.expander("📝 Description de la plateforme Nt3awnou"):
- st.markdown(INTRO_TEXT_FR, unsafe_allow_html=True)
- if show_col_3:
- col1, col2, col3 = st.columns([1, 1, 1])
- else:
- col1, col2 = st.columns([1, 1])
- with col1:
- st.metric(
- "# Nombre de demandes d'aide",
- len_requests,
- )
- with col2:
- st.metric(
- "# Nombre d'interventions",
- len_interventions + len_solved_verified_requests,
- )
- if show_col_3:
- with col3:
- st.metric(
- "# Nombre de demandes résolues",
- len_solved_verified_requests,
- )
-
-
-@st.cache_data(ttl=60 * 60 * 24)
-def cached_parse_gg_sheet(url):
- return parse_gg_sheet(url)
-
-
-def show_charts():
- st.subheader(_("📊 **Charts**"))
- col1, col2 = st.columns([1, 1])
-
- # interventions_categories
- interventions_processed_df = cached_parse_gg_sheet(INTERVENTIONS_PROCESSED_URL)
- supply_data = (
- interventions_processed_df["supplies_category"]
- .str.split(",")
- .explode()
- .str.strip("[] '")
- .dropna()
- .astype("category")
- )
- interv_fig = px.pie(supply_data, names="supplies_category", color='supplies_category', color_discrete_map=PIE_CHART_COLOR_MAP)
- interv_fig.update_layout(
- autosize=True,
- legend=dict(
- orientation="h",
- # entrywidth=40,
- yanchor="bottom",
- y=1.02,
- xanchor="right",
- x=1,
- font=dict(
- # family="Courier",
- # size=10,
- # color="black"
- ),
- itemwidth=100,
- ),
- )
- with col1:
- st.subheader(_("Supplies Categories"))
- st.plotly_chart(interv_fig, use_container_width=True)
-
- # requests_categories
- requests_processed_df = cached_parse_gg_sheet(VERIFIED_REQUESTS_PROCESSED_URL)
- need_data = (
- requests_processed_df["need_category"].str.split(",").explode().str.strip("[] '").dropna().astype("category")
- )
- req_fig = px.pie(need_data, names="need_category", color='need_category', color_discrete_map=PIE_CHART_COLOR_MAP)
- req_fig.update_layout(
- autosize=True,
- legend=dict(
- orientation="h",
- # entrywidth=40,
- yanchor="bottom",
- y=1.02,
- xanchor="right",
- x=1,
- font=dict(
- # family="Courier",
- # size=10,
- # color="black"
- ),
- itemwidth=100,
- ),
- )
- with col2:
- st.subheader(_("Needs Categories"))
- st.plotly_chart(req_fig, use_container_width=True)
-
-
-def show_donations(lang):
- st.subheader(_("📝 **Donations**"))
- if lang == "en":
- st.markdown(
- """
- Notice: We are not responsible for collecting the donations. This is the official bank account dedicated to tackling the consequences of the earthquake.""",
- unsafe_allow_html=True,
- )
- st.markdown(
- """
-
- The official bank account dedicated to tackling the consequences of the earthquake is:
-
- Account number: 126
-
- RIB: 001-810-0078000201106203-18
-
- For money transfers coming from outside Morocco:
-
- IBAN: MA64001810007800020110620318
-
- """,
- unsafe_allow_html=True,
- )
- elif lang == "ar":
- st.markdown(
- """
- ملاحظة: نحن لسنا مسؤولين عن جمع التبرعات. هذا هو الحساب البنكي الرسمي المخصص لمواجهة عواقب الزلزال.
- """,
- unsafe_allow_html=True,
- )
- st.markdown(
- """
-
-
- الحساب البنكي الرسمي المخصص لمواجهة عواقب الزلزال
-
- رقم الحساب: 126
- RIB: 001-810-0078000201106203-18
-
- للتحويلات القادمة من خارج المغرب
-
- IBAN: MA64001810007800020110620318
-
-
- """,
- unsafe_allow_html=True,
- )
- elif lang == "fr":
- st.markdown(
- """
- Remarque: Nous ne sommes pas responsables de la collecte des dons. Ceci est le compte bancaire officiel dédié à la lutte contre les conséquences du séisme.
- """,
- unsafe_allow_html=True,
- )
- st.markdown(
- """
-
-
- Le compte bancaire officiel dédié à la lutte contre les conséquences du séisme est le suivant:
-
- Numéro de compte: 126
- RIB: 001-810-0078000201106203-18
-
- Pour les transferts d'argent en provenance de l'étranger
-
- IBAN: MA64001810007800020110620318
-
- """,
- unsafe_allow_html=True,
- )
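
For context, here is a minimal editorial sketch (not part of the original Space) of the explode-and-strip pattern that `show_charts` uses to turn a comma-separated category column into pie-chart counts. The sample rows and the `need_category` column name are illustrative assumptions, not the real sheet contents.

```python
# Minimal sketch of the category pie-chart pattern used above; the sample
# DataFrame is made up for illustration.
import pandas as pd
import plotly.express as px

df = pd.DataFrame({"need_category": ["['food', 'water']", "['shelter']", "['food']"]})

# one row per category, with list brackets/quotes stripped
need_data = (
    df["need_category"]
    .str.split(",")
    .explode()
    .str.strip("[] '")
    .dropna()
    .astype("category")
)

# with no `values` column, Plotly Express counts one slice entry per row
fig = px.pie(need_data.to_frame(), names="need_category")
# fig.show()
```
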
diff --git a/spaces/okeanos/uptimefactoryai/index.html b/spaces/okeanos/uptimefactoryai/index.html
deleted file mode 100644
index 6250c2958a7186a4e64f21c02b0359ff5ecd7e97..0000000000000000000000000000000000000000
--- a/spaces/okeanos/uptimefactoryai/index.html
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/omi0k/LoRA-DreamBooth-Training-UI/README.md b/spaces/omi0k/LoRA-DreamBooth-Training-UI/README.md
deleted file mode 100644
index b61f96a3f0f5df541bd4e0dfba3a468ceb1c54e9..0000000000000000000000000000000000000000
--- a/spaces/omi0k/LoRA-DreamBooth-Training-UI/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: LoRA DreamBooth Training UI
-emoji: ⚡
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.16.2
-python_version: 3.10.9
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: lora-library/LoRA-DreamBooth-Training-UI
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/omlab/vlchecklist_demo/models/vilt/modules/objectives.py b/spaces/omlab/vlchecklist_demo/models/vilt/modules/objectives.py
deleted file mode 100644
index 0b11912bb8571f7ba9cc73ca84afb7a83a5b1443..0000000000000000000000000000000000000000
--- a/spaces/omlab/vlchecklist_demo/models/vilt/modules/objectives.py
+++ /dev/null
@@ -1,652 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import os
-import glob
-import json
-import tqdm
-import functools
-
-from torch.utils.data.distributed import DistributedSampler
-from einops import rearrange
-
-from models.vilt.modules.dist_utils import all_gather
-
-
-def cost_matrix_cosine(x, y, eps=1e-5):
- """Compute cosine distnace across every pairs of x, y (batched)
- [B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]"""
- assert x.dim() == y.dim()
- assert x.size(0) == y.size(0)
- assert x.size(2) == y.size(2)
- x_norm = F.normalize(x, p=2, dim=-1, eps=eps)
- y_norm = F.normalize(y, p=2, dim=-1, eps=eps)
- cosine_sim = x_norm.matmul(y_norm.transpose(1, 2))
- cosine_dist = 1 - cosine_sim
- return cosine_dist
-
-
-def trace(x):
- """ compute trace of input tensor (batched) """
- b, m, n = x.size()
- assert m == n
- mask = torch.eye(n, dtype=torch.bool, device=x.device).unsqueeze(0).expand_as(x)
- trace = x.masked_select(mask).contiguous().view(b, n).sum(dim=-1, keepdim=False)
- return trace
-
-
-@torch.no_grad()
-def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k):
- """ [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]"""
- b, m, n = C.size()
- sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1)
- T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
- A = torch.exp(-C.transpose(1, 2) / beta)
-
- # mask padded positions
- sigma.masked_fill_(x_pad, 0)
- joint_pad = joint_pad.transpose(1, 2)
- T.masked_fill_(joint_pad, 0)
- A.masked_fill_(joint_pad, 0)
-
- # broadcastable lengths
- x_len = x_len.unsqueeze(1).unsqueeze(2)
- y_len = y_len.unsqueeze(1).unsqueeze(2)
-
- # mask to zero out padding in delta and sigma
- x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
- y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)
-
- for _ in range(iteration):
- Q = A * T # bs * n * m
- sigma = sigma.view(b, m, 1)
- for _ in range(k):
- delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
- sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
- T = delta.view(b, n, 1) * Q * sigma
- T.masked_fill_(joint_pad, 0)
- return T
-
-
-def optimal_transport_dist(
- txt_emb, img_emb, txt_pad, img_pad, beta=0.5, iteration=50, k=1
-):
- """ [B, M, D], [B, N, D], [B, M], [B, N]"""
- cost = cost_matrix_cosine(txt_emb, img_emb)
- # mask the padded inputs
- joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
- cost.masked_fill_(joint_pad, 0)
-
- txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
- img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
-
- T = ipot(
- cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k
- )
- distance = trace(cost.matmul(T.detach()))
- return distance
-
-
-def compute_mlm(pl_module, batch):
- infer = pl_module.infer(batch, mask_text=True, mask_image=False)
- mlm_logits = pl_module.mlm_score(infer["text_feats"])
- mlm_labels = infer["text_labels"]
-
- mlm_loss = F.cross_entropy(
- mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]),
- mlm_labels.view(-1),
- ignore_index=-100,
- )
-
- ret = {
- "mlm_loss": mlm_loss,
- "mlm_logits": mlm_logits,
- "mlm_labels": mlm_labels,
- "mlm_ids": infer["text_ids"],
- }
-
- phase = "train" if pl_module.training else "val"
- loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
- acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
- ret["mlm_logits"], ret["mlm_labels"]
- )
- pl_module.log(f"mlm/{phase}/loss", loss)
- pl_module.log(f"mlm/{phase}/accuracy", acc)
-
- return ret
-
-
-def compute_mpp(pl_module, batch):
- infer = pl_module.infer(batch, mask_text=False, mask_image=True)
- mpp_logits = pl_module.mpp_score(infer["image_feats"])
- mpp_logits = torch.stack(
- [
- mpp_logits[:, :, 0:256],
- mpp_logits[:, :, 256:512],
- mpp_logits[:, :, 512:768],
- ],
- dim=2,
- )
- mpp_labels = infer["image_labels"]
-
- mpp_loss = F.cross_entropy(
- mpp_logits.view(-1, 256),
- mpp_labels.view(-1),
- ignore_index=-100,
- )
-
- ret = {
- "mpp_loss": mpp_loss,
- "mpp_logits": mpp_logits,
- "mpp_labels": mpp_labels,
- }
-
- phase = "train" if pl_module.training else "val"
- loss = getattr(pl_module, f"{phase}_mpp_loss")(ret["mpp_loss"])
- acc = getattr(pl_module, f"{phase}_mpp_accuracy")(
- ret["mpp_logits"], ret["mpp_labels"]
- )
- pl_module.log(f"mpp/{phase}/loss", loss)
- pl_module.log(f"mpp/{phase}/accuracy", acc)
-
- return ret
-
-
-def compute_mppd(pl_module, batch):
- infer = pl_module.infer(batch, mask_text=False, mask_image=True)
- mppd_logits = pl_module.mppd_score(infer["image_feats"])
- mppd_labels = infer["image_labels_mppd"]
- filter_to_train = infer["image_labels"].float().mean(dim=-1) != -100
-
- labels = mppd_labels[filter_to_train]
- logits = mppd_logits[filter_to_train]
- mppd_loss = F.mse_loss(logits, labels)
-
- ret = {
- "mppd_loss": mppd_loss,
- "mppd_logits": mppd_logits,
- "mppd_labels": mppd_labels,
- }
-
- phase = "train" if pl_module.training else "val"
- loss = getattr(pl_module, f"{phase}_mppd_loss")(ret["mppd_loss"])
- pl_module.log(f"mppd/{phase}/loss", loss)
-
- return ret
-
-
-def compute_mpfr(pl_module, batch):
- infer = pl_module.infer(batch, mask_text=False, mask_image=True)
- mpfr_logits = pl_module.mpfr_score(infer["image_feats"])
- mpfr_labels = infer["image_labels_mpfr"]
- filter_to_train = infer["image_labels"].float().mean(dim=-1) != -100
-
- labels = mpfr_labels[filter_to_train]
- logits = mpfr_logits[filter_to_train]
- mpfr_loss = F.mse_loss(logits, labels)
-
- ret = {
- "mpfr_loss": mpfr_loss,
- "mpfr_logits": mpfr_logits,
- "mpfr_labels": mpfr_labels,
- }
-
- phase = "train" if pl_module.training else "val"
- loss = getattr(pl_module, f"{phase}_mpfr_loss")(ret["mpfr_loss"])
- pl_module.log(f"mpfr/{phase}/loss", loss)
-
- return ret
-
-
-def compute_itm_wpa(pl_module, batch):
- pos_len = len(batch["text"]) // 2
- neg_len = len(batch["text"]) - pos_len
- itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to(
- pl_module.device
- )
- itm_labels = itm_labels[torch.randperm(itm_labels.size(0))]
-
- itm_images = [
- torch.stack(
- [
- ti if itm_labels[i] == 1 else fi
- for i, (ti, fi) in enumerate(zip(bti, bfi))
- ]
- )
- for bti, bfi in zip(batch["image"], batch["false_image_0"])
- ]
-
- batch = {k: v for k, v in batch.items()}
- batch["image"] = itm_images
-
- infer = pl_module.infer(batch, mask_text=False, mask_image=False)
-
- with torch.cuda.amp.autocast(enabled=False):
- txt_emb, img_emb = infer["text_feats"], infer["image_feats"]
- txt_mask, img_mask = infer["text_masks"].bool(), infer["image_masks"].bool()
- for i, _len in enumerate(txt_mask.sum(dim=1)):
- txt_mask[i, _len - 1] = False
- txt_mask[:, 0] = False
- img_mask[:, 0] = False
- if "deit" in pl_module.hparams.config["vit"]:
- img_mask[:, 1] = False
- txt_pad, img_pad = ~txt_mask, ~img_mask
-
- cost = cost_matrix_cosine(txt_emb.float(), img_emb.float())
- joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
- cost.masked_fill_(joint_pad, 0)
-
- txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(
- dtype=cost.dtype
- )
- img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(
- dtype=cost.dtype
- )
- T = ipot(
- cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, 0.5, 50, 1
- )
- distance = trace(cost.matmul(T.detach()))
-
- dist_pos = distance.masked_select(itm_labels == 1)
- dist_neg = distance.masked_select(itm_labels == 0)
- ot_loss = (dist_pos.sum() - dist_neg.sum()) / (dist_pos.size(0) + dist_neg.size(0))
-
- itm_logits = pl_module.itm_score(infer["cls_feats"])
- itm_loss = F.cross_entropy(itm_logits, itm_labels.long())
-
- ret = {
- "itm_loss": itm_loss,
- "itm_wpa_loss": 0.1 * ot_loss,
- "itm_logits": itm_logits,
- "itm_labels": itm_labels,
- }
-
- phase = "train" if pl_module.training else "val"
- loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
- wpa_loss = getattr(pl_module, f"{phase}_itm_wpa_loss")(ret["itm_wpa_loss"])
- acc = getattr(pl_module, f"{phase}_itm_accuracy")(
- ret["itm_logits"], ret["itm_labels"]
- )
- pl_module.log(f"itm/{phase}/loss", loss)
- pl_module.log(f"itm/{phase}/wpa_loss", wpa_loss)
- pl_module.log(f"itm/{phase}/accuracy", acc)
-
- return ret
-
-
-def compute_imgcls(pl_module, batch):
- infer = pl_module.infer(batch, mask_text=False, mask_image=False)
- imgcls_logits = pl_module.img_classifier(infer["cls_feats"])
- imgcls_labels = batch["label"]
- imgcls_labels = torch.tensor(imgcls_labels).to(pl_module.device).long()
- imgcls_loss = F.cross_entropy(imgcls_logits, imgcls_labels)
-
- ret = {
- "imgcls_loss": imgcls_loss,
- "imgcls_logits": imgcls_logits,
- "imgcls_labels": imgcls_labels,
- }
-
- phase = "train" if pl_module.training else "val"
- loss = getattr(pl_module, f"{phase}_imgcls_loss")(ret["imgcls_loss"])
- acc = getattr(pl_module, f"{phase}_imgcls_accuracy")(
- ret["imgcls_logits"], ret["imgcls_labels"]
- )
- pl_module.log(f"imgcls/{phase}/loss", loss)
- pl_module.log(f"imgcls/{phase}/accuracy", acc)
-
- return ret
-
-
-def compute_vqa(pl_module, batch):
- infer = pl_module.infer(batch, mask_text=False, mask_image=False)
- vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
- vqa_targets = torch.zeros(
- len(vqa_logits), pl_module.hparams.config["vqav2_label_size"]
- ).to(pl_module.device)
-
- vqa_labels = batch["vqa_labels"]
- vqa_scores = batch["vqa_scores"]
-
- for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)):
- for l, s in zip(_label, _score):
- vqa_targets[i, l] = s
-
- vqa_loss = (
- F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
- * vqa_targets.shape[1]
- ) # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
-
- ret = {
- "vqa_loss": vqa_loss,
- "vqa_logits": vqa_logits,
- "vqa_targets": vqa_targets,
- "vqa_labels": vqa_labels,
- "vqa_scores": vqa_scores,
- }
-
- phase = "train" if pl_module.training else "val"
- loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
- score = getattr(pl_module, f"{phase}_vqa_score")(
- ret["vqa_logits"], ret["vqa_targets"]
- )
- pl_module.log(f"vqa/{phase}/loss", loss)
- pl_module.log(f"vqa/{phase}/score", score)
-
- return ret
-
-
-def compute_nlvr2(pl_module, batch):
- infer1 = pl_module.infer(
- batch, mask_text=False, mask_image=False, image_token_type_idx=1
- )
- infer2 = pl_module.infer(
- batch, mask_text=False, mask_image=False, image_token_type_idx=2
- )
-
- cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
- nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)
-
- nlvr2_labels = batch["answers"]
- nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
- nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels)
-
- ret = {
- "nlvr2_loss": nlvr2_loss,
- "nlvr2_logits": nlvr2_logits,
- "nlvr2_labels": nlvr2_labels,
- }
-
- phase = "train" if pl_module.training else "val"
-
- if phase == "train":
- loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
- acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
- ret["nlvr2_logits"], ret["nlvr2_labels"]
- )
- pl_module.log(f"nlvr2/{phase}/loss", loss)
- pl_module.log(f"nlvr2/{phase}/accuracy", acc)
- else:
- dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
- test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
-
- if dev_batches:
- dev_loss = getattr(pl_module, f"dev_nlvr2_loss")(
- F.cross_entropy(
- ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
- )
- )
- dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")(
- ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
- )
- pl_module.log(f"nlvr2/dev/loss", dev_loss)
- pl_module.log(f"nlvr2/dev/accuracy", dev_acc)
- if test_batches:
- test_loss = getattr(pl_module, f"test_nlvr2_loss")(
- F.cross_entropy(
- ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
- )
- )
- test_acc = getattr(pl_module, f"test_nlvr2_accuracy")(
- ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
- )
- pl_module.log(f"nlvr2/test/loss", test_loss)
- pl_module.log(f"nlvr2/test/accuracy", test_acc)
-
- return ret
-
-
-def compute_irtr(pl_module, batch):
- is_training_phase = pl_module.training
-
- _bs, _c, _h, _w = batch["image"][0].shape
- false_len = pl_module.hparams.config["draw_false_text"]
- text_ids = torch.stack(
- [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
- )
- text_masks = torch.stack(
- [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
- )
- text_labels = torch.stack(
- [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
- )
-
- text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
- text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
- text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
- images = batch["image"][0].unsqueeze(1).expand(_bs, false_len + 1, _c, _h, _w)
-
- infer = pl_module.infer(
- {
- "image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")],
- "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
- "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
- "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
- }
- )
- score = pl_module.rank_output(infer["cls_feats"])[:, 0]
- score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
- answer = torch.zeros(_bs).to(score).long()
- irtr_loss = F.cross_entropy(score, answer)
-
- ret = {
- "irtr_loss": irtr_loss,
- }
-
- phase = "train" if pl_module.training else "val"
- irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
-
- pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss)
-
- return ret
-
-
-@torch.no_grad()
-def compute_irtr_recall(pl_module):
- text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
- text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
- text_loader = torch.utils.data.DataLoader(
- text_dset,
- batch_size=64,
- num_workers=pl_module.hparams.config["num_workers"],
- pin_memory=True,
- collate_fn=functools.partial(
- text_dset.collate,
- mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
- ),
- )
-
- image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
- image_only=True
- )
- image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
- dist_sampler = DistributedSampler(image_dset, shuffle=False)
- image_loader = torch.utils.data.DataLoader(
- image_dset,
- batch_size=1,
- num_workers=pl_module.hparams.config["num_workers"],
- sampler=dist_sampler,
- pin_memory=True,
- collate_fn=functools.partial(
- image_dset.collate,
- mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
- ),
- )
-
- text_preload = list()
- for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
- text_preload.append(
- {
- "text_ids": _b["text_ids"].to(pl_module.device),
- "text_masks": _b["text_masks"].to(pl_module.device),
- "text_labels": _b["text_labels"].to(pl_module.device),
- "img_index": _b["img_index"],
- }
- )
-
- tiids = list()
- for pre in text_preload:
- tiids += pre["img_index"]
- tiids = torch.tensor(tiids)
-
- image_preload = list()
- for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
- (ie, im, _, _) = pl_module.transformer.visual_embed(
- _b["image"][0].to(pl_module.device),
- max_image_len=pl_module.hparams.config["max_image_len"],
- mask_it=False,
- )
- image_preload.append((ie, im, _b["img_index"][0]))
-
- rank_scores = list()
- rank_iids = list()
-
- for img_batch in tqdm.tqdm(image_preload, desc="rank loop"):
- _ie, _im, _iid = img_batch
- _, l, c = _ie.shape
-
- img_batch_score = list()
- for txt_batch in text_preload:
- fblen = len(txt_batch["text_ids"])
- ie = _ie.expand(fblen, l, c)
- im = _im.expand(fblen, l)
-
- with torch.cuda.amp.autocast():
- score = pl_module.rank_output(
- pl_module.infer(
- {
- "text_ids": txt_batch["text_ids"],
- "text_masks": txt_batch["text_masks"],
- "text_labels": txt_batch["text_labels"],
- },
- image_embeds=ie,
- image_masks=im,
- )["cls_feats"]
- )[:, 0]
-
- img_batch_score.append(score)
-
- img_batch_score = torch.cat(img_batch_score)
- rank_scores.append(img_batch_score.cpu().tolist())
- rank_iids.append(_iid)
-
- torch.distributed.barrier()
- gather_rank_scores = all_gather(rank_scores)
- gather_rank_iids = all_gather(rank_iids)
-
- iids = torch.tensor(gather_rank_iids)
- iids = iids.view(-1)
- scores = torch.tensor(gather_rank_scores)
- scores = scores.view(len(iids), -1)
-
- topk10 = scores.topk(10, dim=1)
- topk5 = scores.topk(5, dim=1)
- topk1 = scores.topk(1, dim=1)
- topk10_iids = tiids[topk10.indices]
- topk5_iids = tiids[topk5.indices]
- topk1_iids = tiids[topk1.indices]
-
- tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
- tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
- tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
-
- topk10 = scores.topk(10, dim=0)
- topk5 = scores.topk(5, dim=0)
- topk1 = scores.topk(1, dim=0)
- topk10_iids = iids[topk10.indices]
- topk5_iids = iids[topk5.indices]
- topk1_iids = iids[topk1.indices]
-
- ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
- ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
- ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
-
- return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
-
-
-def init_weights(module):
- if isinstance(module, (nn.Linear, nn.Embedding)):
- module.weight.data.normal_(mean=0.0, std=0.02)
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
- if isinstance(module, nn.Linear) and module.bias is not None:
- module.bias.data.zero_()
-
-
-def vqa_test_step(pl_module, batch, output):
- id2answer = (
- pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer
- if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts
- else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer
- )
- vqa_logits = output["vqa_logits"]
- vqa_preds = vqa_logits.argmax(dim=-1)
- vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
- questions = batch["text"]
- qids = batch["qid"]
- return {"qids": qids, "preds": vqa_preds}
-
-
-def arc_test_step(pl_module, batch, output):
- return output
-
-
-def vqa_test_wrapup(outs, model_name):
- rank = torch.distributed.get_rank()
- qids, preds = list(), list()
- for out in outs:
- qids += out["qids"]
- preds += out["preds"]
-
- rets = list()
- for qid, pred in zip(qids, preds):
- rets.append({"question_id": qid, "answer": pred})
- with open(f"vqa_submit_{rank}.json", "w") as fp:
- json.dump(rets, fp, indent=4)
-
- torch.distributed.barrier()
-
- if rank == 0:
- jsons = list()
- paths = list(glob.glob("vqa_submit_*.json"))
- for path in paths:
- with open(path, "r") as fp:
- jsons += json.load(fp)
- os.makedirs("result", exist_ok=True)
- with open(f"result/vqa_submit_{model_name}.json", "w") as fp:
- json.dump(jsons, fp, indent=4)
-
- torch.distributed.barrier()
- os.remove(f"vqa_submit_{rank}.json")
-
-
-def arc_test_wrapup(outs, caplen, model_name):
- rank = torch.distributed.get_rank()
- iids, captions = list(), list()
- for out in outs:
- iids += out["iid"]
- captions += out["captions"]
-
- rets = list()
- for iid, caption in zip(iids, captions):
- rets.append({"image_id": iid, "caption": caption})
- with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp:
- json.dump(rets, fp, indent=4)
-
- torch.distributed.barrier()
-
- if rank == 0:
- jsons = list()
- paths = list(glob.glob(f"coco_cap_len{caplen}_*.json"))
- for path in paths:
- with open(path, "r") as fp:
- jsons += json.load(fp)
- os.makedirs("result/arc", exist_ok=True)
- jsons = sorted(jsons, key=lambda x: x["image_id"])
- with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp:
- json.dump(jsons, fp, indent=4)
-
- torch.distributed.barrier()
- os.remove(f"coco_cap_len{caplen}_{rank}.json")
diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py
deleted file mode 100644
index 0e9eb9806dc1ecc04bb30f877c5e546bbffa0d9e..0000000000000000000000000000000000000000
--- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py
+++ /dev/null
@@ -1,547 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from dataclasses import dataclass
-from typing import List, Optional, Union
-
-import numpy as np
-import PIL
-import torch
-from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection
-
-from ...models import PriorTransformer
-from ...schedulers import UnCLIPScheduler
-from ...utils import (
- BaseOutput,
- logging,
- replace_example_docstring,
-)
-from ...utils.torch_utils import randn_tensor
-from ..pipeline_utils import DiffusionPipeline
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```py
- >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
- >>> import torch
-
- >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
- >>> pipe_prior.to("cuda")
-
- >>> prompt = "red cat, 4k photo"
- >>> out = pipe_prior(prompt)
- >>> image_emb = out.image_embeds
- >>> negative_image_emb = out.negative_image_embeds
-
- >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
- >>> pipe.to("cuda")
-
- >>> image = pipe(
- ... prompt,
- ... image_embeds=image_emb,
- ... negative_image_embeds=negative_image_emb,
- ... height=768,
- ... width=768,
- ... num_inference_steps=100,
- ... ).images
-
- >>> image[0].save("cat.png")
- ```
-"""
-
-EXAMPLE_INTERPOLATE_DOC_STRING = """
- Examples:
- ```py
- >>> from diffusers import KandinskyPriorPipeline, KandinskyPipeline
- >>> from diffusers.utils import load_image
- >>> import PIL
-
- >>> import torch
- >>> from torchvision import transforms
-
- >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(
- ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
- ... )
- >>> pipe_prior.to("cuda")
-
- >>> img1 = load_image(
- ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
- ... "/kandinsky/cat.png"
- ... )
-
- >>> img2 = load_image(
- ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
- ... "/kandinsky/starry_night.jpeg"
- ... )
-
- >>> images_texts = ["a cat", img1, img2]
- >>> weights = [0.3, 0.3, 0.4]
- >>> image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights)
-
- >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
- >>> pipe.to("cuda")
-
- >>> image = pipe(
- ... "",
- ... image_embeds=image_emb,
- ... negative_image_embeds=zero_image_emb,
- ... height=768,
- ... width=768,
- ... num_inference_steps=150,
- ... ).images[0]
-
- >>> image.save("starry_cat.png")
- ```
-"""
-
-
-@dataclass
-class KandinskyPriorPipelineOutput(BaseOutput):
- """
- Output class for KandinskyPriorPipeline.
-
- Args:
- image_embeds (`torch.FloatTensor` or `np.ndarray`):
- CLIP image embeddings for the text prompt
- negative_image_embeds (`torch.FloatTensor` or `np.ndarray`):
- CLIP image embeddings for the unconditional tokens
- """
-
- image_embeds: Union[torch.FloatTensor, np.ndarray]
- negative_image_embeds: Union[torch.FloatTensor, np.ndarray]
-
-
-class KandinskyPriorPipeline(DiffusionPipeline):
- """
- Pipeline for generating image prior for Kandinsky
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- prior ([`PriorTransformer`]):
- The canonical unCLIP prior to approximate the image embedding from the text embedding.
- image_encoder ([`CLIPVisionModelWithProjection`]):
- Frozen image-encoder.
- text_encoder ([`CLIPTextModelWithProjection`]):
- Frozen text-encoder.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- scheduler ([`UnCLIPScheduler`]):
- A scheduler to be used in combination with `prior` to generate image embedding.
- """
-
- _exclude_from_cpu_offload = ["prior"]
- model_cpu_offload_seq = "text_encoder->prior"
-
- def __init__(
- self,
- prior: PriorTransformer,
- image_encoder: CLIPVisionModelWithProjection,
- text_encoder: CLIPTextModelWithProjection,
- tokenizer: CLIPTokenizer,
- scheduler: UnCLIPScheduler,
- image_processor: CLIPImageProcessor,
- ):
- super().__init__()
-
- self.register_modules(
- prior=prior,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- scheduler=scheduler,
- image_encoder=image_encoder,
- image_processor=image_processor,
- )
-
- @torch.no_grad()
- @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING)
- def interpolate(
- self,
- images_and_prompts: List[Union[str, PIL.Image.Image, torch.FloatTensor]],
- weights: List[float],
- num_images_per_prompt: int = 1,
- num_inference_steps: int = 25,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- negative_prior_prompt: Optional[str] = None,
- negative_prompt: str = "",
- guidance_scale: float = 4.0,
- device=None,
- ):
- """
- Function invoked when using the prior pipeline for interpolation.
-
- Args:
- images_and_prompts (`List[Union[str, PIL.Image.Image, torch.FloatTensor]]`):
- list of prompts and images to guide the image generation.
- weights (`List[float]`):
- list of weights for each condition in `images_and_prompts`
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- num_inference_steps (`int`, *optional*, defaults to 25):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will be generated by sampling using the supplied random `generator`.
- negative_prior_prompt (`str`, *optional*):
- The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if
- `guidance_scale` is less than `1`).
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if
- `guidance_scale` is less than `1`).
- guidance_scale (`float`, *optional*, defaults to 4.0):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
-
- Examples:
-
- Returns:
- [`KandinskyPriorPipelineOutput`] or `tuple`
- """
-
- device = device or self.device
-
- if len(images_and_prompts) != len(weights):
- raise ValueError(
- f"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length"
- )
-
- image_embeddings = []
- for cond, weight in zip(images_and_prompts, weights):
- if isinstance(cond, str):
- image_emb = self(
- cond,
- num_inference_steps=num_inference_steps,
- num_images_per_prompt=num_images_per_prompt,
- generator=generator,
- latents=latents,
- negative_prompt=negative_prior_prompt,
- guidance_scale=guidance_scale,
- ).image_embeds
-
- elif isinstance(cond, (PIL.Image.Image, torch.Tensor)):
- if isinstance(cond, PIL.Image.Image):
- cond = (
- self.image_processor(cond, return_tensors="pt")
- .pixel_values[0]
- .unsqueeze(0)
- .to(dtype=self.image_encoder.dtype, device=device)
- )
-
- image_emb = self.image_encoder(cond)["image_embeds"]
-
- else:
- raise ValueError(
- f"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}"
- )
-
- image_embeddings.append(image_emb * weight)
-
- image_emb = torch.cat(image_embeddings).sum(dim=0, keepdim=True)
-
- out_zero = self(
- negative_prompt,
- num_inference_steps=num_inference_steps,
- num_images_per_prompt=num_images_per_prompt,
- generator=generator,
- latents=latents,
- negative_prompt=negative_prior_prompt,
- guidance_scale=guidance_scale,
- )
- zero_image_emb = out_zero.negative_image_embeds if negative_prompt == "" else out_zero.image_embeds
-
- return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb)
-
- # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
- def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
- if latents is None:
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- else:
- if latents.shape != shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
- latents = latents.to(device)
-
- latents = latents * scheduler.init_noise_sigma
- return latents
-
- def get_zero_embed(self, batch_size=1, device=None):
- device = device or self.device
- zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to(
- device=device, dtype=self.image_encoder.dtype
- )
- zero_image_emb = self.image_encoder(zero_img)["image_embeds"]
- zero_image_emb = zero_image_emb.repeat(batch_size, 1)
- return zero_image_emb
-
- def _encode_prompt(
- self,
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt=None,
- ):
- batch_size = len(prompt) if isinstance(prompt, list) else 1
- # get prompt text embeddings
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- text_mask = text_inputs.attention_mask.bool().to(device)
-
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
-
- text_encoder_output = self.text_encoder(text_input_ids.to(device))
-
- prompt_embeds = text_encoder_output.text_embeds
- text_encoder_hidden_states = text_encoder_output.last_hidden_state
-
- prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
- text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
- text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
- if do_classifier_free_guidance:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- uncond_text_mask = uncond_input.attention_mask.bool().to(device)
- negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
-
- negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds
- uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-
- seq_len = negative_prompt_embeds.shape[1]
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
-
- seq_len = uncond_text_encoder_hidden_states.shape[1]
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
- batch_size * num_images_per_prompt, seq_len, -1
- )
- uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
- # done duplicates
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
- text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
-
- text_mask = torch.cat([uncond_text_mask, text_mask])
-
- return prompt_embeds, text_encoder_hidden_states, text_mask
-
- @torch.no_grad()
- @replace_example_docstring(EXAMPLE_DOC_STRING)
- def __call__(
- self,
- prompt: Union[str, List[str]],
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: int = 1,
- num_inference_steps: int = 25,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- guidance_scale: float = 4.0,
- output_type: Optional[str] = "pt",
- return_dict: bool = True,
- ):
- """
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`):
- The prompt or prompts to guide the image generation.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- num_inference_steps (`int`, *optional*, defaults to 25):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will be generated by sampling using the supplied random `generator`.
- guidance_scale (`float`, *optional*, defaults to 4.0):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- output_type (`str`, *optional*, defaults to `"pt"`):
- The output format of the generated image. Choose between: `"np"` (`np.array`) or `"pt"`
- (`torch.Tensor`).
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
-
- Examples:
-
- Returns:
- [`KandinskyPriorPipelineOutput`] or `tuple`
- """
-
- if isinstance(prompt, str):
- prompt = [prompt]
- elif not isinstance(prompt, list):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if isinstance(negative_prompt, str):
- negative_prompt = [negative_prompt]
- elif not isinstance(negative_prompt, list) and negative_prompt is not None:
- raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
-
- # if the negative prompt is defined we double the batch size to
- # directly retrieve the negative prompt embedding
- if negative_prompt is not None:
- prompt = prompt + negative_prompt
- negative_prompt = 2 * negative_prompt
-
- device = self._execution_device
-
- batch_size = len(prompt)
- batch_size = batch_size * num_images_per_prompt
-
- do_classifier_free_guidance = guidance_scale > 1.0
- prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt(
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
- )
-
- # prior
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- prior_timesteps_tensor = self.scheduler.timesteps
-
- embedding_dim = self.prior.config.embedding_dim
-
- latents = self.prepare_latents(
- (batch_size, embedding_dim),
- prompt_embeds.dtype,
- device,
- generator,
- latents,
- self.scheduler,
- )
-
- for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-
- predicted_image_embedding = self.prior(
- latent_model_input,
- timestep=t,
- proj_embedding=prompt_embeds,
- encoder_hidden_states=text_encoder_hidden_states,
- attention_mask=text_mask,
- ).predicted_image_embedding
-
- if do_classifier_free_guidance:
- predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
- predicted_image_embedding = predicted_image_embedding_uncond + guidance_scale * (
- predicted_image_embedding_text - predicted_image_embedding_uncond
- )
-
- if i + 1 == prior_timesteps_tensor.shape[0]:
- prev_timestep = None
- else:
- prev_timestep = prior_timesteps_tensor[i + 1]
-
- latents = self.scheduler.step(
- predicted_image_embedding,
- timestep=t,
- sample=latents,
- generator=generator,
- prev_timestep=prev_timestep,
- ).prev_sample
-
- latents = self.prior.post_process_latents(latents)
-
- image_embeddings = latents
-
- # if the negative prompt has been defined, we split the image embedding into two
- if negative_prompt is None:
- zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device)
-
- self.maybe_free_model_hooks()
- else:
- image_embeddings, zero_embeds = image_embeddings.chunk(2)
-
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.prior_hook.offload()
-
- if output_type not in ["pt", "np"]:
- raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}")
-
- if output_type == "np":
- image_embeddings = image_embeddings.cpu().numpy()
- zero_embeds = zero_embeds.cpu().numpy()
-
- if not return_dict:
- return (image_embeddings, zero_embeds)
-
- return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds)
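
A small editorial illustration (not from the pipeline itself) of the weighted-embedding combination that `interpolate` performs: each condition's CLIP image embedding is scaled by its weight and the scaled embeddings are summed. The 768-dimensional size and the random embeddings are assumptions for illustration.

```python
# Sketch of the interpolation arithmetic in KandinskyPriorPipeline.interpolate;
# the embedding dimension (768) and random embeddings are illustrative only.
import torch

per_condition_embeds = [torch.randn(1, 768) for _ in range(3)]  # one per text/image condition
weights = [0.3, 0.3, 0.4]

weighted = [emb * w for emb, w in zip(per_condition_embeds, weights)]
image_emb = torch.cat(weighted).sum(dim=0, keepdim=True)        # same reduction as the pipeline
print(image_emb.shape)  # torch.Size([1, 768])
```
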
diff --git a/spaces/pinkq/Newbing/src/components/chat-image.tsx b/spaces/pinkq/Newbing/src/components/chat-image.tsx
deleted file mode 100644
index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000
--- a/spaces/pinkq/Newbing/src/components/chat-image.tsx
+++ /dev/null
@@ -1,170 +0,0 @@
-import {
- useEffect,
- useState,
- useCallback,
- ChangeEvent,
- ClipboardEvent,
- MouseEventHandler,
- FormEvent,
- useRef
-} from "react"
-import Image from 'next/image'
-import PasteIcon from '@/assets/images/paste.svg'
-import UploadIcon from '@/assets/images/upload.svg'
-import CameraIcon from '@/assets/images/camera.svg'
-import { useBing } from '@/lib/hooks/use-bing'
-import { cn } from '@/lib/utils'
-
-interface ChatImageProps extends Pick<ReturnType<typeof useBing>, 'uploadImage'> {}
-
-const preventDefault: MouseEventHandler = (event) => {
- event.nativeEvent.stopImmediatePropagation()
-}
-
-const toBase64 = (file: File): Promise<string> => new Promise((resolve, reject) => {
- const reader = new FileReader()
- reader.readAsDataURL(file)
- reader.onload = () => resolve(reader.result as string)
- reader.onerror = reject
-})
-
-export function ChatImage({ children, uploadImage }: React.PropsWithChildren<ChatImageProps>) {
- const videoRef = useRef<HTMLVideoElement>(null)
- const canvasRef = useRef<HTMLCanvasElement>(null)
- const mediaStream = useRef<MediaStream>()
- const [panel, setPanel] = useState('none')
-
- const upload = useCallback((url: string) => {
- if (url) {
- uploadImage(url)
- }
- setPanel('none')
- }, [panel])
-
- const onUpload = useCallback(async (event: ChangeEvent<HTMLInputElement>) => {
- const file = event.target.files?.[0]
- if (file) {
- const fileDataUrl = await toBase64(file)
- if (fileDataUrl) {
- upload(fileDataUrl)
- }
- }
- }, [])
-
- const onPaste = useCallback((event: ClipboardEvent) => {
- const pasteUrl = event.clipboardData.getData('text') ?? ''
- upload(pasteUrl)
- }, [])
-
- const onEnter = useCallback((event: FormEvent) => {
- event.preventDefault()
- event.stopPropagation()
- // @ts-ignore
- const inputUrl = event.target.elements.image.value
- if (inputUrl) {
- upload(inputUrl)
- }
- }, [])
-
- const openVideo: MouseEventHandler = async (event) => {
- event.stopPropagation()
- setPanel('camera-mode')
- }
-
- const onCapture = () => {
- if (canvasRef.current && videoRef.current) {
- const canvas = canvasRef.current
- canvas.width = videoRef.current!.videoWidth
- canvas.height = videoRef.current!.videoHeight
- canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height)
- const cameraUrl = canvas.toDataURL('image/jpeg')
- upload(cameraUrl)
- }
- }
-
- useEffect(() => {
- const handleBlur = () => {
- if (panel !== 'none') {
- setPanel('none')
- }
- }
- document.addEventListener('click', handleBlur)
- return () => {
- document.removeEventListener('click', handleBlur)
- }
- }, [panel])
-
- useEffect(() => {
- if (panel === 'camera-mode') {
- navigator.mediaDevices.getUserMedia({ video: true, audio: false })
- .then(videoStream => {
- mediaStream.current = videoStream
- if (videoRef.current) {
- videoRef.current.srcObject = videoStream
- }
- })
- } else {
- if (mediaStream.current) {
- mediaStream.current.getTracks().forEach(function(track) {
- track.stop()
- })
- mediaStream.current = undefined
- }
- }
- }, [panel])
-
- return (
-
- )
-}
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_lib.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_lib.py
deleted file mode 100644
index 2e9d8757a582b1dcdb47a34c35c6cfb3ed23ba90..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_lib.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import os
-import sys
-from itertools import product, starmap
-import distutils.command.install_lib as orig
-
-
-class install_lib(orig.install_lib):
- """Don't add compiled flags to filenames of non-Python files"""
-
- def run(self):
- self.build()
- outfiles = self.install()
- if outfiles is not None:
- # always compile, in case we have any extension stubs to deal with
- self.byte_compile(outfiles)
-
- def get_exclusions(self):
- """
- Return a collections.Sized collections.Container of paths to be
- excluded for single_version_externally_managed installations.
- """
- all_packages = (
- pkg
- for ns_pkg in self._get_SVEM_NSPs()
- for pkg in self._all_packages(ns_pkg)
- )
-
- excl_specs = product(all_packages, self._gen_exclusion_paths())
- return set(starmap(self._exclude_pkg_path, excl_specs))
-
- def _exclude_pkg_path(self, pkg, exclusion_path):
- """
- Given a package name and exclusion path within that package,
- compute the full exclusion path.
- """
- parts = pkg.split('.') + [exclusion_path]
- return os.path.join(self.install_dir, *parts)
-
- @staticmethod
- def _all_packages(pkg_name):
- """
- >>> list(install_lib._all_packages('foo.bar.baz'))
- ['foo.bar.baz', 'foo.bar', 'foo']
- """
- while pkg_name:
- yield pkg_name
- pkg_name, sep, child = pkg_name.rpartition('.')
-
- def _get_SVEM_NSPs(self):
- """
- Get namespace packages (list) but only for
- single_version_externally_managed installations and empty otherwise.
- """
- # TODO: is it necessary to short-circuit here? i.e. what's the cost
- # if get_finalized_command is called even when namespace_packages is
- # False?
- if not self.distribution.namespace_packages:
- return []
-
- install_cmd = self.get_finalized_command('install')
- svem = install_cmd.single_version_externally_managed
-
- return self.distribution.namespace_packages if svem else []
-
- @staticmethod
- def _gen_exclusion_paths():
- """
- Generate file paths to be excluded for namespace packages (bytecode
- cache files).
- """
- # always exclude the package module itself
- yield '__init__.py'
-
- yield '__init__.pyc'
- yield '__init__.pyo'
-
- if not hasattr(sys, 'implementation'):
- return
-
- base = os.path.join(
- '__pycache__', '__init__.' + sys.implementation.cache_tag)
- yield base + '.pyc'
- yield base + '.pyo'
- yield base + '.opt-1.pyc'
- yield base + '.opt-2.pyc'
-
- def copy_tree(
- self, infile, outfile,
- preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
- ):
- assert preserve_mode and preserve_times and not preserve_symlinks
- exclude = self.get_exclusions()
-
- if not exclude:
- return orig.install_lib.copy_tree(self, infile, outfile)
-
- # Exclude namespace package __init__.py* files from the output
-
- from setuptools.archive_util import unpack_directory
- from distutils import log
-
- outfiles = []
-
- def pf(src, dst):
- if dst in exclude:
- log.warn("Skipping installation of %s (namespace package)",
- dst)
- return False
-
- log.info("copying %s -> %s", src, os.path.dirname(dst))
- outfiles.append(dst)
- return dst
-
- unpack_directory(infile, outfile, pf)
- return outfiles
-
- def get_outputs(self):
- outputs = orig.install_lib.get_outputs(self)
- exclude = self.get_exclusions()
- if exclude:
- return [f for f in outputs if f not in exclude]
- return outputs
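
Note on the removed install_lib override above: it pairs every parent package of each single-version-externally-managed namespace package with the generated `__init__`/bytecode filenames to build the set of absolute paths to skip. A minimal sketch of that composition follows; the install directory and package name are illustrative values, not taken from the deleted file.

    # Sketch of how install_lib composes exclusion paths: each parent package of a
    # namespace package is paired with every generated __init__ filename.
    # 'site-packages' and 'foo.bar.baz' are made-up values for illustration.
    import os
    from itertools import product, starmap

    def all_packages(pkg_name):
        # 'foo.bar.baz' -> 'foo.bar.baz', 'foo.bar', 'foo'
        while pkg_name:
            yield pkg_name
            pkg_name, _, _ = pkg_name.rpartition('.')

    def exclusion_names():
        yield '__init__.py'
        yield '__init__.pyc'
        yield '__init__.pyo'

    def exclude(install_dir, pkg, filename):
        return os.path.join(install_dir, *pkg.split('.'), filename)

    pairs = product(all_packages('foo.bar.baz'), exclusion_names())
    excluded = set(starmap(lambda pkg, f: exclude('site-packages', pkg, f), pairs))
    print(sorted(excluded))  # site-packages/foo/__init__.py, .../foo/bar/__init__.py, ...
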
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/FontFile.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/FontFile.py
deleted file mode 100644
index 5ec0a6632e3182382467688662ebc5e6c324da91..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/FontFile.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-# The Python Imaging Library
-# $Id$
-#
-# base class for raster font file parsers
-#
-# history:
-# 1997-06-05 fl created
-# 1997-08-19 fl restrict image width
-#
-# Copyright (c) 1997-1998 by Secret Labs AB
-# Copyright (c) 1997-1998 by Fredrik Lundh
-#
-# See the README file for information on usage and redistribution.
-#
-
-
-import os
-
-from . import Image, _binary
-
-WIDTH = 800
-
-
-def puti16(fp, values):
- """Write network order (big-endian) 16-bit sequence"""
- for v in values:
- if v < 0:
- v += 65536
- fp.write(_binary.o16be(v))
-
-
-class FontFile:
- """Base class for raster font file handlers."""
-
- bitmap = None
-
- def __init__(self):
- self.info = {}
- self.glyph = [None] * 256
-
- def __getitem__(self, ix):
- return self.glyph[ix]
-
- def compile(self):
- """Create metrics and bitmap"""
-
- if self.bitmap:
- return
-
- # create bitmap large enough to hold all data
- h = w = maxwidth = 0
- lines = 1
- for glyph in self:
- if glyph:
- d, dst, src, im = glyph
- h = max(h, src[3] - src[1])
- w = w + (src[2] - src[0])
- if w > WIDTH:
- lines += 1
- w = src[2] - src[0]
- maxwidth = max(maxwidth, w)
-
- xsize = maxwidth
- ysize = lines * h
-
- if xsize == 0 and ysize == 0:
- return ""
-
- self.ysize = h
-
- # paste glyphs into bitmap
- self.bitmap = Image.new("1", (xsize, ysize))
- self.metrics = [None] * 256
- x = y = 0
- for i in range(256):
- glyph = self[i]
- if glyph:
- d, dst, src, im = glyph
- xx = src[2] - src[0]
- # yy = src[3] - src[1]
- x0, y0 = x, y
- x = x + xx
- if x > WIDTH:
- x, y = 0, y + h
- x0, y0 = x, y
- x = xx
- s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
- self.bitmap.paste(im.crop(src), s)
- self.metrics[i] = d, dst, s
-
- def save(self, filename):
- """Save font"""
-
- self.compile()
-
- # font data
- self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")
-
- # font metrics
- with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp:
- fp.write(b"PILfont\n")
- fp.write(f";;;;;;{self.ysize};\n".encode("ascii")) # HACK!!!
- fp.write(b"DATA\n")
- for id in range(256):
- m = self.metrics[id]
- if not m:
- puti16(fp, [0] * 10)
- else:
- puti16(fp, m[0] + m[1] + m[2])
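
The compile() method in the removed FontFile base class lays glyphs out left to right and wraps to a new row once the running x position passes WIDTH, with the row height taken from the tallest glyph. A small sketch of just that wrapping arithmetic, using made-up glyph widths rather than real font metrics:

    # Sketch of the row-wrapping placement used by FontFile.compile: glyphs go
    # left to right and a new row starts once the running x exceeds WIDTH.
    # The widths below are illustrative only.
    WIDTH = 800
    row_height = 20  # assumed maximum glyph height

    def place(glyph_widths):
        x = y = 0
        positions = []
        for w in glyph_widths:
            x0, y0 = x, y
            x += w
            if x > WIDTH:            # wrap to the next row, restart at the left edge
                x, y = 0, y + row_height
                x0, y0 = x, y
                x = w
            positions.append((x0, y0))
        return positions

    print(place([300, 300, 300, 100]))  # the third glyph wraps to the second row
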
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/layouts/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/layouts/__init__.py
deleted file mode 100644
index d0513b93c6aa6bd147ac88f62a432bab9f45fa88..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/layouts/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from .accordion import Accordion
-from .column import Column
-from .form import Form
-from .group import Group
-from .row import Row
-from .tabs import Tab, TabItem, Tabs
-
-__all__ = [
- "Accordion",
- "Column",
- "Form",
- "Row",
- "Group",
- "Tabs",
- "Tab",
- "TabItem",
-]
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/doc/constants.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/doc/constants.py
deleted file mode 100644
index 4db5c639047fc3de2c519b2ca1f6b8d525469900..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/doc/constants.py
+++ /dev/null
@@ -1,412 +0,0 @@
-"""
-=========
-Constants
-=========
-
-.. currentmodule:: numpy
-
-NumPy includes several constants:
-
-%(constant_list)s
-"""
-#
-# Note: the docstring is autogenerated.
-#
-import re
-import textwrap
-
-# Maintain same format as in numpy.add_newdocs
-constants = []
-def add_newdoc(module, name, doc):
- constants.append((name, doc))
-
-add_newdoc('numpy', 'pi',
- """
- ``pi = 3.1415926535897932384626433...``
-
- References
- ----------
- https://en.wikipedia.org/wiki/Pi
-
- """)
-
-add_newdoc('numpy', 'e',
- """
- Euler's constant, base of natural logarithms, Napier's constant.
-
- ``e = 2.71828182845904523536028747135266249775724709369995...``
-
- See Also
- --------
- exp : Exponential function
- log : Natural logarithm
-
- References
- ----------
- https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
-
- """)
-
-add_newdoc('numpy', 'euler_gamma',
- """
- ``γ = 0.5772156649015328606065120900824024310421...``
-
- References
- ----------
- https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
-
- """)
-
-add_newdoc('numpy', 'inf',
- """
- IEEE 754 floating point representation of (positive) infinity.
-
- Returns
- -------
- y : float
- A floating point representation of positive infinity.
-
- See Also
- --------
- isinf : Shows which elements are positive or negative infinity
-
- isposinf : Shows which elements are positive infinity
-
- isneginf : Shows which elements are negative infinity
-
- isnan : Shows which elements are Not a Number
-
- isfinite : Shows which elements are finite (not one of Not a Number,
- positive infinity and negative infinity)
-
- Notes
- -----
- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). This means that Not a Number is not equivalent to infinity.
- Also that positive infinity is not equivalent to negative infinity. But
- infinity is equivalent to positive infinity.
-
- `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
-
- Examples
- --------
- >>> np.inf
- inf
- >>> np.array([1]) / 0.
- array([ Inf])
-
- """)
-
-add_newdoc('numpy', 'nan',
- """
- IEEE 754 floating point representation of Not a Number (NaN).
-
- Returns
- -------
- y : A floating point representation of Not a Number.
-
- See Also
- --------
- isnan : Shows which elements are Not a Number.
-
- isfinite : Shows which elements are finite (not one of
- Not a Number, positive infinity and negative infinity)
-
- Notes
- -----
- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). This means that Not a Number is not equivalent to infinity.
-
- `NaN` and `NAN` are aliases of `nan`.
-
- Examples
- --------
- >>> np.nan
- nan
- >>> np.log(-1)
- nan
- >>> np.log([-1, 1, 2])
- array([ NaN, 0. , 0.69314718])
-
- """)
-
-add_newdoc('numpy', 'newaxis',
- """
- A convenient alias for None, useful for indexing arrays.
-
- Examples
- --------
- >>> newaxis is None
- True
- >>> x = np.arange(3)
- >>> x
- array([0, 1, 2])
- >>> x[:, newaxis]
- array([[0],
- [1],
- [2]])
- >>> x[:, newaxis, newaxis]
- array([[[0]],
- [[1]],
- [[2]]])
- >>> x[:, newaxis] * x
- array([[0, 0, 0],
- [0, 1, 2],
- [0, 2, 4]])
-
- Outer product, same as ``outer(x, y)``:
-
- >>> y = np.arange(3, 6)
- >>> x[:, newaxis] * y
- array([[ 0, 0, 0],
- [ 3, 4, 5],
- [ 6, 8, 10]])
-
- ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
-
- >>> x[newaxis, :].shape
- (1, 3)
- >>> x[newaxis].shape
- (1, 3)
- >>> x[None].shape
- (1, 3)
- >>> x[:, newaxis].shape
- (3, 1)
-
- """)
-
-add_newdoc('numpy', 'NZERO',
- """
- IEEE 754 floating point representation of negative zero.
-
- Returns
- -------
- y : float
- A floating point representation of negative zero.
-
- See Also
- --------
- PZERO : Defines positive zero.
-
- isinf : Shows which elements are positive or negative infinity.
-
- isposinf : Shows which elements are positive infinity.
-
- isneginf : Shows which elements are negative infinity.
-
- isnan : Shows which elements are Not a Number.
-
- isfinite : Shows which elements are finite - not one of
- Not a Number, positive infinity and negative infinity.
-
- Notes
- -----
- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). Negative zero is considered to be a finite number.
-
- Examples
- --------
- >>> np.NZERO
- -0.0
- >>> np.PZERO
- 0.0
-
- >>> np.isfinite([np.NZERO])
- array([ True])
- >>> np.isnan([np.NZERO])
- array([False])
- >>> np.isinf([np.NZERO])
- array([False])
-
- """)
-
-add_newdoc('numpy', 'PZERO',
- """
- IEEE 754 floating point representation of positive zero.
-
- Returns
- -------
- y : float
- A floating point representation of positive zero.
-
- See Also
- --------
- NZERO : Defines negative zero.
-
- isinf : Shows which elements are positive or negative infinity.
-
- isposinf : Shows which elements are positive infinity.
-
- isneginf : Shows which elements are negative infinity.
-
- isnan : Shows which elements are Not a Number.
-
- isfinite : Shows which elements are finite - not one of
- Not a Number, positive infinity and negative infinity.
-
- Notes
- -----
- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). Positive zero is considered to be a finite number.
-
- Examples
- --------
- >>> np.PZERO
- 0.0
- >>> np.NZERO
- -0.0
-
- >>> np.isfinite([np.PZERO])
- array([ True])
- >>> np.isnan([np.PZERO])
- array([False])
- >>> np.isinf([np.PZERO])
- array([False])
-
- """)
-
-add_newdoc('numpy', 'NAN',
- """
- IEEE 754 floating point representation of Not a Number (NaN).
-
- `NaN` and `NAN` are equivalent definitions of `nan`. Please use
- `nan` instead of `NAN`.
-
- See Also
- --------
- nan
-
- """)
-
-add_newdoc('numpy', 'NaN',
- """
- IEEE 754 floating point representation of Not a Number (NaN).
-
- `NaN` and `NAN` are equivalent definitions of `nan`. Please use
- `nan` instead of `NaN`.
-
- See Also
- --------
- nan
-
- """)
-
-add_newdoc('numpy', 'NINF',
- """
- IEEE 754 floating point representation of negative infinity.
-
- Returns
- -------
- y : float
- A floating point representation of negative infinity.
-
- See Also
- --------
- isinf : Shows which elements are positive or negative infinity
-
- isposinf : Shows which elements are positive infinity
-
- isneginf : Shows which elements are negative infinity
-
- isnan : Shows which elements are Not a Number
-
- isfinite : Shows which elements are finite (not one of Not a Number,
- positive infinity and negative infinity)
-
- Notes
- -----
- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). This means that Not a Number is not equivalent to infinity.
- Also that positive infinity is not equivalent to negative infinity. But
- infinity is equivalent to positive infinity.
-
- Examples
- --------
- >>> np.NINF
- -inf
- >>> np.log(0)
- -inf
-
- """)
-
-add_newdoc('numpy', 'PINF',
- """
- IEEE 754 floating point representation of (positive) infinity.
-
- Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
- `inf`. For more details, see `inf`.
-
- See Also
- --------
- inf
-
- """)
-
-add_newdoc('numpy', 'infty',
- """
- IEEE 754 floating point representation of (positive) infinity.
-
- Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
- `inf`. For more details, see `inf`.
-
- See Also
- --------
- inf
-
- """)
-
-add_newdoc('numpy', 'Inf',
- """
- IEEE 754 floating point representation of (positive) infinity.
-
- Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
- `inf`. For more details, see `inf`.
-
- See Also
- --------
- inf
-
- """)
-
-add_newdoc('numpy', 'Infinity',
- """
- IEEE 754 floating point representation of (positive) infinity.
-
- Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
- `inf`. For more details, see `inf`.
-
- See Also
- --------
- inf
-
- """)
-
-
-if __doc__:
- constants_str = []
- constants.sort()
- for name, doc in constants:
- s = textwrap.dedent(doc).replace("\n", "\n ")
-
- # Replace sections by rubrics
- lines = s.split("\n")
- new_lines = []
- for line in lines:
- m = re.match(r'^(\s+)[-=]+\s*$', line)
- if m and new_lines:
- prev = textwrap.dedent(new_lines.pop())
- new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
- new_lines.append('')
- else:
- new_lines.append(line)
- s = "\n".join(new_lines)
-
- # Done.
- constants_str.append(""".. data:: %s\n %s""" % (name, s))
- constants_str = "\n".join(constants_str)
-
- __doc__ = __doc__ % dict(constant_list=constants_str)
- del constants_str, name, doc
- del line, lines, new_lines, m, s, prev
-
-del constants, add_newdoc
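
The `if __doc__:` block at the end of the removed constants module rewrites NumPy-style section underlines into Sphinx rubric directives. A short sketch of that regex transform on a fabricated docstring, showing the before/after shape it produces:

    # Sketch of the section-to-rubric rewrite above: an underline of '-' or '='
    # turns the previous line into a '.. rubric::' directive. The docstring text
    # here is made up for illustration.
    import re
    import textwrap

    doc = """
        Some constant.

        Examples
        --------
        >>> 1 + 1
        2
    """
    s = textwrap.dedent(doc).replace("\n", "\n    ")
    new_lines = []
    for line in s.split("\n"):
        m = re.match(r'^(\s+)[-=]+\s*$', line)
        if m and new_lines:
            prev = textwrap.dedent(new_lines.pop())
            new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
            new_lines.append('')
        else:
            new_lines.append(line)
    print("\n".join(new_lines))  # 'Examples' becomes '    .. rubric:: Examples'
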
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/fft/helper.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/fft/helper.py
deleted file mode 100644
index 927ee1af1622c14c0d35bdc20660cfff77d6b6b7..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/fft/helper.py
+++ /dev/null
@@ -1,221 +0,0 @@
-"""
-Discrete Fourier Transforms - helper.py
-
-"""
-from numpy.core import integer, empty, arange, asarray, roll
-from numpy.core.overrides import array_function_dispatch, set_module
-
-# Created by Pearu Peterson, September 2002
-
-__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
-
-integer_types = (int, integer)
-
-
-def _fftshift_dispatcher(x, axes=None):
- return (x,)
-
-
-@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
-def fftshift(x, axes=None):
- """
- Shift the zero-frequency component to the center of the spectrum.
-
- This function swaps half-spaces for all axes listed (defaults to all).
- Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
-
- Parameters
- ----------
- x : array_like
- Input array.
- axes : int or shape tuple, optional
- Axes over which to shift. Default is None, which shifts all axes.
-
- Returns
- -------
- y : ndarray
- The shifted array.
-
- See Also
- --------
- ifftshift : The inverse of `fftshift`.
-
- Examples
- --------
- >>> freqs = np.fft.fftfreq(10, 0.1)
- >>> freqs
- array([ 0., 1., 2., ..., -3., -2., -1.])
- >>> np.fft.fftshift(freqs)
- array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
-
- Shift the zero-frequency component only along the second axis:
-
- >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
- >>> freqs
- array([[ 0., 1., 2.],
- [ 3., 4., -4.],
- [-3., -2., -1.]])
- >>> np.fft.fftshift(freqs, axes=(1,))
- array([[ 2., 0., 1.],
- [-4., 3., 4.],
- [-1., -3., -2.]])
-
- """
- x = asarray(x)
- if axes is None:
- axes = tuple(range(x.ndim))
- shift = [dim // 2 for dim in x.shape]
- elif isinstance(axes, integer_types):
- shift = x.shape[axes] // 2
- else:
- shift = [x.shape[ax] // 2 for ax in axes]
-
- return roll(x, shift, axes)
-
-
-@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
-def ifftshift(x, axes=None):
- """
- The inverse of `fftshift`. Although identical for even-length `x`, the
- functions differ by one sample for odd-length `x`.
-
- Parameters
- ----------
- x : array_like
- Input array.
- axes : int or shape tuple, optional
- Axes over which to calculate. Defaults to None, which shifts all axes.
-
- Returns
- -------
- y : ndarray
- The shifted array.
-
- See Also
- --------
- fftshift : Shift zero-frequency component to the center of the spectrum.
-
- Examples
- --------
- >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
- >>> freqs
- array([[ 0., 1., 2.],
- [ 3., 4., -4.],
- [-3., -2., -1.]])
- >>> np.fft.ifftshift(np.fft.fftshift(freqs))
- array([[ 0., 1., 2.],
- [ 3., 4., -4.],
- [-3., -2., -1.]])
-
- """
- x = asarray(x)
- if axes is None:
- axes = tuple(range(x.ndim))
- shift = [-(dim // 2) for dim in x.shape]
- elif isinstance(axes, integer_types):
- shift = -(x.shape[axes] // 2)
- else:
- shift = [-(x.shape[ax] // 2) for ax in axes]
-
- return roll(x, shift, axes)
-
-
-@set_module('numpy.fft')
-def fftfreq(n, d=1.0):
- """
- Return the Discrete Fourier Transform sample frequencies.
-
- The returned float array `f` contains the frequency bin centers in cycles
- per unit of the sample spacing (with zero at the start). For instance, if
- the sample spacing is in seconds, then the frequency unit is cycles/second.
-
- Given a window length `n` and a sample spacing `d`::
-
- f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
- f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
-
- Parameters
- ----------
- n : int
- Window length.
- d : scalar, optional
- Sample spacing (inverse of the sampling rate). Defaults to 1.
-
- Returns
- -------
- f : ndarray
- Array of length `n` containing the sample frequencies.
-
- Examples
- --------
- >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
- >>> fourier = np.fft.fft(signal)
- >>> n = signal.size
- >>> timestep = 0.1
- >>> freq = np.fft.fftfreq(n, d=timestep)
- >>> freq
- array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25])
-
- """
- if not isinstance(n, integer_types):
- raise ValueError("n should be an integer")
- val = 1.0 / (n * d)
- results = empty(n, int)
- N = (n-1)//2 + 1
- p1 = arange(0, N, dtype=int)
- results[:N] = p1
- p2 = arange(-(n//2), 0, dtype=int)
- results[N:] = p2
- return results * val
-
-
-@set_module('numpy.fft')
-def rfftfreq(n, d=1.0):
- """
- Return the Discrete Fourier Transform sample frequencies
- (for usage with rfft, irfft).
-
- The returned float array `f` contains the frequency bin centers in cycles
- per unit of the sample spacing (with zero at the start). For instance, if
- the sample spacing is in seconds, then the frequency unit is cycles/second.
-
- Given a window length `n` and a sample spacing `d`::
-
- f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
- f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd
-
- Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
- the Nyquist frequency component is considered to be positive.
-
- Parameters
- ----------
- n : int
- Window length.
- d : scalar, optional
- Sample spacing (inverse of the sampling rate). Defaults to 1.
-
- Returns
- -------
- f : ndarray
- Array of length ``n//2 + 1`` containing the sample frequencies.
-
- Examples
- --------
- >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
- >>> fourier = np.fft.rfft(signal)
- >>> n = signal.size
- >>> sample_rate = 100
- >>> freq = np.fft.fftfreq(n, d=1./sample_rate)
- >>> freq
- array([ 0., 10., 20., ..., -30., -20., -10.])
- >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
- >>> freq
- array([ 0., 10., 20., 30., 40., 50.])
-
- """
- if not isinstance(n, integer_types):
- raise ValueError("n should be an integer")
- val = 1.0/(n*d)
- N = n//2 + 1
- results = arange(0, N, dtype=int)
- return results * val
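
The fftfreq/rfftfreq formulas documented in the removed helper can be verified by hand. A small check, assuming a standard NumPy install; the values follow directly from the even-n formula in the docstring:

    # Worked check of the even-n formula f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n)
    # for n=8, d=0.1, i.e. a bin spacing of 1/(d*n) = 1.25 cycles per unit.
    import numpy as np

    n, d = 8, 0.1
    expected = np.array([0, 1, 2, 3, -4, -3, -2, -1]) / (d * n)
    print(expected)                    # [ 0.    1.25  2.5   3.75 -5.   -3.75 -2.5  -1.25]
    print(np.fft.fftfreq(n, d=d))      # matches the formula above
    print(np.fft.rfftfreq(n, d=d))     # [0.   1.25 2.5  3.75 5.  ] (the Nyquist bin is positive)
    print(np.fft.fftshift(expected))   # zero-frequency bin moved to the center
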
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/tests/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/reshape/tile.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/reshape/tile.py
deleted file mode 100644
index 43eea7c669ce7ba47b4a54dfe85c285adf4e58c9..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/reshape/tile.py
+++ /dev/null
@@ -1,657 +0,0 @@
-"""
-Quantilization functions and related stuff
-"""
-from __future__ import annotations
-
-from typing import (
- TYPE_CHECKING,
- Any,
- Callable,
- Literal,
-)
-
-import numpy as np
-
-from pandas._libs import (
- Timedelta,
- Timestamp,
- lib,
-)
-from pandas._libs.lib import infer_dtype
-
-from pandas.core.dtypes.common import (
- DT64NS_DTYPE,
- ensure_platform_int,
- is_bool_dtype,
- is_integer,
- is_list_like,
- is_numeric_dtype,
- is_scalar,
-)
-from pandas.core.dtypes.dtypes import (
- CategoricalDtype,
- DatetimeTZDtype,
- ExtensionDtype,
-)
-from pandas.core.dtypes.generic import ABCSeries
-from pandas.core.dtypes.missing import isna
-
-from pandas import (
- Categorical,
- Index,
- IntervalIndex,
- to_datetime,
- to_timedelta,
-)
-from pandas.core import nanops
-import pandas.core.algorithms as algos
-
-if TYPE_CHECKING:
- from pandas._typing import (
- DtypeObj,
- IntervalLeftRight,
- )
-
-
-def cut(
- x,
- bins,
- right: bool = True,
- labels=None,
- retbins: bool = False,
- precision: int = 3,
- include_lowest: bool = False,
- duplicates: str = "raise",
- ordered: bool = True,
-):
- """
- Bin values into discrete intervals.
-
- Use `cut` when you need to segment and sort data values into bins. This
- function is also useful for going from a continuous variable to a
- categorical variable. For example, `cut` could convert ages to groups of
- age ranges. Supports binning into an equal number of bins, or a
- pre-specified array of bins.
-
- Parameters
- ----------
- x : array-like
- The input array to be binned. Must be 1-dimensional.
- bins : int, sequence of scalars, or IntervalIndex
- The criteria to bin by.
-
- * int : Defines the number of equal-width bins in the range of `x`. The
- range of `x` is extended by .1% on each side to include the minimum
- and maximum values of `x`.
- * sequence of scalars : Defines the bin edges allowing for non-uniform
- width. No extension of the range of `x` is done.
- * IntervalIndex : Defines the exact bins to be used. Note that
- IntervalIndex for `bins` must be non-overlapping.
-
- right : bool, default True
- Indicates whether `bins` includes the rightmost edge or not. If
- ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
- indicate (1,2], (2,3], (3,4]. This argument is ignored when
- `bins` is an IntervalIndex.
- labels : array or False, default None
- Specifies the labels for the returned bins. Must be the same length as
- the resulting bins. If False, returns only integer indicators of the
- bins. This affects the type of the output container (see below).
- This argument is ignored when `bins` is an IntervalIndex. If True,
- raises an error. When `ordered=False`, labels must be provided.
- retbins : bool, default False
- Whether to return the bins or not. Useful when bins is provided
- as a scalar.
- precision : int, default 3
- The precision at which to store and display the bins labels.
- include_lowest : bool, default False
- Whether the first interval should be left-inclusive or not.
- duplicates : {default 'raise', 'drop'}, optional
- If bin edges are not unique, raise ValueError or drop non-uniques.
- ordered : bool, default True
- Whether the labels are ordered or not. Applies to returned types
- Categorical and Series (with Categorical dtype). If True,
- the resulting categorical will be ordered. If False, the resulting
- categorical will be unordered (labels must be provided).
-
- Returns
- -------
- out : Categorical, Series, or ndarray
- An array-like object representing the respective bin for each value
- of `x`. The type depends on the value of `labels`.
-
- * None (default) : returns a Series for Series `x` or a
- Categorical for all other inputs. The values stored within
- are Interval dtype.
-
- * sequence of scalars : returns a Series for Series `x` or a
- Categorical for all other inputs. The values stored within
- are whatever the type in the sequence is.
-
- * False : returns an ndarray of integers.
-
- bins : numpy.ndarray or IntervalIndex.
- The computed or specified bins. Only returned when `retbins=True`.
- For scalar or sequence `bins`, this is an ndarray with the computed
- bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For
- an IntervalIndex `bins`, this is equal to `bins`.
-
- See Also
- --------
- qcut : Discretize variable into equal-sized buckets based on rank
- or based on sample quantiles.
- Categorical : Array type for storing data that come from a
- fixed set of values.
- Series : One-dimensional array with axis labels (including time series).
- IntervalIndex : Immutable Index implementing an ordered, sliceable set.
-
- Notes
- -----
- Any NA values will be NA in the result. Out of bounds values will be NA in
- the resulting Series or Categorical object.
-
-    Reference :ref:`the user guide <reshaping.tile.cut>` for more examples.
-
- Examples
- --------
- Discretize into three equal-sized bins.
-
- >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
- ... # doctest: +ELLIPSIS
- [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
- Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...
-
- >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
- ... # doctest: +ELLIPSIS
- ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
- Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...
- array([0.994, 3. , 5. , 7. ]))
-
-    Discovers the same bins, but assigns them specific labels. Notice that
-    the returned Categorical's categories are `labels` and are ordered.
-
- >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
- ... 3, labels=["bad", "medium", "good"])
- ['bad', 'good', 'medium', 'medium', 'good', 'bad']
- Categories (3, object): ['bad' < 'medium' < 'good']
-
- ``ordered=False`` will result in unordered categories when labels are passed.
- This parameter can be used to allow non-unique labels:
-
- >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3,
- ... labels=["B", "A", "B"], ordered=False)
- ['B', 'B', 'A', 'A', 'B', 'B']
- Categories (2, object): ['A', 'B']
-
- ``labels=False`` implies you just want the bins back.
-
- >>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
- array([0, 1, 1, 3])
-
- Passing a Series as an input returns a Series with categorical dtype:
-
- >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
- ... index=['a', 'b', 'c', 'd', 'e'])
- >>> pd.cut(s, 3)
- ... # doctest: +ELLIPSIS
- a (1.992, 4.667]
- b (1.992, 4.667]
- c (4.667, 7.333]
- d (7.333, 10.0]
- e (7.333, 10.0]
- dtype: category
- Categories (3, interval[float64, right]): [(1.992, 4.667] < (4.667, ...
-
- Passing a Series as an input returns a Series with mapping value.
- It is used to map numerically to intervals based on bins.
-
- >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
- ... index=['a', 'b', 'c', 'd', 'e'])
- >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
- ... # doctest: +ELLIPSIS
- (a 1.0
- b 2.0
- c 3.0
- d 4.0
- e NaN
- dtype: float64,
- array([ 0, 2, 4, 6, 8, 10]))
-
-    Use the `drop` option when bins are not unique
-
- >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
- ... right=False, duplicates='drop')
- ... # doctest: +ELLIPSIS
- (a 1.0
- b 2.0
- c 3.0
- d 3.0
- e NaN
- dtype: float64,
- array([ 0, 2, 4, 6, 10]))
-
- Passing an IntervalIndex for `bins` results in those categories exactly.
- Notice that values not covered by the IntervalIndex are set to NaN. 0
- is to the left of the first bin (which is closed on the right), and 1.5
- falls between two bins.
-
- >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
- >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
- [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]]
- Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]]
- """
- # NOTE: this binning code is changed a bit from histogram for var(x) == 0
-
- original = x
- x = _preprocess_for_cut(x)
- x, dtype = _coerce_to_type(x)
-
- if not np.iterable(bins):
- if is_scalar(bins) and bins < 1:
- raise ValueError("`bins` should be a positive integer.")
-
- sz = x.size
-
- if sz == 0:
- raise ValueError("Cannot cut empty array")
-
- rng = (nanops.nanmin(x), nanops.nanmax(x))
- mn, mx = (mi + 0.0 for mi in rng)
-
- if np.isinf(mn) or np.isinf(mx):
- # GH 24314
- raise ValueError(
- "cannot specify integer `bins` when input data contains infinity"
- )
- if mn == mx: # adjust end points before binning
- mn -= 0.001 * abs(mn) if mn != 0 else 0.001
- mx += 0.001 * abs(mx) if mx != 0 else 0.001
- bins = np.linspace(mn, mx, bins + 1, endpoint=True)
- else: # adjust end points after binning
- bins = np.linspace(mn, mx, bins + 1, endpoint=True)
- adj = (mx - mn) * 0.001 # 0.1% of the range
- if right:
- bins[0] -= adj
- else:
- bins[-1] += adj
-
- elif isinstance(bins, IntervalIndex):
- if bins.is_overlapping:
- raise ValueError("Overlapping IntervalIndex is not accepted.")
-
- else:
- if isinstance(getattr(bins, "dtype", None), DatetimeTZDtype):
- bins = np.asarray(bins, dtype=DT64NS_DTYPE)
- else:
- bins = np.asarray(bins)
- bins = _convert_bin_to_numeric_type(bins, dtype)
-
- # GH 26045: cast to float64 to avoid an overflow
- if (np.diff(bins.astype("float64")) < 0).any():
- raise ValueError("bins must increase monotonically.")
-
- fac, bins = _bins_to_cuts(
- x,
- bins,
- right=right,
- labels=labels,
- precision=precision,
- include_lowest=include_lowest,
- dtype=dtype,
- duplicates=duplicates,
- ordered=ordered,
- )
-
- return _postprocess_for_cut(fac, bins, retbins, dtype, original)
-
-
-def qcut(
- x,
- q,
- labels=None,
- retbins: bool = False,
- precision: int = 3,
- duplicates: str = "raise",
-):
- """
- Quantile-based discretization function.
-
- Discretize variable into equal-sized buckets based on rank or based
- on sample quantiles. For example 1000 values for 10 quantiles would
- produce a Categorical object indicating quantile membership for each data point.
-
- Parameters
- ----------
- x : 1d ndarray or Series
- q : int or list-like of float
- Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
- array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.
- labels : array or False, default None
- Used as labels for the resulting bins. Must be of the same length as
- the resulting bins. If False, return only integer indicators of the
- bins. If True, raises an error.
- retbins : bool, optional
- Whether to return the (bins, labels) or not. Can be useful if bins
- is given as a scalar.
- precision : int, optional
- The precision at which to store and display the bins labels.
- duplicates : {default 'raise', 'drop'}, optional
- If bin edges are not unique, raise ValueError or drop non-uniques.
-
- Returns
- -------
- out : Categorical or Series or array of integers if labels is False
- The return type (Categorical or Series) depends on the input: a Series
- of type category if input is a Series else Categorical. Bins are
- represented as categories when categorical data is returned.
- bins : ndarray of floats
- Returned only if `retbins` is True.
-
- Notes
- -----
- Out of bounds values will be NA in the resulting Categorical object
-
- Examples
- --------
- >>> pd.qcut(range(5), 4)
- ... # doctest: +ELLIPSIS
- [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
- Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ...
-
- >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
- ... # doctest: +SKIP
- [good, good, medium, bad, bad]
- Categories (3, object): [good < medium < bad]
-
- >>> pd.qcut(range(5), 4, labels=False)
- array([0, 0, 1, 2, 3])
- """
- original = x
- x = _preprocess_for_cut(x)
- x, dtype = _coerce_to_type(x)
-
- quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q
-
- x_np = np.asarray(x)
- x_np = x_np[~np.isnan(x_np)]
- bins = np.quantile(x_np, quantiles)
-
- fac, bins = _bins_to_cuts(
- x,
- bins,
- labels=labels,
- precision=precision,
- include_lowest=True,
- dtype=dtype,
- duplicates=duplicates,
- )
-
- return _postprocess_for_cut(fac, bins, retbins, dtype, original)
-
-
-def _bins_to_cuts(
- x,
- bins: np.ndarray,
- right: bool = True,
- labels=None,
- precision: int = 3,
- include_lowest: bool = False,
- dtype: DtypeObj | None = None,
- duplicates: str = "raise",
- ordered: bool = True,
-):
- if not ordered and labels is None:
- raise ValueError("'labels' must be provided if 'ordered = False'")
-
- if duplicates not in ["raise", "drop"]:
- raise ValueError(
- "invalid value for 'duplicates' parameter, valid options are: raise, drop"
- )
-
- if isinstance(bins, IntervalIndex):
- # we have a fast-path here
- ids = bins.get_indexer(x)
- cat_dtype = CategoricalDtype(bins, ordered=True)
- result = Categorical.from_codes(ids, dtype=cat_dtype, validate=False)
- return result, bins
-
- unique_bins = algos.unique(bins)
- if len(unique_bins) < len(bins) and len(bins) != 2:
- if duplicates == "raise":
- raise ValueError(
- f"Bin edges must be unique: {repr(bins)}.\n"
- f"You can drop duplicate edges by setting the 'duplicates' kwarg"
- )
- bins = unique_bins
-
- side: Literal["left", "right"] = "left" if right else "right"
- ids = ensure_platform_int(bins.searchsorted(x, side=side))
-
- if include_lowest:
- ids[np.asarray(x) == bins[0]] = 1
-
- na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
- has_nas = na_mask.any()
-
- if labels is not False:
- if not (labels is None or is_list_like(labels)):
- raise ValueError(
- "Bin labels must either be False, None or passed in as a "
- "list-like argument"
- )
-
- if labels is None:
- labels = _format_labels(
- bins, precision, right=right, include_lowest=include_lowest, dtype=dtype
- )
- elif ordered and len(set(labels)) != len(labels):
- raise ValueError(
- "labels must be unique if ordered=True; pass ordered=False "
- "for duplicate labels"
- )
- else:
- if len(labels) != len(bins) - 1:
- raise ValueError(
- "Bin labels must be one fewer than the number of bin edges"
- )
-
- if not isinstance(getattr(labels, "dtype", None), CategoricalDtype):
- labels = Categorical(
- labels,
- categories=labels if len(set(labels)) == len(labels) else None,
- ordered=ordered,
- )
- # TODO: handle mismatch between categorical label order and pandas.cut order.
- np.putmask(ids, na_mask, 0)
- result = algos.take_nd(labels, ids - 1)
-
- else:
- result = ids - 1
- if has_nas:
- result = result.astype(np.float64)
- np.putmask(result, na_mask, np.nan)
-
- return result, bins
-
-
-def _coerce_to_type(x):
- """
- if the passed data is of datetime/timedelta, bool or nullable int type,
- this method converts it to numeric so that cut or qcut method can
- handle it
- """
- dtype: DtypeObj | None = None
-
- if isinstance(x.dtype, DatetimeTZDtype):
- dtype = x.dtype
- elif lib.is_np_dtype(x.dtype, "M"):
- x = to_datetime(x).astype("datetime64[ns]", copy=False)
- dtype = np.dtype("datetime64[ns]")
- elif lib.is_np_dtype(x.dtype, "m"):
- x = to_timedelta(x)
- dtype = np.dtype("timedelta64[ns]")
- elif is_bool_dtype(x.dtype):
- # GH 20303
- x = x.astype(np.int64)
- # To support cut and qcut for IntegerArray we convert to float dtype.
- # Will properly support in the future.
- # https://github.com/pandas-dev/pandas/pull/31290
- # https://github.com/pandas-dev/pandas/issues/31389
- elif isinstance(x.dtype, ExtensionDtype) and is_numeric_dtype(x.dtype):
- x = x.to_numpy(dtype=np.float64, na_value=np.nan)
-
- if dtype is not None:
- # GH 19768: force NaT to NaN during integer conversion
- x = np.where(x.notna(), x.view(np.int64), np.nan)
-
- return x, dtype
-
-
-def _convert_bin_to_numeric_type(bins, dtype: DtypeObj | None):
- """
- if the passed bin is of datetime/timedelta type,
- this method converts it to integer
-
- Parameters
- ----------
- bins : list-like of bins
- dtype : dtype of data
-
- Raises
- ------
- ValueError if bins are not of a compat dtype to dtype
- """
- bins_dtype = infer_dtype(bins, skipna=False)
- if lib.is_np_dtype(dtype, "m"):
- if bins_dtype in ["timedelta", "timedelta64"]:
- bins = to_timedelta(bins).view(np.int64)
- else:
- raise ValueError("bins must be of timedelta64 dtype")
- elif lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype):
- if bins_dtype in ["datetime", "datetime64"]:
- bins = to_datetime(bins)
- if lib.is_np_dtype(bins.dtype, "M"):
- # As of 2.0, to_datetime may give non-nano, so we need to convert
- # here until the rest of this file recognizes non-nano
- bins = bins.astype("datetime64[ns]", copy=False)
- bins = bins.view(np.int64)
- else:
- raise ValueError("bins must be of datetime64 dtype")
-
- return bins
-
-
-def _convert_bin_to_datelike_type(bins, dtype: DtypeObj | None):
- """
- Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
- datelike
-
- Parameters
- ----------
- bins : list-like of bins
- dtype : dtype of data
-
- Returns
- -------
- bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
- datelike
- """
- if isinstance(dtype, DatetimeTZDtype):
- bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz)
- elif lib.is_np_dtype(dtype, "mM"):
- bins = Index(bins.astype(np.int64), dtype=dtype)
- return bins
-
-
-def _format_labels(
- bins,
- precision: int,
- right: bool = True,
- include_lowest: bool = False,
- dtype: DtypeObj | None = None,
-):
- """based on the dtype, return our labels"""
- closed: IntervalLeftRight = "right" if right else "left"
-
- formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta]
-
- if isinstance(dtype, DatetimeTZDtype):
- formatter = lambda x: Timestamp(x, tz=dtype.tz)
- adjust = lambda x: x - Timedelta("1ns")
- elif lib.is_np_dtype(dtype, "M"):
- formatter = Timestamp
- adjust = lambda x: x - Timedelta("1ns")
- elif lib.is_np_dtype(dtype, "m"):
- formatter = Timedelta
- adjust = lambda x: x - Timedelta("1ns")
- else:
- precision = _infer_precision(precision, bins)
- formatter = lambda x: _round_frac(x, precision)
- adjust = lambda x: x - 10 ** (-precision)
-
- breaks = [formatter(b) for b in bins]
- if right and include_lowest:
- # adjust lhs of first interval by precision to account for being right closed
- breaks[0] = adjust(breaks[0])
-
- return IntervalIndex.from_breaks(breaks, closed=closed)
-
-
-def _preprocess_for_cut(x):
- """
- handles preprocessing for cut where we convert passed
- input to array, strip the index information and store it
- separately
- """
- # Check that the passed array is a Pandas or Numpy object
- # We don't want to strip away a Pandas data-type here (e.g. datetimetz)
- ndim = getattr(x, "ndim", None)
- if ndim is None:
- x = np.asarray(x)
- if x.ndim != 1:
- raise ValueError("Input array must be 1 dimensional")
-
- return x
-
-
-def _postprocess_for_cut(fac, bins, retbins: bool, dtype: DtypeObj | None, original):
- """
- handles post processing for the cut method where
- we combine the index information if the originally passed
- datatype was a series
- """
- if isinstance(original, ABCSeries):
- fac = original._constructor(fac, index=original.index, name=original.name)
-
- if not retbins:
- return fac
-
- bins = _convert_bin_to_datelike_type(bins, dtype)
-
- return fac, bins
-
-
-def _round_frac(x, precision: int):
- """
- Round the fractional part of the given number
- """
- if not np.isfinite(x) or x == 0:
- return x
- else:
- frac, whole = np.modf(x)
- if whole == 0:
- digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
- else:
- digits = precision
- return np.around(x, digits)
-
-
-def _infer_precision(base_precision: int, bins) -> int:
- """
- Infer an appropriate precision for _round_frac
- """
- for precision in range(base_precision, 20):
- levels = np.asarray([_round_frac(b, precision) for b in bins])
- if algos.unique(levels).size == bins.size:
- return precision
- return base_precision # default
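
The heart of _bins_to_cuts in the removed tile module is a searchsorted against the bin edges, followed by masking of out-of-range values and, when include_lowest is set, a correction for values equal to the left edge. A rough NumPy-only sketch of that step on made-up data; it omits the IntervalIndex fast path and all label handling:

    # Rough sketch of the searchsorted step in _bins_to_cuts: right-closed bins,
    # ids of 0 or len(bins) mark out-of-range values, and include_lowest pulls
    # values equal to bins[0] into the first bin. Data and edges are illustrative.
    import numpy as np

    x = np.array([0.0, 0.5, 2.0, 7.0, np.nan])
    bins = np.array([0.0, 1.0, 3.0, 5.0])
    include_lowest = True

    ids = bins.searchsorted(x, side="left")      # side="left" gives right-closed bins
    if include_lowest:
        ids[x == bins[0]] = 1                    # keep the exact left edge in bin 1
    na_mask = np.isnan(x) | (ids == len(bins)) | (ids == 0)

    codes = ids - 1                              # zero-based bin codes
    result = codes.astype(float)
    result[na_mask] = np.nan
    print(result)                                # [ 0.  0.  1. nan nan]
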
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/sas/sas_constants.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/sas/sas_constants.py
deleted file mode 100644
index 62c17bd03927e5f852af708e6b9ef6cf7e74d57c..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/sas/sas_constants.py
+++ /dev/null
@@ -1,310 +0,0 @@
-from __future__ import annotations
-
-from typing import Final
-
-magic: Final = (
- b"\x00\x00\x00\x00\x00\x00\x00\x00"
- b"\x00\x00\x00\x00\xc2\xea\x81\x60"
- b"\xb3\x14\x11\xcf\xbd\x92\x08\x00"
- b"\x09\xc7\x31\x8c\x18\x1f\x10\x11"
-)
-
-align_1_checker_value: Final = b"3"
-align_1_offset: Final = 32
-align_1_length: Final = 1
-align_1_value: Final = 4
-u64_byte_checker_value: Final = b"3"
-align_2_offset: Final = 35
-align_2_length: Final = 1
-align_2_value: Final = 4
-endianness_offset: Final = 37
-endianness_length: Final = 1
-platform_offset: Final = 39
-platform_length: Final = 1
-encoding_offset: Final = 70
-encoding_length: Final = 1
-dataset_offset: Final = 92
-dataset_length: Final = 64
-file_type_offset: Final = 156
-file_type_length: Final = 8
-date_created_offset: Final = 164
-date_created_length: Final = 8
-date_modified_offset: Final = 172
-date_modified_length: Final = 8
-header_size_offset: Final = 196
-header_size_length: Final = 4
-page_size_offset: Final = 200
-page_size_length: Final = 4
-page_count_offset: Final = 204
-page_count_length: Final = 4
-sas_release_offset: Final = 216
-sas_release_length: Final = 8
-sas_server_type_offset: Final = 224
-sas_server_type_length: Final = 16
-os_version_number_offset: Final = 240
-os_version_number_length: Final = 16
-os_maker_offset: Final = 256
-os_maker_length: Final = 16
-os_name_offset: Final = 272
-os_name_length: Final = 16
-page_bit_offset_x86: Final = 16
-page_bit_offset_x64: Final = 32
-subheader_pointer_length_x86: Final = 12
-subheader_pointer_length_x64: Final = 24
-page_type_offset: Final = 0
-page_type_length: Final = 2
-block_count_offset: Final = 2
-block_count_length: Final = 2
-subheader_count_offset: Final = 4
-subheader_count_length: Final = 2
-page_type_mask: Final = 0x0F00
-# Keep "page_comp_type" bits
-page_type_mask2: Final = 0xF000 | page_type_mask
-page_meta_type: Final = 0x0000
-page_data_type: Final = 0x0100
-page_mix_type: Final = 0x0200
-page_amd_type: Final = 0x0400
-page_meta2_type: Final = 0x4000
-page_comp_type: Final = 0x9000
-page_meta_types: Final = [page_meta_type, page_meta2_type]
-subheader_pointers_offset: Final = 8
-truncated_subheader_id: Final = 1
-compressed_subheader_id: Final = 4
-compressed_subheader_type: Final = 1
-text_block_size_length: Final = 2
-row_length_offset_multiplier: Final = 5
-row_count_offset_multiplier: Final = 6
-col_count_p1_multiplier: Final = 9
-col_count_p2_multiplier: Final = 10
-row_count_on_mix_page_offset_multiplier: Final = 15
-column_name_pointer_length: Final = 8
-column_name_text_subheader_offset: Final = 0
-column_name_text_subheader_length: Final = 2
-column_name_offset_offset: Final = 2
-column_name_offset_length: Final = 2
-column_name_length_offset: Final = 4
-column_name_length_length: Final = 2
-column_data_offset_offset: Final = 8
-column_data_length_offset: Final = 8
-column_data_length_length: Final = 4
-column_type_offset: Final = 14
-column_type_length: Final = 1
-column_format_text_subheader_index_offset: Final = 22
-column_format_text_subheader_index_length: Final = 2
-column_format_offset_offset: Final = 24
-column_format_offset_length: Final = 2
-column_format_length_offset: Final = 26
-column_format_length_length: Final = 2
-column_label_text_subheader_index_offset: Final = 28
-column_label_text_subheader_index_length: Final = 2
-column_label_offset_offset: Final = 30
-column_label_offset_length: Final = 2
-column_label_length_offset: Final = 32
-column_label_length_length: Final = 2
-rle_compression: Final = b"SASYZCRL"
-rdc_compression: Final = b"SASYZCR2"
-
-compression_literals: Final = [rle_compression, rdc_compression]
-
-# Incomplete list of encodings, using SAS nomenclature:
-# https://support.sas.com/documentation/onlinedoc/dfdmstudio/2.6/dmpdmsug/Content/dfU_Encodings_SAS.html
-# corresponding to the Python documentation of standard encodings
-# https://docs.python.org/3/library/codecs.html#standard-encodings
-encoding_names: Final = {
- 20: "utf-8",
- 29: "latin1",
- 30: "latin2",
- 31: "latin3",
- 32: "latin4",
- 33: "cyrillic",
- 34: "arabic",
- 35: "greek",
- 36: "hebrew",
- 37: "latin5",
- 38: "latin6",
- 39: "cp874",
- 40: "latin9",
- 41: "cp437",
- 42: "cp850",
- 43: "cp852",
- 44: "cp857",
- 45: "cp858",
- 46: "cp862",
- 47: "cp864",
- 48: "cp865",
- 49: "cp866",
- 50: "cp869",
- 51: "cp874",
- # 52: "", # not found
- # 53: "", # not found
- # 54: "", # not found
- 55: "cp720",
- 56: "cp737",
- 57: "cp775",
- 58: "cp860",
- 59: "cp863",
- 60: "cp1250",
- 61: "cp1251",
- 62: "cp1252",
- 63: "cp1253",
- 64: "cp1254",
- 65: "cp1255",
- 66: "cp1256",
- 67: "cp1257",
- 68: "cp1258",
- 118: "cp950",
- # 119: "", # not found
- 123: "big5",
- 125: "gb2312",
- 126: "cp936",
- 134: "euc_jp",
- 136: "cp932",
- 138: "shift_jis",
- 140: "euc-kr",
- 141: "cp949",
- 227: "latin8",
- # 228: "", # not found
- # 229: "" # not found
-}
-
-
-class SASIndex:
- row_size_index: Final = 0
- column_size_index: Final = 1
- subheader_counts_index: Final = 2
- column_text_index: Final = 3
- column_name_index: Final = 4
- column_attributes_index: Final = 5
- format_and_label_index: Final = 6
- column_list_index: Final = 7
- data_subheader_index: Final = 8
-
-
-subheader_signature_to_index: Final = {
- b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
- b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
- b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index,
- b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index,
- b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
- b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
- b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index,
- b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index,
- b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index,
- b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
- b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
- b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index,
- b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
- b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
- b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
- b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index,
- b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
- b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
- b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index,
- b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
- b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
- b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index,
- b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
- b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
-}
-
-
-# List of frequently used SAS date and datetime formats
-# http://support.sas.com/documentation/cdl/en/etsug/60372/HTML/default/viewer.htm#etsug_intervals_sect009.htm
-# https://github.com/epam/parso/blob/master/src/main/java/com/epam/parso/impl/SasFileConstants.java
-sas_date_formats: Final = (
- "DATE",
- "DAY",
- "DDMMYY",
- "DOWNAME",
- "JULDAY",
- "JULIAN",
- "MMDDYY",
- "MMYY",
- "MMYYC",
- "MMYYD",
- "MMYYP",
- "MMYYS",
- "MMYYN",
- "MONNAME",
- "MONTH",
- "MONYY",
- "QTR",
- "QTRR",
- "NENGO",
- "WEEKDATE",
- "WEEKDATX",
- "WEEKDAY",
- "WEEKV",
- "WORDDATE",
- "WORDDATX",
- "YEAR",
- "YYMM",
- "YYMMC",
- "YYMMD",
- "YYMMP",
- "YYMMS",
- "YYMMN",
- "YYMON",
- "YYMMDD",
- "YYQ",
- "YYQC",
- "YYQD",
- "YYQP",
- "YYQS",
- "YYQN",
- "YYQR",
- "YYQRC",
- "YYQRD",
- "YYQRP",
- "YYQRS",
- "YYQRN",
- "YYMMDDP",
- "YYMMDDC",
- "E8601DA",
- "YYMMDDN",
- "MMDDYYC",
- "MMDDYYS",
- "MMDDYYD",
- "YYMMDDS",
- "B8601DA",
- "DDMMYYN",
- "YYMMDDD",
- "DDMMYYB",
- "DDMMYYP",
- "MMDDYYP",
- "YYMMDDB",
- "MMDDYYN",
- "DDMMYYC",
- "DDMMYYD",
- "DDMMYYS",
- "MINGUO",
-)
-
-sas_datetime_formats: Final = (
- "DATETIME",
- "DTWKDATX",
- "B8601DN",
- "B8601DT",
- "B8601DX",
- "B8601DZ",
- "B8601LX",
- "E8601DN",
- "E8601DT",
- "E8601DX",
- "E8601DZ",
- "E8601LX",
- "DATEAMPM",
- "DTDATE",
- "DTMONYY",
- "DTMONYY",
- "DTWKDATX",
- "DTYEAR",
- "TOD",
- "MDYAMPM",
-)
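
The offset/length constants in the removed sas_constants module describe fixed-position fields in a SAS7BDAT header, and a reader is expected to slice the raw header bytes at each offset. A hedged sketch of that pattern on a fabricated byte string (not a real SAS file):

    # Sketch of how the offset/length constants are meant to be used: each header
    # field is a fixed slice of the raw header bytes. The header below is fake
    # zero padding, only long enough to make the slices legal.
    endianness_offset, endianness_length = 37, 1
    dataset_offset, dataset_length = 92, 64

    header = bytes(256)                           # stand-in for a real file header

    def field(buf, offset, length):
        return buf[offset:offset + length]

    endianness = field(header, endianness_offset, endianness_length)
    dataset_name = field(header, dataset_offset, dataset_length).rstrip(b"\x00 ")
    print(endianness, dataset_name)               # b'\x00' b'' for the fake header
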
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/base/test_misc.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/base/test_misc.py
deleted file mode 100644
index 3ca53c40104491f914c1813895a20d246284aa59..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/base/test_misc.py
+++ /dev/null
@@ -1,184 +0,0 @@
-import sys
-
-import numpy as np
-import pytest
-
-from pandas.compat import PYPY
-
-from pandas.core.dtypes.common import (
- is_dtype_equal,
- is_object_dtype,
-)
-
-import pandas as pd
-from pandas import (
- Index,
- Series,
-)
-import pandas._testing as tm
-
-
-def test_isnull_notnull_docstrings():
- # GH#41855 make sure its clear these are aliases
- doc = pd.DataFrame.notnull.__doc__
- assert doc.startswith("\nDataFrame.notnull is an alias for DataFrame.notna.\n")
- doc = pd.DataFrame.isnull.__doc__
- assert doc.startswith("\nDataFrame.isnull is an alias for DataFrame.isna.\n")
-
- doc = Series.notnull.__doc__
- assert doc.startswith("\nSeries.notnull is an alias for Series.notna.\n")
- doc = Series.isnull.__doc__
- assert doc.startswith("\nSeries.isnull is an alias for Series.isna.\n")
-
-
-@pytest.mark.parametrize(
- "op_name, op",
- [
- ("add", "+"),
- ("sub", "-"),
- ("mul", "*"),
- ("mod", "%"),
- ("pow", "**"),
- ("truediv", "/"),
- ("floordiv", "//"),
- ],
-)
-def test_binary_ops_docstring(frame_or_series, op_name, op):
- # not using the all_arithmetic_functions fixture with _get_opstr
- # as _get_opstr is used internally in the dynamic implementation of the docstring
- klass = frame_or_series
-
- operand1 = klass.__name__.lower()
- operand2 = "other"
- expected_str = " ".join([operand1, op, operand2])
- assert expected_str in getattr(klass, op_name).__doc__
-
- # reverse version of the binary ops
- expected_str = " ".join([operand2, op, operand1])
- assert expected_str in getattr(klass, "r" + op_name).__doc__
-
-
-def test_ndarray_compat_properties(index_or_series_obj):
- obj = index_or_series_obj
-
- # Check that we work.
- for p in ["shape", "dtype", "T", "nbytes"]:
- assert getattr(obj, p, None) is not None
-
- # deprecated properties
- for p in ["strides", "itemsize", "base", "data"]:
- assert not hasattr(obj, p)
-
- msg = "can only convert an array of size 1 to a Python scalar"
- with pytest.raises(ValueError, match=msg):
- obj.item() # len > 1
-
- assert obj.ndim == 1
- assert obj.size == len(obj)
-
- assert Index([1]).item() == 1
- assert Series([1]).item() == 1
-
-
-@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
-def test_memory_usage(index_or_series_memory_obj):
- obj = index_or_series_memory_obj
-    # Clear index caches so that len(obj) == 0 reports 0 memory usage
- if isinstance(obj, Series):
- is_ser = True
- obj.index._engine.clear_mapping()
- else:
- is_ser = False
- obj._engine.clear_mapping()
-
- res = obj.memory_usage()
- res_deep = obj.memory_usage(deep=True)
-
- is_object = is_object_dtype(obj) or (is_ser and is_object_dtype(obj.index))
- is_categorical = isinstance(obj.dtype, pd.CategoricalDtype) or (
- is_ser and isinstance(obj.index.dtype, pd.CategoricalDtype)
- )
- is_object_string = is_dtype_equal(obj, "string[python]") or (
- is_ser and is_dtype_equal(obj.index.dtype, "string[python]")
- )
-
- if len(obj) == 0:
- expected = 0
- assert res_deep == res == expected
- elif is_object or is_categorical or is_object_string:
- # only deep will pick them up
- assert res_deep > res
- else:
- assert res == res_deep
-
- # sys.getsizeof will call the .memory_usage with
- # deep=True, and add on some GC overhead
- diff = res_deep - sys.getsizeof(obj)
- assert abs(diff) < 100
-
-
-def test_memory_usage_components_series(series_with_simple_index):
- series = series_with_simple_index
- total_usage = series.memory_usage(index=True)
- non_index_usage = series.memory_usage(index=False)
- index_usage = series.index.memory_usage()
- assert total_usage == non_index_usage + index_usage
-
-
-@pytest.mark.parametrize("dtype", tm.NARROW_NP_DTYPES)
-def test_memory_usage_components_narrow_series(dtype):
- series = tm.make_rand_series(name="a", dtype=dtype)
- total_usage = series.memory_usage(index=True)
- non_index_usage = series.memory_usage(index=False)
- index_usage = series.index.memory_usage()
- assert total_usage == non_index_usage + index_usage
-
-
-def test_searchsorted(request, index_or_series_obj):
- # numpy.searchsorted calls obj.searchsorted under the hood.
- # See gh-12238
- obj = index_or_series_obj
-
- if isinstance(obj, pd.MultiIndex):
- # See gh-14833
- request.node.add_marker(
- pytest.mark.xfail(
- reason="np.searchsorted doesn't work on pd.MultiIndex: GH 14833"
- )
- )
- elif obj.dtype.kind == "c" and isinstance(obj, Index):
- # TODO: Should Series cases also raise? Looks like they use numpy
- # comparison semantics https://github.com/numpy/numpy/issues/15981
- mark = pytest.mark.xfail(reason="complex objects are not comparable")
- request.node.add_marker(mark)
-
- max_obj = max(obj, default=0)
- index = np.searchsorted(obj, max_obj)
- assert 0 <= index <= len(obj)
-
- index = np.searchsorted(obj, max_obj, sorter=range(len(obj)))
- assert 0 <= index <= len(obj)
-
-
-def test_access_by_position(index_flat):
- index = index_flat
-
- if len(index) == 0:
- pytest.skip("Test doesn't make sense on empty data")
-
- series = Series(index)
- assert index[0] == series.iloc[0]
- assert index[5] == series.iloc[5]
- assert index[-1] == series.iloc[-1]
-
- size = len(index)
- assert index[-1] == index[size - 1]
-
- msg = f"index {size} is out of bounds for axis 0 with size {size}"
- if is_dtype_equal(index.dtype, "string[pyarrow]"):
- msg = "index out of bounds"
- with pytest.raises(IndexError, match=msg):
- index[size]
- msg = "single positional indexer is out-of-bounds"
- with pytest.raises(IndexError, match=msg):
- series.iloc[size]
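
The removed test_memory_usage asserts that deep accounting only exceeds the shallow number for object, categorical, or string[python] data. A small usage example of that distinction, assuming a standard pandas install:

    # deep=True adds the size of the Python string objects an object-dtype Series
    # points to; the shallow figure only counts the pointer array plus the index.
    import pandas as pd

    ints = pd.Series([1, 2, 3])
    strs = pd.Series(["a", "bb", "ccc"])          # object dtype

    print(ints.memory_usage(), ints.memory_usage(deep=True))   # equal for int64 data
    print(strs.memory_usage() < strs.memory_usage(deep=True))  # True: deep exceeds shallow
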
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/commands/index.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/commands/index.py
deleted file mode 100644
index 9d8aae3b542bcdcf6d0ca2f60a48bd47908dae7b..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/commands/index.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import logging
-from optparse import Values
-from typing import Any, Iterable, List, Optional, Union
-
-from pip._vendor.packaging.version import LegacyVersion, Version
-
-from pip._internal.cli import cmdoptions
-from pip._internal.cli.req_command import IndexGroupCommand
-from pip._internal.cli.status_codes import ERROR, SUCCESS
-from pip._internal.commands.search import print_dist_installation_info
-from pip._internal.exceptions import CommandError, DistributionNotFound, PipError
-from pip._internal.index.collector import LinkCollector
-from pip._internal.index.package_finder import PackageFinder
-from pip._internal.models.selection_prefs import SelectionPreferences
-from pip._internal.models.target_python import TargetPython
-from pip._internal.network.session import PipSession
-from pip._internal.utils.misc import write_output
-
-logger = logging.getLogger(__name__)
-
-
-class IndexCommand(IndexGroupCommand):
- """
- Inspect information available from package indexes.
- """
-
- usage = """
- %prog versions
- """
-
- def add_options(self) -> None:
- cmdoptions.add_target_python_options(self.cmd_opts)
-
- self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
- self.cmd_opts.add_option(cmdoptions.pre())
- self.cmd_opts.add_option(cmdoptions.no_binary())
- self.cmd_opts.add_option(cmdoptions.only_binary())
-
- index_opts = cmdoptions.make_option_group(
- cmdoptions.index_group,
- self.parser,
- )
-
- self.parser.insert_option_group(0, index_opts)
- self.parser.insert_option_group(0, self.cmd_opts)
-
- def run(self, options: Values, args: List[str]) -> int:
- handlers = {
- "versions": self.get_available_package_versions,
- }
-
- logger.warning(
- "pip index is currently an experimental command. "
- "It may be removed/changed in a future release "
- "without prior warning."
- )
-
- # Determine action
- if not args or args[0] not in handlers:
- logger.error(
- "Need an action (%s) to perform.",
- ", ".join(sorted(handlers)),
- )
- return ERROR
-
- action = args[0]
-
- # Error handling happens here, not in the action-handlers.
- try:
- handlers[action](options, args[1:])
- except PipError as e:
- logger.error(e.args[0])
- return ERROR
-
- return SUCCESS
-
- def _build_package_finder(
- self,
- options: Values,
- session: PipSession,
- target_python: Optional[TargetPython] = None,
- ignore_requires_python: Optional[bool] = None,
- ) -> PackageFinder:
- """
- Create a package finder appropriate to the index command.
- """
- link_collector = LinkCollector.create(session, options=options)
-
- # Pass allow_yanked=False to ignore yanked versions.
- selection_prefs = SelectionPreferences(
- allow_yanked=False,
- allow_all_prereleases=options.pre,
- ignore_requires_python=ignore_requires_python,
- )
-
- return PackageFinder.create(
- link_collector=link_collector,
- selection_prefs=selection_prefs,
- target_python=target_python,
- use_deprecated_html5lib="html5lib" in options.deprecated_features_enabled,
- )
-
- def get_available_package_versions(self, options: Values, args: List[Any]) -> None:
- if len(args) != 1:
- raise CommandError("You need to specify exactly one argument")
-
- target_python = cmdoptions.make_target_python(options)
- query = args[0]
-
- with self._build_session(options) as session:
- finder = self._build_package_finder(
- options=options,
- session=session,
- target_python=target_python,
- ignore_requires_python=options.ignore_requires_python,
- )
-
- versions: Iterable[Union[LegacyVersion, Version]] = (
- candidate.version for candidate in finder.find_all_candidates(query)
- )
-
- if not options.pre:
- # Remove prereleases
- versions = (
- version for version in versions if not version.is_prerelease
- )
- versions = set(versions)
-
- if not versions:
- raise DistributionNotFound(
- "No matching distribution found for {}".format(query)
- )
-
- formatted_versions = [str(ver) for ver in sorted(versions, reverse=True)]
- latest = formatted_versions[0]
-
- write_output("{} ({})".format(query, latest))
- write_output("Available versions: {}".format(", ".join(formatted_versions)))
- print_dist_installation_info(query, latest)
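
`get_available_package_versions` above backs the experimental `pip index versions <package>` subcommand: it collects candidate versions from the configured indexes, drops pre-releases unless `--pre` was given, and prints them newest first. A rough sketch of that filtering step in isolation (not pip's public API; the package name and version strings are made up):

```python
from pip._vendor.packaging.version import Version

candidate_versions = [Version(v) for v in ("1.0", "1.5", "2.0b1", "1.5")]
allow_prereleases = False  # corresponds to the --pre flag

# Drop pre-releases unless explicitly allowed, de-duplicate, newest first.
versions = {v for v in candidate_versions if allow_prereleases or not v.is_prerelease}
formatted_versions = [str(v) for v in sorted(versions, reverse=True)]
latest = formatted_versions[0]

print("example-package ({})".format(latest))
print("Available versions: {}".format(", ".join(formatted_versions)))
```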
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/chardet/metadata/languages.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/chardet/metadata/languages.py
deleted file mode 100644
index 3237d5abf60122e0cea5463ff34f2256b11b5a81..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/chardet/metadata/languages.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-Metadata about languages used by our model training code for our
-SingleByteCharSetProbers. Could be used for other things in the future.
-
-This code is based on the language metadata from the uchardet project.
-"""
-from __future__ import absolute_import, print_function
-
-from string import ascii_letters
-
-
-# TODO: Add Ukrainian (KOI8-U)
-
-class Language(object):
- """Metadata about a language useful for training models
-
- :ivar name: The human name for the language, in English.
- :type name: str
- :ivar iso_code: 2-letter ISO 639-1 if possible, 3-letter ISO code otherwise,
- or use another catalog as a last resort.
- :type iso_code: str
- :ivar use_ascii: Whether or not ASCII letters should be included in trained
- models.
- :type use_ascii: bool
- :ivar charsets: The charsets we want to support and create data for.
- :type charsets: list of str
- :ivar alphabet: The characters in the language's alphabet. If `use_ascii` is
- `True`, you only need to add those not in the ASCII set.
- :type alphabet: str
- :ivar wiki_start_pages: The Wikipedia pages to start from if we're crawling
- Wikipedia for training data.
- :type wiki_start_pages: list of str
- """
- def __init__(self, name=None, iso_code=None, use_ascii=True, charsets=None,
- alphabet=None, wiki_start_pages=None):
- super(Language, self).__init__()
- self.name = name
- self.iso_code = iso_code
- self.use_ascii = use_ascii
- self.charsets = charsets
- if self.use_ascii:
- if alphabet:
- alphabet += ascii_letters
- else:
- alphabet = ascii_letters
- elif not alphabet:
- raise ValueError('Must supply alphabet if use_ascii is False')
- self.alphabet = ''.join(sorted(set(alphabet))) if alphabet else None
- self.wiki_start_pages = wiki_start_pages
-
- def __repr__(self):
- return '{}({})'.format(self.__class__.__name__,
- ', '.join('{}={!r}'.format(k, v)
- for k, v in self.__dict__.items()
- if not k.startswith('_')))
-
-
-LANGUAGES = {'Arabic': Language(name='Arabic',
- iso_code='ar',
- use_ascii=False,
- # We only support encodings that use isolated
- # forms, because the current recommendation is
- # that the rendering system handles presentation
- # forms. This means we purposefully skip IBM864.
- charsets=['ISO-8859-6', 'WINDOWS-1256',
- 'CP720', 'CP864'],
- alphabet=u'ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ',
- wiki_start_pages=[u'الصفحة_الرئيسية']),
- 'Belarusian': Language(name='Belarusian',
- iso_code='be',
- use_ascii=False,
- charsets=['ISO-8859-5', 'WINDOWS-1251',
- 'IBM866', 'MacCyrillic'],
- alphabet=(u'АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯ'
- u'абвгдеёжзійклмнопрстуўфхцчшыьэюяʼ'),
- wiki_start_pages=[u'Галоўная_старонка']),
- 'Bulgarian': Language(name='Bulgarian',
- iso_code='bg',
- use_ascii=False,
- charsets=['ISO-8859-5', 'WINDOWS-1251',
- 'IBM855'],
- alphabet=(u'АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯ'
- u'абвгдежзийклмнопрстуфхцчшщъьюя'),
- wiki_start_pages=[u'Начална_страница']),
- 'Czech': Language(name='Czech',
- iso_code='cz',
- use_ascii=True,
- charsets=['ISO-8859-2', 'WINDOWS-1250'],
- alphabet=u'áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ',
- wiki_start_pages=[u'Hlavní_strana']),
- 'Danish': Language(name='Danish',
- iso_code='da',
- use_ascii=True,
- charsets=['ISO-8859-1', 'ISO-8859-15',
- 'WINDOWS-1252'],
- alphabet=u'æøåÆØÅ',
- wiki_start_pages=[u'Forside']),
- 'German': Language(name='German',
- iso_code='de',
- use_ascii=True,
- charsets=['ISO-8859-1', 'WINDOWS-1252'],
- alphabet=u'äöüßÄÖÜ',
- wiki_start_pages=[u'Wikipedia:Hauptseite']),
- 'Greek': Language(name='Greek',
- iso_code='el',
- use_ascii=False,
- charsets=['ISO-8859-7', 'WINDOWS-1253'],
- alphabet=(u'αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώ'
- u'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ'),
- wiki_start_pages=[u'Πύλη:Κύρια']),
- 'English': Language(name='English',
- iso_code='en',
- use_ascii=True,
- charsets=['ISO-8859-1', 'WINDOWS-1252'],
- wiki_start_pages=[u'Main_Page']),
- 'Esperanto': Language(name='Esperanto',
- iso_code='eo',
- # Q, W, X, and Y not used at all
- use_ascii=False,
- charsets=['ISO-8859-3'],
- alphabet=(u'abcĉdefgĝhĥijĵklmnoprsŝtuŭvz'
- u'ABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ'),
- wiki_start_pages=[u'Vikipedio:Ĉefpaĝo']),
- 'Spanish': Language(name='Spanish',
- iso_code='es',
- use_ascii=True,
- charsets=['ISO-8859-1', 'ISO-8859-15',
- 'WINDOWS-1252'],
- alphabet=u'ñáéíóúüÑÁÉÍÓÚÜ',
- wiki_start_pages=[u'Wikipedia:Portada']),
- 'Estonian': Language(name='Estonian',
- iso_code='et',
- use_ascii=False,
- charsets=['ISO-8859-4', 'ISO-8859-13',
- 'WINDOWS-1257'],
- # C, F, Š, Q, W, X, Y, Z, Ž are only for
- # loanwords
- alphabet=(u'ABDEGHIJKLMNOPRSTUVÕÄÖÜ'
- u'abdeghijklmnoprstuvõäöü'),
- wiki_start_pages=[u'Esileht']),
- 'Finnish': Language(name='Finnish',
- iso_code='fi',
- use_ascii=True,
- charsets=['ISO-8859-1', 'ISO-8859-15',
- 'WINDOWS-1252'],
- alphabet=u'ÅÄÖŠŽåäöšž',
- wiki_start_pages=[u'Wikipedia:Etusivu']),
- 'French': Language(name='French',
- iso_code='fr',
- use_ascii=True,
- charsets=['ISO-8859-1', 'ISO-8859-15',
- 'WINDOWS-1252'],
- alphabet=u'œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ',
- wiki_start_pages=[u'Wikipédia:Accueil_principal',
- u'Bœuf (animal)']),
- 'Hebrew': Language(name='Hebrew',
- iso_code='he',
- use_ascii=False,
- charsets=['ISO-8859-8', 'WINDOWS-1255'],
- alphabet=u'אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ',
- wiki_start_pages=[u'עמוד_ראשי']),
- 'Croatian': Language(name='Croatian',
- iso_code='hr',
- # Q, W, X, Y are only used for foreign words.
- use_ascii=False,
- charsets=['ISO-8859-2', 'WINDOWS-1250'],
- alphabet=(u'abcčćdđefghijklmnoprsštuvzž'
- u'ABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ'),
- wiki_start_pages=[u'Glavna_stranica']),
- 'Hungarian': Language(name='Hungarian',
- iso_code='hu',
- # Q, W, X, Y are only used for foreign words.
- use_ascii=False,
- charsets=['ISO-8859-2', 'WINDOWS-1250'],
- alphabet=(u'abcdefghijklmnoprstuvzáéíóöőúüű'
- u'ABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ'),
- wiki_start_pages=[u'Kezdőlap']),
- 'Italian': Language(name='Italian',
- iso_code='it',
- use_ascii=True,
- charsets=['ISO-8859-1', 'ISO-8859-15',
- 'WINDOWS-1252'],
- alphabet=u'ÀÈÉÌÒÓÙàèéìòóù',
- wiki_start_pages=[u'Pagina_principale']),
- 'Lithuanian': Language(name='Lithuanian',
- iso_code='lt',
- use_ascii=False,
- charsets=['ISO-8859-13', 'WINDOWS-1257',
- 'ISO-8859-4'],
- # Q, W, and X not used at all
- alphabet=(u'AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽ'
- u'aąbcčdeęėfghiįyjklmnoprsštuųūvzž'),
- wiki_start_pages=[u'Pagrindinis_puslapis']),
- 'Latvian': Language(name='Latvian',
- iso_code='lv',
- use_ascii=False,
- charsets=['ISO-8859-13', 'WINDOWS-1257',
- 'ISO-8859-4'],
- # Q, W, X, Y are only for loanwords
- alphabet=(u'AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽ'
- u'aābcčdeēfgģhiījkķlļmnņoprsštuūvzž'),
- wiki_start_pages=[u'Sākumlapa']),
- 'Macedonian': Language(name='Macedonian',
- iso_code='mk',
- use_ascii=False,
- charsets=['ISO-8859-5', 'WINDOWS-1251',
- 'MacCyrillic', 'IBM855'],
- alphabet=(u'АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШ'
- u'абвгдѓежзѕијклљмнњопрстќуфхцчџш'),
- wiki_start_pages=[u'Главна_страница']),
- 'Dutch': Language(name='Dutch',
- iso_code='nl',
- use_ascii=True,
- charsets=['ISO-8859-1', 'WINDOWS-1252'],
- wiki_start_pages=[u'Hoofdpagina']),
- 'Polish': Language(name='Polish',
- iso_code='pl',
- # Q and X are only used for foreign words.
- use_ascii=False,
- charsets=['ISO-8859-2', 'WINDOWS-1250'],
- alphabet=(u'AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ'
- u'aąbcćdeęfghijklłmnńoóprsśtuwyzźż'),
- wiki_start_pages=[u'Wikipedia:Strona_główna']),
- 'Portuguese': Language(name='Portuguese',
- iso_code='pt',
- use_ascii=True,
- charsets=['ISO-8859-1', 'ISO-8859-15',
- 'WINDOWS-1252'],
- alphabet=u'ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú',
- wiki_start_pages=[u'Wikipédia:Página_principal']),
- 'Romanian': Language(name='Romanian',
- iso_code='ro',
- use_ascii=True,
- charsets=['ISO-8859-2', 'WINDOWS-1250'],
- alphabet=u'ăâîșțĂÂÎȘȚ',
- wiki_start_pages=[u'Pagina_principală']),
- 'Russian': Language(name='Russian',
- iso_code='ru',
- use_ascii=False,
- charsets=['ISO-8859-5', 'WINDOWS-1251',
- 'KOI8-R', 'MacCyrillic', 'IBM866',
- 'IBM855'],
- alphabet=(u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
- u'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'),
- wiki_start_pages=[u'Заглавная_страница']),
- 'Slovak': Language(name='Slovak',
- iso_code='sk',
- use_ascii=True,
- charsets=['ISO-8859-2', 'WINDOWS-1250'],
- alphabet=u'áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ',
- wiki_start_pages=[u'Hlavná_stránka']),
- 'Slovene': Language(name='Slovene',
- iso_code='sl',
- # Q, W, X, Y are only used for foreign words.
- use_ascii=False,
- charsets=['ISO-8859-2', 'WINDOWS-1250'],
- alphabet=(u'abcčdefghijklmnoprsštuvzž'
- u'ABCČDEFGHIJKLMNOPRSŠTUVZŽ'),
- wiki_start_pages=[u'Glavna_stran']),
- # Serbian can be written in both Latin and Cyrillic, but there's no
- # simple way to get the Latin alphabet pages from Wikipedia through
- # the API, so for now we just support Cyrillic.
- 'Serbian': Language(name='Serbian',
- iso_code='sr',
- alphabet=(u'АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШ'
- u'абвгдђежзијклљмнњопрстћуфхцчџш'),
- charsets=['ISO-8859-5', 'WINDOWS-1251',
- 'MacCyrillic', 'IBM855'],
- wiki_start_pages=[u'Главна_страна']),
- 'Thai': Language(name='Thai',
- iso_code='th',
- use_ascii=False,
- charsets=['ISO-8859-11', 'TIS-620', 'CP874'],
- alphabet=u'กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛',
- wiki_start_pages=[u'หน้าหลัก']),
- 'Turkish': Language(name='Turkish',
- iso_code='tr',
- # Q, W, and X are not used by Turkish
- use_ascii=False,
- charsets=['ISO-8859-3', 'ISO-8859-9',
- 'WINDOWS-1254'],
- alphabet=(u'abcçdefgğhıijklmnoöprsştuüvyzâîû'
- u'ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ'),
- wiki_start_pages=[u'Ana_Sayfa']),
- 'Vietnamese': Language(name='Vietnamese',
- iso_code='vi',
- use_ascii=False,
- # Windows-1258 is the only common 8-bit
- # Vietnamese encoding supported by Python.
- # From Wikipedia:
- # For systems that lack support for Unicode,
- # dozens of 8-bit Vietnamese code pages are
- # available.[1] The most common are VISCII
- # (TCVN 5712:1993), VPS, and Windows-1258.[3]
- # Where ASCII is required, such as when
- # ensuring readability in plain text e-mail,
- # Vietnamese letters are often encoded
- # according to Vietnamese Quoted-Readable
- # (VIQR) or VSCII Mnemonic (VSCII-MNEM),[4]
- # though usage of either variable-width
- # scheme has declined dramatically following
- # the adoption of Unicode on the World Wide
- # Web.
- charsets=['WINDOWS-1258'],
- alphabet=(u'aăâbcdđeêghiklmnoôơpqrstuưvxy'
- u'AĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY'),
- wiki_start_pages=[u'Chữ_Quốc_ngữ']),
- }
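
A small sketch of how the `Language` constructor above folds the ASCII letters into a language's alphabet when `use_ascii=True` (assuming this vendored module is importable in the same environment):

```python
from string import ascii_letters

from pip._vendor.chardet.metadata.languages import LANGUAGES

german = LANGUAGES["German"]
assert german.use_ascii
# The stored alphabet is sorted and de-duplicated, and contains both the
# plain ASCII letters and the language-specific extras.
assert set(ascii_letters) <= set(german.alphabet)
assert set("äöüßÄÖÜ") <= set(german.alphabet)
```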
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/connection.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/connection.py
deleted file mode 100644
index 4d92ac6d2c3bd213f88c84f045364b1728adc01c..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/connection.py
+++ /dev/null
@@ -1,569 +0,0 @@
-from __future__ import absolute_import
-
-import datetime
-import logging
-import os
-import re
-import socket
-import warnings
-from socket import error as SocketError
-from socket import timeout as SocketTimeout
-
-from .packages import six
-from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
-from .packages.six.moves.http_client import HTTPException # noqa: F401
-from .util.proxy import create_proxy_ssl_context
-
-try: # Compiled with SSL?
- import ssl
-
- BaseSSLError = ssl.SSLError
-except (ImportError, AttributeError): # Platform-specific: No SSL.
- ssl = None
-
- class BaseSSLError(BaseException):
- pass
-
-
-try:
- # Python 3: not a no-op, we're adding this to the namespace so it can be imported.
- ConnectionError = ConnectionError
-except NameError:
- # Python 2
- class ConnectionError(Exception):
- pass
-
-
-try: # Python 3:
- # Not a no-op, we're adding this to the namespace so it can be imported.
- BrokenPipeError = BrokenPipeError
-except NameError: # Python 2:
-
- class BrokenPipeError(Exception):
- pass
-
-
-from ._collections import HTTPHeaderDict # noqa (historical, removed in v2)
-from ._version import __version__
-from .exceptions import (
- ConnectTimeoutError,
- NewConnectionError,
- SubjectAltNameWarning,
- SystemTimeWarning,
-)
-from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection
-from .util.ssl_ import (
- assert_fingerprint,
- create_urllib3_context,
- is_ipaddress,
- resolve_cert_reqs,
- resolve_ssl_version,
- ssl_wrap_socket,
-)
-from .util.ssl_match_hostname import CertificateError, match_hostname
-
-log = logging.getLogger(__name__)
-
-port_by_scheme = {"http": 80, "https": 443}
-
-# When it comes time to update this value as a part of regular maintenance
-# (ie test_recent_date is failing) update it to ~6 months before the current date.
-RECENT_DATE = datetime.date(2020, 7, 1)
-
-_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")
-
-
-class HTTPConnection(_HTTPConnection, object):
- """
- Based on :class:`http.client.HTTPConnection` but provides an extra constructor
- backwards-compatibility layer between older and newer Pythons.
-
- Additional keyword parameters are used to configure attributes of the connection.
- Accepted parameters include:
-
- - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- - ``source_address``: Set the source address for the current connection.
- - ``socket_options``: Set specific options on the underlying socket. If not specified, then
- defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
- Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
-
- For example, if you wish to enable TCP Keep Alive in addition to the defaults,
- you might pass:
-
- .. code-block:: python
-
- HTTPConnection.default_socket_options + [
- (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
- ]
-
- Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
- """
-
- default_port = port_by_scheme["http"]
-
- #: Disable Nagle's algorithm by default.
- #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
- default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
-
- #: Whether this connection verifies the host's certificate.
- is_verified = False
-
- #: Whether this proxy connection (if used) verifies the proxy host's
- #: certificate.
- proxy_is_verified = None
-
- def __init__(self, *args, **kw):
- if not six.PY2:
- kw.pop("strict", None)
-
- # Pre-set source_address.
- self.source_address = kw.get("source_address")
-
- #: The socket options provided by the user. If no options are
- #: provided, we use the default options.
- self.socket_options = kw.pop("socket_options", self.default_socket_options)
-
- # Proxy options provided by the user.
- self.proxy = kw.pop("proxy", None)
- self.proxy_config = kw.pop("proxy_config", None)
-
- _HTTPConnection.__init__(self, *args, **kw)
-
- @property
- def host(self):
- """
- Getter method to remove any trailing dots that indicate the hostname is an FQDN.
-
- In general, SSL certificates don't include the trailing dot indicating a
- fully-qualified domain name, and thus, they don't validate properly when
- checked against a domain name that includes the dot. In addition, some
- servers may not expect to receive the trailing dot when provided.
-
- However, the hostname with trailing dot is critical to DNS resolution; doing a
- lookup with the trailing dot will properly only resolve the appropriate FQDN,
- whereas a lookup without a trailing dot will search the system's search domain
- list. Thus, it's important to keep the original host around for use only in
- those cases where it's appropriate (i.e., when doing DNS lookup to establish the
- actual TCP connection across which we're going to send HTTP requests).
- """
- return self._dns_host.rstrip(".")
-
- @host.setter
- def host(self, value):
- """
- Setter for the `host` property.
-
- We assume that only urllib3 uses the _dns_host attribute; httplib itself
- only uses `host`, and it seems reasonable that other libraries follow suit.
- """
- self._dns_host = value
-
- def _new_conn(self):
- """Establish a socket connection and set nodelay settings on it.
-
- :return: New socket connection.
- """
- extra_kw = {}
- if self.source_address:
- extra_kw["source_address"] = self.source_address
-
- if self.socket_options:
- extra_kw["socket_options"] = self.socket_options
-
- try:
- conn = connection.create_connection(
- (self._dns_host, self.port), self.timeout, **extra_kw
- )
-
- except SocketTimeout:
- raise ConnectTimeoutError(
- self,
- "Connection to %s timed out. (connect timeout=%s)"
- % (self.host, self.timeout),
- )
-
- except SocketError as e:
- raise NewConnectionError(
- self, "Failed to establish a new connection: %s" % e
- )
-
- return conn
-
- def _is_using_tunnel(self):
- # Google App Engine's httplib does not define _tunnel_host
- return getattr(self, "_tunnel_host", None)
-
- def _prepare_conn(self, conn):
- self.sock = conn
- if self._is_using_tunnel():
- # TODO: Fix tunnel so it doesn't depend on self.sock state.
- self._tunnel()
- # Mark this connection as not reusable
- self.auto_open = 0
-
- def connect(self):
- conn = self._new_conn()
- self._prepare_conn(conn)
-
- def putrequest(self, method, url, *args, **kwargs):
- """ """
- # Empty docstring because the indentation of CPython's implementation
- # is broken but we don't want this method in our documentation.
- match = _CONTAINS_CONTROL_CHAR_RE.search(method)
- if match:
- raise ValueError(
- "Method cannot contain non-token characters %r (found at least %r)"
- % (method, match.group())
- )
-
- return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)
-
- def putheader(self, header, *values):
- """ """
- if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):
- _HTTPConnection.putheader(self, header, *values)
- elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS:
- raise ValueError(
- "urllib3.util.SKIP_HEADER only supports '%s'"
- % ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),)
- )
-
- def request(self, method, url, body=None, headers=None):
- if headers is None:
- headers = {}
- else:
- # Avoid modifying the headers passed into .request()
- headers = headers.copy()
- if "user-agent" not in (six.ensure_str(k.lower()) for k in headers):
- headers["User-Agent"] = _get_default_user_agent()
- super(HTTPConnection, self).request(method, url, body=body, headers=headers)
-
- def request_chunked(self, method, url, body=None, headers=None):
- """
- Alternative to the common request method, which sends the
- body with chunked encoding and not as one block
- """
- headers = headers or {}
- header_keys = set([six.ensure_str(k.lower()) for k in headers])
- skip_accept_encoding = "accept-encoding" in header_keys
- skip_host = "host" in header_keys
- self.putrequest(
- method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
- )
- if "user-agent" not in header_keys:
- self.putheader("User-Agent", _get_default_user_agent())
- for header, value in headers.items():
- self.putheader(header, value)
- if "transfer-encoding" not in header_keys:
- self.putheader("Transfer-Encoding", "chunked")
- self.endheaders()
-
- if body is not None:
- stringish_types = six.string_types + (bytes,)
- if isinstance(body, stringish_types):
- body = (body,)
- for chunk in body:
- if not chunk:
- continue
- if not isinstance(chunk, bytes):
- chunk = chunk.encode("utf8")
- len_str = hex(len(chunk))[2:]
- to_send = bytearray(len_str.encode())
- to_send += b"\r\n"
- to_send += chunk
- to_send += b"\r\n"
- self.send(to_send)
-
- # Placed after the if clause so the terminating zero-length chunk is always sent
- self.send(b"0\r\n\r\n")
-
-
-class HTTPSConnection(HTTPConnection):
- """
- Many of the parameters to this constructor are passed to the underlying SSL
- socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.
- """
-
- default_port = port_by_scheme["https"]
-
- cert_reqs = None
- ca_certs = None
- ca_cert_dir = None
- ca_cert_data = None
- ssl_version = None
- assert_fingerprint = None
- tls_in_tls_required = False
-
- def __init__(
- self,
- host,
- port=None,
- key_file=None,
- cert_file=None,
- key_password=None,
- strict=None,
- timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
- ssl_context=None,
- server_hostname=None,
- **kw
- ):
-
- HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.key_password = key_password
- self.ssl_context = ssl_context
- self.server_hostname = server_hostname
-
- # Required property for Google AppEngine 1.9.0 which otherwise causes
- # HTTPS requests to go out as HTTP. (See Issue #356)
- self._protocol = "https"
-
- def set_cert(
- self,
- key_file=None,
- cert_file=None,
- cert_reqs=None,
- key_password=None,
- ca_certs=None,
- assert_hostname=None,
- assert_fingerprint=None,
- ca_cert_dir=None,
- ca_cert_data=None,
- ):
- """
- This method should only be called once, before the connection is used.
- """
- # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
- # have an SSLContext object in which case we'll use its verify_mode.
- if cert_reqs is None:
- if self.ssl_context is not None:
- cert_reqs = self.ssl_context.verify_mode
- else:
- cert_reqs = resolve_cert_reqs(None)
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.key_password = key_password
- self.assert_hostname = assert_hostname
- self.assert_fingerprint = assert_fingerprint
- self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
- self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
- self.ca_cert_data = ca_cert_data
-
- def connect(self):
- # Add certificate verification
- conn = self._new_conn()
- hostname = self.host
- tls_in_tls = False
-
- if self._is_using_tunnel():
- if self.tls_in_tls_required:
- conn = self._connect_tls_proxy(hostname, conn)
- tls_in_tls = True
-
- self.sock = conn
-
- # Calls self._set_hostport(), so self.host is
- # self._tunnel_host below.
- self._tunnel()
- # Mark this connection as not reusable
- self.auto_open = 0
-
- # Override the host with the one we're requesting data from.
- hostname = self._tunnel_host
-
- server_hostname = hostname
- if self.server_hostname is not None:
- server_hostname = self.server_hostname
-
- is_time_off = datetime.date.today() < RECENT_DATE
- if is_time_off:
- warnings.warn(
- (
- "System time is way off (before {0}). This will probably "
- "lead to SSL verification errors"
- ).format(RECENT_DATE),
- SystemTimeWarning,
- )
-
- # Wrap socket using verification with the root certs in
- # trusted_root_certs
- default_ssl_context = False
- if self.ssl_context is None:
- default_ssl_context = True
- self.ssl_context = create_urllib3_context(
- ssl_version=resolve_ssl_version(self.ssl_version),
- cert_reqs=resolve_cert_reqs(self.cert_reqs),
- )
-
- context = self.ssl_context
- context.verify_mode = resolve_cert_reqs(self.cert_reqs)
-
- # Try to load OS default certs if none are given.
- # Works well on Windows (requires Python3.4+)
- if (
- not self.ca_certs
- and not self.ca_cert_dir
- and not self.ca_cert_data
- and default_ssl_context
- and hasattr(context, "load_default_certs")
- ):
- context.load_default_certs()
-
- self.sock = ssl_wrap_socket(
- sock=conn,
- keyfile=self.key_file,
- certfile=self.cert_file,
- key_password=self.key_password,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- ca_cert_data=self.ca_cert_data,
- server_hostname=server_hostname,
- ssl_context=context,
- tls_in_tls=tls_in_tls,
- )
-
- # If we're using all defaults and the connection
- # is TLSv1 or TLSv1.1 we throw a DeprecationWarning
- # for the host.
- if (
- default_ssl_context
- and self.ssl_version is None
- and hasattr(self.sock, "version")
- and self.sock.version() in {"TLSv1", "TLSv1.1"}
- ):
- warnings.warn(
- "Negotiating TLSv1/TLSv1.1 by default is deprecated "
- "and will be disabled in urllib3 v2.0.0. Connecting to "
- "'%s' with '%s' can be enabled by explicitly opting-in "
- "with 'ssl_version'" % (self.host, self.sock.version()),
- DeprecationWarning,
- )
-
- if self.assert_fingerprint:
- assert_fingerprint(
- self.sock.getpeercert(binary_form=True), self.assert_fingerprint
- )
- elif (
- context.verify_mode != ssl.CERT_NONE
- and not getattr(context, "check_hostname", False)
- and self.assert_hostname is not False
- ):
- # While urllib3 attempts to always turn off hostname matching from
- # the TLS library, this cannot always be done. So we check whether
- # the TLS Library still thinks it's matching hostnames.
- cert = self.sock.getpeercert()
- if not cert.get("subjectAltName", ()):
- warnings.warn(
- (
- "Certificate for {0} has no `subjectAltName`, falling back to check for a "
- "`commonName` for now. This feature is being removed by major browsers and "
- "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
- "for details.)".format(hostname)
- ),
- SubjectAltNameWarning,
- )
- _match_hostname(cert, self.assert_hostname or server_hostname)
-
- self.is_verified = (
- context.verify_mode == ssl.CERT_REQUIRED
- or self.assert_fingerprint is not None
- )
-
- def _connect_tls_proxy(self, hostname, conn):
- """
- Establish a TLS connection to the proxy using the provided SSL context.
- """
- proxy_config = self.proxy_config
- ssl_context = proxy_config.ssl_context
- if ssl_context:
- # If the user provided a proxy context, we assume CA and client
- # certificates have already been set
- return ssl_wrap_socket(
- sock=conn,
- server_hostname=hostname,
- ssl_context=ssl_context,
- )
-
- ssl_context = create_proxy_ssl_context(
- self.ssl_version,
- self.cert_reqs,
- self.ca_certs,
- self.ca_cert_dir,
- self.ca_cert_data,
- )
-
- # If no cert was provided, use only the default options for server
- # certificate validation
- socket = ssl_wrap_socket(
- sock=conn,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- ca_cert_data=self.ca_cert_data,
- server_hostname=hostname,
- ssl_context=ssl_context,
- )
-
- if ssl_context.verify_mode != ssl.CERT_NONE and not getattr(
- ssl_context, "check_hostname", False
- ):
- # While urllib3 attempts to always turn off hostname matching from
- # the TLS library, this cannot always be done. So we check whether
- # the TLS Library still thinks it's matching hostnames.
- cert = socket.getpeercert()
- if not cert.get("subjectAltName", ()):
- warnings.warn(
- (
- "Certificate for {0} has no `subjectAltName`, falling back to check for a "
- "`commonName` for now. This feature is being removed by major browsers and "
- "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
- "for details.)".format(hostname)
- ),
- SubjectAltNameWarning,
- )
- _match_hostname(cert, hostname)
-
- self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED
- return socket
-
-
-def _match_hostname(cert, asserted_hostname):
- # Our upstream implementation of ssl.match_hostname()
- # only applies this normalization to IP addresses so it doesn't
- # match DNS SANs so we do the same thing!
- stripped_hostname = asserted_hostname.strip("u[]")
- if is_ipaddress(stripped_hostname):
- asserted_hostname = stripped_hostname
-
- try:
- match_hostname(cert, asserted_hostname)
- except CertificateError as e:
- log.warning(
- "Certificate did not match expected hostname: %s. Certificate: %s",
- asserted_hostname,
- cert,
- )
- # Add cert to exception and reraise so client code can inspect
- # the cert when catching the exception, if they want to
- e._peer_cert = cert
- raise
-
-
-def _get_default_user_agent():
- return "python-urllib3/%s" % __version__
-
-
-class DummyConnection(object):
- """Used to detect a failed ConnectionCls import."""
-
- pass
-
-
-if not ssl:
- HTTPSConnection = DummyConnection # noqa: F811
-
-
-VerifiedHTTPSConnection = HTTPSConnection
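
The `host` property documented above strips the trailing dot of a fully-qualified domain name for certificate and Host-header purposes while keeping the absolute name for DNS resolution. A minimal sketch (import path assumes pip's vendored urllib3; constructing the object performs no network I/O):

```python
from pip._vendor.urllib3.connection import HTTPConnection

conn = HTTPConnection("example.com.", port=80)
assert conn._dns_host == "example.com."  # absolute name kept for DNS lookups
assert conn.host == "example.com"        # dot stripped for TLS/Host handling
```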
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/sas.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/sas.py
deleted file mode 100644
index c34066b02e6bf23c70dc8d1c63c6c6a228e091aa..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/sas.py
+++ /dev/null
@@ -1,227 +0,0 @@
-"""
- pygments.lexers.sas
- ~~~~~~~~~~~~~~~~~~~
-
- Lexer for SAS.
-
- :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import re
-from pygments.lexer import RegexLexer, include, words
-from pygments.token import Comment, Keyword, Name, Number, String, Text, \
- Other, Generic
-
-__all__ = ['SASLexer']
-
-
-class SASLexer(RegexLexer):
- """
- For SAS files.
-
- .. versionadded:: 2.2
- """
- # Syntax from syntax/sas.vim by James Kidd
-
- name = 'SAS'
- aliases = ['sas']
- filenames = ['*.SAS', '*.sas']
- mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
- flags = re.IGNORECASE | re.MULTILINE
-
- builtins_macros = (
- "bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp",
- "display", "do", "else", "end", "eval", "global", "goto", "if",
- "index", "input", "keydef", "label", "left", "length", "let",
- "local", "lowcase", "macro", "mend", "nrquote",
- "nrstr", "put", "qleft", "qlowcase", "qscan",
- "qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan",
- "str", "substr", "superq", "syscall", "sysevalf", "sysexec",
- "sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput",
- "then", "to", "trim", "unquote", "until", "upcase", "verify",
- "while", "window"
- )
-
- builtins_conditionals = (
- "do", "if", "then", "else", "end", "until", "while"
- )
-
- builtins_statements = (
- "abort", "array", "attrib", "by", "call", "cards", "cards4",
- "catname", "continue", "datalines", "datalines4", "delete", "delim",
- "delimiter", "display", "dm", "drop", "endsas", "error", "file",
- "filename", "footnote", "format", "goto", "in", "infile", "informat",
- "input", "keep", "label", "leave", "length", "libname", "link",
- "list", "lostcard", "merge", "missing", "modify", "options", "output",
- "out", "page", "put", "redirect", "remove", "rename", "replace",
- "retain", "return", "select", "set", "skip", "startsas", "stop",
- "title", "update", "waitsas", "where", "window", "x", "systask"
- )
-
- builtins_sql = (
- "add", "and", "alter", "as", "cascade", "check", "create",
- "delete", "describe", "distinct", "drop", "foreign", "from",
- "group", "having", "index", "insert", "into", "in", "key", "like",
- "message", "modify", "msgtype", "not", "null", "on", "or",
- "order", "primary", "references", "reset", "restrict", "select",
- "set", "table", "unique", "update", "validate", "view", "where"
- )
-
- builtins_functions = (
- "abs", "addr", "airy", "arcos", "arsin", "atan", "attrc",
- "attrn", "band", "betainv", "blshift", "bnot", "bor",
- "brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv",
- "close", "cnonct", "collate", "compbl", "compound",
- "compress", "cos", "cosh", "css", "curobs", "cv", "daccdb",
- "daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date",
- "datejul", "datepart", "datetime", "day", "dclose", "depdb",
- "depdbsl", "depsl", "depsyd",
- "deptab", "dequote", "dhms", "dif", "digamma",
- "dim", "dinfo", "dnum", "dopen", "doptname", "doptnum",
- "dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp",
- "fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs",
- "fexist", "fget", "fileexist", "filename", "fileref",
- "finfo", "finv", "fipname", "fipnamel", "fipstate", "floor",
- "fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint",
- "fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz",
- "fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn",
- "hbound", "hms", "hosthelp", "hour", "ibessel", "index",
- "indexc", "indexw", "input", "inputc", "inputn", "int",
- "intck", "intnx", "intrr", "irr", "jbessel", "juldate",
- "kurtosis", "lag", "lbound", "left", "length", "lgamma",
- "libname", "libref", "log", "log10", "log2", "logpdf", "logpmf",
- "logsdf", "lowcase", "max", "mdy", "mean", "min", "minute",
- "mod", "month", "mopen", "mort", "n", "netpv", "nmiss",
- "normal", "note", "npv", "open", "ordinal", "pathname",
- "pdf", "peek", "peekc", "pmf", "point", "poisson", "poke",
- "probbeta", "probbnml", "probchi", "probf", "probgam",
- "probhypr", "probit", "probnegb", "probnorm", "probt",
- "put", "putc", "putn", "qtr", "quote", "ranbin", "rancau",
- "ranexp", "rangam", "range", "rank", "rannor", "ranpoi",
- "rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse",
- "rewind", "right", "round", "saving", "scan", "sdf", "second",
- "sign", "sin", "sinh", "skewness", "soundex", "spedis",
- "sqrt", "std", "stderr", "stfips", "stname", "stnamel",
- "substr", "sum", "symget", "sysget", "sysmsg", "sysprod",
- "sysrc", "system", "tan", "tanh", "time", "timepart", "tinv",
- "tnonct", "today", "translate", "tranwrd", "trigamma",
- "trim", "trimn", "trunc", "uniform", "upcase", "uss", "var",
- "varfmt", "varinfmt", "varlabel", "varlen", "varname",
- "varnum", "varray", "varrayx", "vartype", "verify", "vformat",
- "vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw",
- "vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat",
- "vinformatd", "vinformatdx", "vinformatn", "vinformatnx",
- "vinformatw", "vinformatwx", "vinformatx", "vlabel",
- "vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype",
- "vtypex", "weekday", "year", "yyq", "zipfips", "zipname",
- "zipnamel", "zipstate"
- )
-
- tokens = {
- 'root': [
- include('comments'),
- include('proc-data'),
- include('cards-datalines'),
- include('logs'),
- include('general'),
- (r'.', Text),
- ],
- # SAS is multi-line regardless, but * is ended by ;
- 'comments': [
- (r'^\s*\*.*?;', Comment),
- (r'/\*.*?\*/', Comment),
- (r'^\s*\*(.|\n)*?;', Comment.Multiline),
- (r'/[*](.|\n)*?[*]/', Comment.Multiline),
- ],
- # Special highlight for proc, data, quit, run
- 'proc-data': [
- (r'(^|;)\s*(proc \w+|data|run|quit)[\s;]',
- Keyword.Reserved),
- ],
- # Special highlight cards and datalines
- 'cards-datalines': [
- (r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'),
- ],
- 'data': [
- (r'(.|\n)*^\s*;\s*$', Other, '#pop'),
- ],
- # Special highlight for put NOTE|ERROR|WARNING (order matters)
- 'logs': [
- (r'\n?^\s*%?put ', Keyword, 'log-messages'),
- ],
- 'log-messages': [
- (r'NOTE(:|-).*', Generic, '#pop'),
- (r'WARNING(:|-).*', Generic.Emph, '#pop'),
- (r'ERROR(:|-).*', Generic.Error, '#pop'),
- include('general'),
- ],
- 'general': [
- include('keywords'),
- include('vars-strings'),
- include('special'),
- include('numbers'),
- ],
- # Keywords, statements, functions, macros
- 'keywords': [
- (words(builtins_statements,
- prefix = r'\b',
- suffix = r'\b'),
- Keyword),
- (words(builtins_sql,
- prefix = r'\b',
- suffix = r'\b'),
- Keyword),
- (words(builtins_conditionals,
- prefix = r'\b',
- suffix = r'\b'),
- Keyword),
- (words(builtins_macros,
- prefix = r'%',
- suffix = r'\b'),
- Name.Builtin),
- (words(builtins_functions,
- prefix = r'\b',
- suffix = r'\('),
- Name.Builtin),
- ],
- # Strings and user-defined variables and macros (order matters)
- 'vars-strings': [
- (r'&[a-z_]\w{0,31}\.?', Name.Variable),
- (r'%[a-z_]\w{0,31}', Name.Function),
- (r'\'', String, 'string_squote'),
- (r'"', String, 'string_dquote'),
- ],
- 'string_squote': [
- ('\'', String, '#pop'),
- (r'\\\\|\\"|\\\n', String.Escape),
- # AFAIK, macro variables are not evaluated in single quotes
- # (r'&', Name.Variable, 'validvar'),
- (r'[^$\'\\]+', String),
- (r'[$\'\\]', String),
- ],
- 'string_dquote': [
- (r'"', String, '#pop'),
- (r'\\\\|\\"|\\\n', String.Escape),
- (r'&', Name.Variable, 'validvar'),
- (r'[^$&"\\]+', String),
- (r'[$"\\]', String),
- ],
- 'validvar': [
- (r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'),
- ],
- # SAS numbers and special variables
- 'numbers': [
- (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b',
- Number),
- ],
- 'special': [
- (r'(null|missing|_all_|_automatic_|_character_|_n_|'
- r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)',
- Keyword.Constant),
- ],
- # 'operators': [
- # (r'(-|=|<=|>=|<|>|<>|&|!=|'
- # r'\||\*|\+|\^|/|!|~|~=)', Operator)
- # ],
- }
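
A minimal usage sketch for the lexer above, rendering a small (made-up) SAS program with ANSI colours:

```python
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import SASLexer

code = """
data work.demo;
    set sashelp.class;
    bmi = weight / (height * height);
run;
"""
print(highlight(code, SASLexer(), TerminalFormatter()))
```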
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/utils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/utils.py
deleted file mode 100644
index 5a70819c308d33845b1a40846dcb34cdc540c3f8..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/utils.py
+++ /dev/null
@@ -1,398 +0,0 @@
-"""
-General helpers required for `tqdm.std`.
-"""
-import os
-import re
-import sys
-from functools import partial, partialmethod, wraps
-from inspect import signature
-# TODO consider using wcswidth third-party package for 0-width characters
-from unicodedata import east_asian_width
-from warnings import warn
-from weakref import proxy
-
-_range, _unich, _unicode, _basestring = range, chr, str, str
-CUR_OS = sys.platform
-IS_WIN = any(CUR_OS.startswith(i) for i in ['win32', 'cygwin'])
-IS_NIX = any(CUR_OS.startswith(i) for i in ['aix', 'linux', 'darwin'])
-RE_ANSI = re.compile(r"\x1b\[[;\d]*[A-Za-z]")
-
-try:
- if IS_WIN:
- import colorama
- else:
- raise ImportError
-except ImportError:
- colorama = None
-else:
- try:
- colorama.init(strip=False)
- except TypeError:
- colorama.init()
-
-
-def envwrap(prefix, types=None, is_method=False):
- """
- Override parameter defaults via `os.environ[prefix + param_name]`.
- Maps UPPER_CASE env vars to lower_case param names.
- camelCase isn't supported (because Windows ignores case).
-
- Precedence (highest first):
- - call (`foo(a=3)`)
- - environ (`FOO_A=2`)
- - signature (`def foo(a=1)`)
-
- Parameters
- ----------
- prefix : str
- Env var prefix, e.g. "FOO_"
- types : dict, optional
- Fallback mappings `{'param_name': type, ...}` if types cannot be
- inferred from function signature.
- Consider using `types=collections.defaultdict(lambda: ast.literal_eval)`.
- is_method : bool, optional
- Whether to use `functools.partialmethod`. If False (default), use `functools.partial`.
-
- Examples
- --------
- ```
- $ cat foo.py
- from tqdm.utils import envwrap
- @envwrap("FOO_")
- def test(a=1, b=2, c=3):
- print(f"received: a={a}, b={b}, c={c}")
-
- $ FOO_A=42 FOO_C=1337 python -c 'import foo; foo.test(c=99)'
- received: a=42, b=2, c=99
- ```
- """
- if types is None:
- types = {}
- i = len(prefix)
- env_overrides = {k[i:].lower(): v for k, v in os.environ.items() if k.startswith(prefix)}
- part = partialmethod if is_method else partial
-
- def wrap(func):
- params = signature(func).parameters
- # ignore unknown env vars
- overrides = {k: v for k, v in env_overrides.items() if k in params}
- # infer overrides' `type`s
- for k in overrides:
- param = params[k]
- if param.annotation is not param.empty: # typehints
- for typ in getattr(param.annotation, '__args__', (param.annotation,)):
- try:
- overrides[k] = typ(overrides[k])
- except Exception:
- pass
- else:
- break
- elif param.default is not None: # type of default value
- overrides[k] = type(param.default)(overrides[k])
- else:
- try: # `types` fallback
- overrides[k] = types[k](overrides[k])
- except KeyError: # keep unconverted (`str`)
- pass
- return part(func, **overrides)
- return wrap
-
-
-class FormatReplace(object):
- """
- >>> a = FormatReplace('something')
- >>> "{:5d}".format(a)
- 'something'
- """ # NOQA: P102
- def __init__(self, replace=''):
- self.replace = replace
- self.format_called = 0
-
- def __format__(self, _):
- self.format_called += 1
- return self.replace
-
-
-class Comparable(object):
- """Assumes child has self._comparable attr/@property"""
- def __lt__(self, other):
- return self._comparable < other._comparable
-
- def __le__(self, other):
- return (self < other) or (self == other)
-
- def __eq__(self, other):
- return self._comparable == other._comparable
-
- def __ne__(self, other):
- return not self == other
-
- def __gt__(self, other):
- return not self <= other
-
- def __ge__(self, other):
- return not self < other
-
-
-class ObjectWrapper(object):
- def __getattr__(self, name):
- return getattr(self._wrapped, name)
-
- def __setattr__(self, name, value):
- return setattr(self._wrapped, name, value)
-
- def wrapper_getattr(self, name):
- """Actual `self.getattr` rather than self._wrapped.getattr"""
- try:
- return object.__getattr__(self, name)
- except AttributeError: # py2
- return getattr(self, name)
-
- def wrapper_setattr(self, name, value):
- """Actual `self.setattr` rather than self._wrapped.setattr"""
- return object.__setattr__(self, name, value)
-
- def __init__(self, wrapped):
- """
- Thin wrapper around a given object
- """
- self.wrapper_setattr('_wrapped', wrapped)
-
-
-class SimpleTextIOWrapper(ObjectWrapper):
- """
- Change only `.write()` of the wrapped object by encoding the passed
- value and passing the result to the wrapped object's `.write()` method.
- """
- # pylint: disable=too-few-public-methods
- def __init__(self, wrapped, encoding):
- super(SimpleTextIOWrapper, self).__init__(wrapped)
- self.wrapper_setattr('encoding', encoding)
-
- def write(self, s):
- """
- Encode `s` and pass to the wrapped object's `.write()` method.
- """
- return self._wrapped.write(s.encode(self.wrapper_getattr('encoding')))
-
- def __eq__(self, other):
- return self._wrapped == getattr(other, '_wrapped', other)
-
-
-class DisableOnWriteError(ObjectWrapper):
- """
- Disable the given `tqdm_instance` upon `write()` or `flush()` errors.
- """
- @staticmethod
- def disable_on_exception(tqdm_instance, func):
- """
- Quietly set `tqdm_instance.miniters=inf` if `func` raises `errno=5`.
- """
- tqdm_instance = proxy(tqdm_instance)
-
- def inner(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except OSError as e:
- if e.errno != 5:
- raise
- try:
- tqdm_instance.miniters = float('inf')
- except ReferenceError:
- pass
- except ValueError as e:
- if 'closed' not in str(e):
- raise
- try:
- tqdm_instance.miniters = float('inf')
- except ReferenceError:
- pass
- return inner
-
- def __init__(self, wrapped, tqdm_instance):
- super(DisableOnWriteError, self).__init__(wrapped)
- if hasattr(wrapped, 'write'):
- self.wrapper_setattr(
- 'write', self.disable_on_exception(tqdm_instance, wrapped.write))
- if hasattr(wrapped, 'flush'):
- self.wrapper_setattr(
- 'flush', self.disable_on_exception(tqdm_instance, wrapped.flush))
-
- def __eq__(self, other):
- return self._wrapped == getattr(other, '_wrapped', other)
-
-
-class CallbackIOWrapper(ObjectWrapper):
- def __init__(self, callback, stream, method="read"):
- """
- Wrap a given `file`-like object's `read()` or `write()` to report
- lengths to the given `callback`
- """
- super(CallbackIOWrapper, self).__init__(stream)
- func = getattr(stream, method)
- if method == "write":
- @wraps(func)
- def write(data, *args, **kwargs):
- res = func(data, *args, **kwargs)
- callback(len(data))
- return res
- self.wrapper_setattr('write', write)
- elif method == "read":
- @wraps(func)
- def read(*args, **kwargs):
- data = func(*args, **kwargs)
- callback(len(data))
- return data
- self.wrapper_setattr('read', read)
- else:
- raise KeyError("Can only wrap read/write methods")
-
-
-def _is_utf(encoding):
- try:
- u'\u2588\u2589'.encode(encoding)
- except UnicodeEncodeError:
- return False
- except Exception:
- try:
- return encoding.lower().startswith('utf-') or ('U8' == encoding)
- except Exception:
- return False
- else:
- return True
-
-
-def _supports_unicode(fp):
- try:
- return _is_utf(fp.encoding)
- except AttributeError:
- return False
-
-
-def _is_ascii(s):
- if isinstance(s, str):
- for c in s:
- if ord(c) > 255:
- return False
- return True
- return _supports_unicode(s)
-
-
-def _screen_shape_wrapper(): # pragma: no cover
- """
- Return a function which returns console dimensions (width, height).
- Supported: linux, osx, windows, cygwin.
- """
- _screen_shape = None
- if IS_WIN:
- _screen_shape = _screen_shape_windows
- if _screen_shape is None:
- _screen_shape = _screen_shape_tput
- if IS_NIX:
- _screen_shape = _screen_shape_linux
- return _screen_shape
-
-
-def _screen_shape_windows(fp): # pragma: no cover
- try:
- import struct
- from ctypes import create_string_buffer, windll
- from sys import stdin, stdout
-
- io_handle = -12 # assume stderr
- if fp == stdin:
- io_handle = -10
- elif fp == stdout:
- io_handle = -11
-
- h = windll.kernel32.GetStdHandle(io_handle)
- csbi = create_string_buffer(22)
- res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
- if res:
- (_bufx, _bufy, _curx, _cury, _wattr, left, top, right, bottom,
- _maxx, _maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
- return right - left, bottom - top # +1
- except Exception: # nosec
- pass
- return None, None
-
-
-def _screen_shape_tput(*_): # pragma: no cover
- """cygwin xterm (windows)"""
- try:
- import shlex
- from subprocess import check_call # nosec
- return [int(check_call(shlex.split('tput ' + i))) - 1
- for i in ('cols', 'lines')]
- except Exception: # nosec
- pass
- return None, None
-
-
-def _screen_shape_linux(fp): # pragma: no cover
-
- try:
- from array import array
- from fcntl import ioctl
- from termios import TIOCGWINSZ
- except ImportError:
- return None, None
- else:
- try:
- rows, cols = array('h', ioctl(fp, TIOCGWINSZ, '\0' * 8))[:2]
- return cols, rows
- except Exception:
- try:
- return [int(os.environ[i]) - 1 for i in ("COLUMNS", "LINES")]
- except (KeyError, ValueError):
- return None, None
-
-
-def _environ_cols_wrapper(): # pragma: no cover
- """
- Return a function which returns console width.
- Supported: linux, osx, windows, cygwin.
- """
- warn("Use `_screen_shape_wrapper()(file)[0]` instead of"
- " `_environ_cols_wrapper()(file)`", DeprecationWarning, stacklevel=2)
- shape = _screen_shape_wrapper()
- if not shape:
- return None
-
- @wraps(shape)
- def inner(fp):
- return shape(fp)[0]
-
- return inner
-
-
-def _term_move_up(): # pragma: no cover
- return '' if (os.name == 'nt') and (colorama is None) else '\x1b[A'
-
-
-def _text_width(s):
- return sum(2 if east_asian_width(ch) in 'FW' else 1 for ch in str(s))
-
-
-def disp_len(data):
- """
- Returns the real on-screen length of a string which may contain
- ANSI control codes and wide chars.
- """
- return _text_width(RE_ANSI.sub('', data))
-
-
-def disp_trim(data, length):
- """
- Trim a string which may contain ANSI control characters.
- """
- if len(data) == disp_len(data):
- return data[:length]
-
- ansi_present = bool(RE_ANSI.search(data))
- while disp_len(data) > length: # carefully delete one char at a time
- data = data[:-1]
- if ansi_present and bool(RE_ANSI.search(data)):
- # assume ANSI reset is required
- return data if data.endswith("\033[0m") else data + "\033[0m"
- return data
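
`disp_len` and `disp_trim` above measure and trim strings by on-screen width rather than character count: ANSI escape sequences contribute zero columns and East Asian wide characters count as two. A small sketch (assuming tqdm is installed; the sample string is illustrative):

```python
from tqdm.utils import disp_len, disp_trim

s = "\x1b[31m漢字abc\x1b[0m"        # "漢字abc" wrapped in red/reset escape codes
assert disp_len(s) == 7             # 2 + 2 + 1 + 1 + 1 visible columns

trimmed = disp_trim(s, 4)           # trim to at most 4 columns
assert disp_len(trimmed) <= 4
assert trimmed.endswith("\x1b[0m")  # the ANSI reset is re-appended if needed
```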
diff --git a/spaces/protoxx91/webui-docker/Dockerfile b/spaces/protoxx91/webui-docker/Dockerfile
deleted file mode 100644
index 3544b54f213708b32e3a046f3310a96062d9d2b0..0000000000000000000000000000000000000000
--- a/spaces/protoxx91/webui-docker/Dockerfile
+++ /dev/null
@@ -1,61 +0,0 @@
-# Dockerfile Public T4
-
-# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/devel/cudnn8/Dockerfile
-# FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04
-# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/base/Dockerfile
-FROM nvidia/cuda:11.7.1-base-ubuntu22.04
-ENV DEBIAN_FRONTEND noninteractive
-
-RUN apt-get update -y && apt-get upgrade -y && apt-get install -y libgl1 libglib2.0-0 wget git git-lfs python3-pip python-is-python3 && rm -rf /var/lib/apt/lists/*
-
-RUN adduser --disabled-password --gecos '' user
-RUN mkdir /content && chown -R user:user /content
-WORKDIR /content
-USER user
-
-RUN pip3 install --upgrade pip
-RUN pip install https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.16/xformers-0.0.16+814314d.d20230118-cp310-cp310-linux_x86_64.whl
-RUN pip install --pre triton
-RUN pip install numexpr
-
-RUN git clone -b v1.6 https://github.com/camenduru/stable-diffusion-webui
-RUN sed -i '$a fastapi==0.90.0' /content/stable-diffusion-webui/requirements_versions.txt
-RUN sed -i -e '''/prepare_environment()/a\ os.system\(f\"""sed -i -e ''\"s/dict()))/dict())).cuda()/g\"'' /content/stable-diffusion-webui/repositories/stable-diffusion-stability-ai/ldm/util.py""")''' /content/stable-diffusion-webui/launch.py
-RUN sed -i -e 's/ start()/ #start()/g' /content/stable-diffusion-webui/launch.py
-RUN cd stable-diffusion-webui && python launch.py --skip-torch-cuda-test
-
-ADD --chown=user https://github.com/camenduru/webui-docker/raw/main/env_patch.py /content/env_patch.py
-RUN sed -i -e '/import image_from_url_text/r /content/env_patch.py' /content/stable-diffusion-webui/modules/ui.py
-ADD --chown=user https://github.com/camenduru/webui-docker/raw/main/header_patch.py /content/header_patch.py
-RUN sed -i -e '/demo:/r /content/header_patch.py' /content/stable-diffusion-webui/modules/ui.py
-
-RUN sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /content/stable-diffusion-webui/modules/ui.py
-RUN sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /content/stable-diffusion-webui/modules/ui.py
-RUN sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /content/stable-diffusion-webui/modules/ui.py
-RUN sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /content/stable-diffusion-webui/modules/ui.py
-RUN sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /content/stable-diffusion-webui/script.js
-RUN sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /content/stable-diffusion-webui/modules/ui.py
-RUN sed -i -e 's/default_enabled=False/default_enabled=True/g' /content/stable-diffusion-webui/webui.py
-RUN sed -i -e 's/ outputs=\[/queue=False, &/g' /content/stable-diffusion-webui/modules/ui.py
-RUN sed -i -e 's/ queue=False, / /g' /content/stable-diffusion-webui/modules/ui.py
-
-ADD --chown=user https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py /content/stable-diffusion-webui/scripts/run_n_times.py
-RUN git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /content/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui
-RUN git clone https://github.com/AlUlkesh/stable-diffusion-webui-images-browser /content/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser
-RUN git clone https://github.com/camenduru/stable-diffusion-webui-huggingface /content/stable-diffusion-webui/extensions/stable-diffusion-webui-huggingface
-RUN git clone -b v2.0 https://github.com/camenduru/sd-civitai-browser /content/stable-diffusion-webui/extensions/sd-civitai-browser
-RUN git clone https://github.com/kohya-ss/sd-webui-additional-networks /content/stable-diffusion-webui/extensions/sd-webui-additional-networks
-RUN git clone https://github.com/Mikubill/sd-webui-controlnet /content/stable-diffusion-webui/extensions/sd-webui-controlnet
-RUN git clone https://github.com/fkunn1326/openpose-editor /content/stable-diffusion-webui/extensions/openpose-editor
-
-RUN rm -rfv /content/stable-diffusion-webui/scripts/
-
-ADD --chown=user https://github.com/camenduru/webui-docker/raw/main/shared-config.json /content/shared-config.json
-ADD --chown=user https://github.com/camenduru/webui-docker/raw/main/shared-ui-config.json /content/shared-ui-config.json
-
-ADD --chown=user https://huggingface.co/andite/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt /content/stable-diffusion-webui/models/Stable-diffusion/anything-v4.5-pruned.ckpt
-ADD --chown=user https://huggingface.co/andite/anything-v4.0/resolve/main/anything-v4.0.vae.pt /content/stable-diffusion-webui/models/Stable-diffusion/anything-v4.5-pruned.vae.pt
-
-EXPOSE 7860
-
-CMD cd /content/stable-diffusion-webui && python webui.py --xformers --listen --disable-console-progressbars --enable-console-prompts --no-progressbar-hiding --ui-config-file /content/shared-ui-config.json --ui-settings-file /content/shared-config.json
diff --git a/spaces/pycoming/bingo/src/components/ui/separator.tsx b/spaces/pycoming/bingo/src/components/ui/separator.tsx
deleted file mode 100644
index 6c55e0b2ca8e2436658a06748aadbff7cd700db0..0000000000000000000000000000000000000000
--- a/spaces/pycoming/bingo/src/components/ui/separator.tsx
+++ /dev/null
@@ -1,31 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as SeparatorPrimitive from '@radix-ui/react-separator'
-
-import { cn } from '@/lib/utils'
-
-const Separator = React.forwardRef<
- React.ElementRef<typeof SeparatorPrimitive.Root>,
- React.ComponentPropsWithoutRef<typeof SeparatorPrimitive.Root>
->(
- (
- { className, orientation = 'horizontal', decorative = true, ...props },
- ref
- ) => (
-
- )
-)
-Separator.displayName = SeparatorPrimitive.Root.displayName
-
-export { Separator }
diff --git a/spaces/pycui/RealChar/client/web/src/setupTests.js b/spaces/pycui/RealChar/client/web/src/setupTests.js
deleted file mode 100644
index 8f2609b7b3e0e3897ab3bcaad13caf6876e48699..0000000000000000000000000000000000000000
--- a/spaces/pycui/RealChar/client/web/src/setupTests.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// jest-dom adds custom jest matchers for asserting on DOM nodes.
-// allows you to do things like:
-// expect(element).toHaveTextContent(/react/i)
-// learn more: https://github.com/testing-library/jest-dom
-import '@testing-library/jest-dom';
diff --git a/spaces/pyodide-demo/self-hosted/autograd.js b/spaces/pyodide-demo/self-hosted/autograd.js
deleted file mode 100644
index 6821bbaec1cd52035de0d0c2f76d81a262043f5a..0000000000000000000000000000000000000000
--- a/spaces/pyodide-demo/self-hosted/autograd.js
+++ /dev/null
@@ -1 +0,0 @@
-var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="autograd.data";var REMOTE_PACKAGE_BASE="autograd.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... 
("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","autograd",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/autograd","numpy",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/autograd","scipy",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/autograd/scipy","stats",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/autograd","misc",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","autograd-1.3-py3.9.egg-info",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var compressedData={data:null,cachedOffset:73813,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1233,2471,3820,5120,6360,7403,8482,9465,10399,11365,12424,13388,14403,15671,16832,18080,19372,20641,21740,23084,23922,24736,25436,26159,26954,28003,29214,30544,31368,32518,33601,34516,35418,36586,37744,38799,40125,41265,42056,43214,44562,45843,47192,48036,49258,49848,50696,51604,52477,53713,54800,55731,56682,57959,59262,60572,61603,62806,63574,64260,65112,65867,66796,67968,69220,70480,71872,73026],sizes:[1233,1238,1349,1300,1240,1043,1079,983,934,966,1059,964,1015,1268,1161,1248,1292,1269,1099,1344,838,814,700,723,795,1049,1211,1330,824,1150,1083,915,902,1168,1158,1055,1326,1140,791,1158,1348,1281,1349,844,1222,590,848,908,873,1236,1087,931,951,1277,1303,1310,1031,1203,768,686,852,755,929,1172,1252,1260,1392,1154,787],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 
?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_autograd.data")}Module["addRunDependency"]("datafile_autograd.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/autograd/differential_operators.py",start:0,end:8392,audio:0},{filename:"/lib/python3.9/site-packages/autograd/core.py",start:8392,end:20627,audio:0},{filename:"/lib/python3.9/site-packages/autograd/builtins.py",start:20627,end:26737,audio:0},{filename:"/lib/python3.9/site-packages/autograd/util.py",start:26737,end:28219,audio:0},{filename:"/lib/python3.9/site-packages/autograd/tracer.py",start:28219,end:32170,audio:0},{filename:"/lib/python3.9/site-packages/autograd/wrap_util.py",start:32170,end:33751,audio:0},{filename:"/lib/python3.9/site-packages/autograd/__init__.py",start:33751,end:34255,audio:0},{filename:"/lib/python3.9/site-packages/autograd/extend.py",start:34255,end:34559,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/linalg.py",start:34559,end:42482,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/numpy_vjps.py",start:42482,end:75051,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/random.py",start:75051,end:75200,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/numpy_vspaces.py",start:75200,end:77204,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/fft.py",start:77204,end:82454,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/numpy_wrapper.py",start:82454,end:87922,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/__init__.py",start:87922,end:88154,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/numpy_boxes.py",start:88154,end:91278,audio:0},{filename:"/lib/python3.9/site-packages/autograd/numpy/numpy_jvps.py",start:91278,end:102047,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/misc.py",start:102047,end:102215,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/linalg.py",start:102215,end:104578,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/special.py",start:104578,end:109713,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/integrate.py",start:109713,end:112580,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/signal.py",start:112580,end:118504,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/__init__.py",start:118504,end:118687,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/stats/chi2.py",start:118687,end:119496,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/stats/norm.py",start:119496,end:122254,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/stats/poisson.py",start:122254,end:122969,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/stats/t.py",start:122969,end:125639,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/stats/beta.py",start:125639,end:126971,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/stats/dirichlet.py",start:126971,end:127743,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/stats/__init__.py",start:127743,end:128134,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/sta
ts/gamma.py",start:128134,end:129121,audio:0},{filename:"/lib/python3.9/site-packages/autograd/scipy/stats/multivariate_normal.py",start:129121,end:131630,audio:0},{filename:"/lib/python3.9/site-packages/autograd/misc/fixed_points.py",start:131630,end:132401,audio:0},{filename:"/lib/python3.9/site-packages/autograd/misc/flatten.py",start:132401,end:133521,audio:0},{filename:"/lib/python3.9/site-packages/autograd/misc/tracers.py",start:133521,end:135721,audio:0},{filename:"/lib/python3.9/site-packages/autograd/misc/optimizers.py",start:135721,end:138478,audio:0},{filename:"/lib/python3.9/site-packages/autograd/misc/__init__.py",start:138478,end:138540,audio:0},{filename:"/lib/python3.9/site-packages/autograd-1.3-py3.9.egg-info/dependency_links.txt",start:138540,end:138541,audio:0},{filename:"/lib/python3.9/site-packages/autograd-1.3-py3.9.egg-info/SOURCES.txt",start:138541,end:139758,audio:0},{filename:"/lib/python3.9/site-packages/autograd-1.3-py3.9.egg-info/PKG-INFO",start:139758,end:140426,audio:0},{filename:"/lib/python3.9/site-packages/autograd-1.3-py3.9.egg-info/requires.txt",start:140426,end:140453,audio:0},{filename:"/lib/python3.9/site-packages/autograd-1.3-py3.9.egg-info/top_level.txt",start:140453,end:140462,audio:0}],remote_package_size:77909,package_uuid:"888acad2-eab5-4cab-ba58-a181920e660c"})})();
\ No newline at end of file
diff --git a/spaces/qinzhu/diy-girlfriend-online/README.md b/spaces/qinzhu/diy-girlfriend-online/README.md
deleted file mode 100644
index 6dd4e72d53e8bfe384933b913e724066117fbe86..0000000000000000000000000000000000000000
--- a/spaces/qinzhu/diy-girlfriend-online/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Moe TTS
-emoji: 😊🎙️
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: qinzhu/diy-girlfriend
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-netsh advfirewall firewall add rule name="diy_chat_girl_web" dir=in action=allow protocol=TCP localport=7860,7870
-netsh advfirewall firewall add rule name="diy_chat_girl_web" dir=out action=allow protocol=TCP localport=7860,7870
\ No newline at end of file
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/AutoCAD Architecture 2010 64 Bit Keygen Free.md b/spaces/quidiaMuxgu/Expedit-SAM/AutoCAD Architecture 2010 64 Bit Keygen Free.md
deleted file mode 100644
index ea2beb60483a6be60ba6de7ec5b362fb52bef6a8..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/AutoCAD Architecture 2010 64 Bit Keygen Free.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
How to Download and Activate AutoCAD Architecture 2010 64 bit for Free
-
AutoCAD Architecture 2010 is a software application that helps you design and document architectural projects. It has features such as building components, walls, doors, windows, roofs, stairs, and more. You can also create 2D drawings and 3D models with realistic materials and textures.
-
If you want to download and activate AutoCAD Architecture 2010 64 bit for free, you will need a keygen that can generate a valid serial number and product key. A keygen is a program that can create unique codes for software activation. However, using a keygen is illegal and risky, as it may contain viruses or malware that can harm your computer or data.
Therefore, we do not recommend using a keygen to activate AutoCAD Architecture 2010 64 bit. Instead, you should buy a legitimate license from Autodesk or use their free trial version for 30 days. You can also use their educational version if you are a student or teacher.
-
Here are the steps to download and activate AutoCAD Architecture 2010 64 bit legally:
Enter your payment details and confirm your order.
-
You will receive an email with your serial number and product key.
-
Enter them in the software activation window and click on "Activate".
-
-
Congratulations! You have successfully downloaded and activated AutoCAD Architecture 2010 64 bit.
-
-
AutoCAD Architecture 2010 64 bit has many advantages over the 32 bit version. It can handle larger and more complex projects, as it can use more memory and processing power. It can also work faster and smoother, as it can access more data at once. It can also support higher resolution displays and graphics cards.
-
However, AutoCAD Architecture 2010 64 bit also has some requirements and limitations. You need to have a 64 bit operating system and a 64 bit processor to run it. You also need to have enough disk space and RAM to install and run it. You may also encounter some compatibility issues with some third-party applications or plugins that are not designed for 64 bit.
-
Therefore, before you download and activate AutoCAD Architecture 2010 64 bit, you should check your system specifications and compatibility. You can do this by using the System Requirements Tool on the Autodesk website. You can also read the FAQ and the Readme file for more information and troubleshooting tips.
- d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/English Subtitles The Wolf Of Wall Street 1080p.md b/spaces/quidiaMuxgu/Expedit-SAM/English Subtitles The Wolf Of Wall Street 1080p.md
deleted file mode 100644
index 9796fdab378a7c0c2eece27f37037c71ac5cfff7..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/English Subtitles The Wolf Of Wall Street 1080p.md
+++ /dev/null
@@ -1,54 +0,0 @@
-## english subtitles the wolf of wall street 1080p
-
-
-
-
-
- 
-
-
-
-
-
-**LINK ->>->>->> [https://jinyurl.com/2txsMZ](https://jinyurl.com/2txsMZ)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# How to Watch The Wolf of Wall Street with English Subtitles
-
-
-
-The Wolf of Wall Street is a 2013 American biographical black comedy crime film directed by Martin Scorsese and starring Leonardo DiCaprio as Jordan Belfort, a New York stockbroker who engages in rampant corruption and fraud on Wall Street in the 1990s. The film is based on Belfort's memoir of the same name and was nominated for five Academy Awards, including Best Picture, Best Director, Best Actor, Best Adapted Screenplay and Best Supporting Actor for Jonah Hill.
-
-
-
-If you want to watch The Wolf of Wall Street with English subtitles, you have several options. Here are some of them:
-
-
-
-- **Download subtitles from online sources.** There are many websites that offer subtitles for various movies and TV shows in different languages. Some of the popular ones are [YIFY Subtitles](https://yifysubtitles.ch/movie-imdb/tt0993846)[^1^], [OpenSubtitles](https://www.opensubtitles.com/en/subtitles/5582943-the-wolf-of-wall-street-2013-720p-bluray-x264-yify)[^2^] and [SUBDL](https://subdl.com/s/subtitle/sd19528/the-wolf-of-wall-street)[^3^]. You can search for The Wolf of Wall Street and choose the subtitle file that matches your video quality (1080p) and format (BluRay, BRRip, etc.). You can then download the subtitle file and save it in the same folder as your video file. Make sure that the subtitle file has the same name as the video file, except for the extension (.srt, .sub, etc.). For example, if your video file is named The.Wolf.of.Wall.Street.2013.1080p.BluRay.x264.YIFY.mp4, your subtitle file should be named The.Wolf.of.Wall.Street.2013.1080p.BluRay.x264.YIFY.srt. A small script that automates this copy-and-rename step is sketched after this list.
-
-- **Use a media player that supports subtitles.** Most media players have the option to load subtitles from an external file or from an online source. Some of the popular ones are [VLC Media Player](https://www.videolan.org/vlc/index.html), [Media Player Classic - Home Cinema](https://mpc-hc.org/) and [PotPlayer](https://potplayer.daum.net/). You can open your video file with one of these media players and then select the subtitle option from the menu or right-click on the screen. You can then browse for the subtitle file that you downloaded or search for it online. You can also adjust the subtitle settings such as font size, color, position and synchronization.
-
-- **Stream the movie online with subtitles.** There are some streaming platforms that offer movies with subtitles in different languages. Some of the popular ones are [Netflix](https://www.netflix.com/), [Amazon Prime Video](https://www.amazon.com/Prime-Video/b?ie=UTF8&node=2676882011) and [Hulu](https://www.hulu.com/). You can check if The Wolf of Wall Street is available on any of these platforms in your region and then choose the subtitle option from the menu or settings. You can also change the subtitle language if you prefer.
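-
-For the first option, a minimal sketch in Python can copy the downloaded subtitle next to the video under a matching name; the file names below are placeholders for your own files.
-
-```python
-import shutil
-from pathlib import Path
-
-video = Path("The.Wolf.of.Wall.Street.2013.1080p.BluRay.x264.YIFY.mp4")  # your video file
-subtitle = Path("downloaded_subtitle.srt")                               # the subtitle you downloaded
-
-# Give the subtitle the same name as the video (only the extension differs)
-# so most media players load it automatically.
-target = video.with_suffix(".srt")
-shutil.copyfile(subtitle, target)
-print(f"Subtitle saved as {target}")
-```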
-
-
-
-We hope this article helped you find a way to watch The Wolf of Wall Street with English subtitles. Enjoy the movie!
-
- dfd1c89656
-
-
-
-
-
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Himmatwar 2 Hindi Dubbed Movies.md b/spaces/quidiaMuxgu/Expedit-SAM/Himmatwar 2 Hindi Dubbed Movies.md
deleted file mode 100644
index 9c282be0c3d10d76abf32e20d4c1233b4573561f..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Himmatwar 2 Hindi Dubbed Movies.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
How to Crack IObit Driver Booster 6 PRO (v6.0.2.691) Multilingual for Free
-
If you are looking for a way to crack IObit Driver Booster 6 PRO (v6.0.2.691) Multilingual for free, you have come to the right place. In this article, I will show you how to download, install and activate this powerful driver updater software without paying a dime.
-
CRACK IObit Driver Booster 6 PRO (v6.0.2.691) Multilingual
IObit Driver Booster 6 PRO is a software that can help you update your drivers easily and quickly. It can scan your PC for outdated, faulty, missing and game-ready drivers and download and install them with one click. It can also backup your drivers for safe restore and optimize your PC performance and gaming experience.
-
IObit Driver Booster 6 PRO has a large database of over 2.5 million drivers from Intel, Nvidia, AMD and other major brands. It also supports Windows 11 and more new hardware devices. It has a simple and easy interface that makes it user-friendly and convenient.
-
However, IObit Driver Booster 6 PRO is not a free software. It costs $22.95 for a one-year license for up to three PCs. If you want to use it for free, you need to crack it with a keygen or a patch.
-
How to Crack IObit Driver Booster 6 PRO (v6.0.2.691) Multilingual
-
To crack IObit Driver Booster 6 PRO (v6.0.2.691) Multilingual, you need to follow these steps:
-
-
Download the setup file of IObit Driver Booster 6 PRO (v6.0.2.691) Multilingual from the official website or from any of these links: [^1^] [^2^] [^3^] [^4^].
-
Run the setup file and install the software on your PC.
-
Do not launch the software after installation.
-
Download the keygen or patch file from any of these links: [^1^] [^2^] [^4^].
-
Extract the keygen or patch file and run it as administrator.
-
Click on the "Generate" button to generate a serial key or click on the "Patch" button to apply the patch.
-
Copy the serial key or patch file and paste it into the registration window of IObit Driver Booster 6 PRO.
-
Click on the "Activate" button to activate the software.
-
Enjoy IObit Driver Booster 6 PRO (v6.0.2.691) Multilingual for free.
-
-
Conclusion
-
In this article, I have shown you how to crack IObit Driver Booster 6 PRO (v6.0.2.691) Multilingual for free with a keygen or a patch. This method is easy and effective, but it may not be legal or safe. You may face some risks such as malware infection, system instability, license violation or legal issues.
-
-
Therefore, I recommend you to buy the official license of IObit Driver Booster 6 PRO from the official website or from any of these links: [^1^] [^2^] [^3^]. This way, you can support the developers, enjoy the full features and benefits of the software, and avoid any potential problems.
-
I hope this article has been helpful for you. If you have any questions or suggestions, please leave a comment below.
-
-
Benefits of IObit Driver Booster 6 PRO
-
IObit Driver Booster 6 PRO is not only a driver updater, but also a driver optimizer. It has many benefits that can make your PC run faster, smoother and safer. Here are some of the benefits of IObit Driver Booster 6 PRO:
-
-
It can scan and update over 8.5 million drivers and game components with one click[^1^] [^2^]. It can also detect and update rare and outdated drivers that are hard to find online.
-
It can automatically backup your drivers before updating them and restore them if anything goes wrong. It can also create a system restore point for extra safety[^2^].
-
It can fix common device errors such as no sound, network failure, device malfunction, etc. with one click[^2^]. It can also resolve Windows issues such as blue screen of death, black screen, etc.
-
It can boost your gaming performance by updating game-ready drivers and game components. It can also optimize your system resources and enhance your gaming experience with the improved Game Boost feature[^1^] [^3^].
-
It can update your drivers offline without internet connection. This is useful when you need to reinstall or upgrade your system or when you have network issues[^1^].
-
It can update your drivers automatically in the background while your system is idle. This saves you time and avoids interrupting your work or play[^2^].
-
It can provide you with free 24/7 technical support on demand if you encounter any problems with the software or the drivers[^2^].
-
-
How to Get IObit Driver Booster 6 PRO for Free
-
If you want to try IObit Driver Booster 6 PRO for free, you can download the trial version from the official website or from any of these links: [^1^] [^2^] [^3^] [^4^] [^5^]. The trial version allows you to scan and update up to 3 drivers per day for 15 days. You can also enjoy some of the features of the PRO version such as offline driver update, driver backup and restore, etc.
-
If you want to get IObit Driver Booster 6 PRO for free without any limitations, you can use a giveaway license key or a crack tool. However, these methods are not recommended as they may be illegal, unsafe or unreliable. You may face some risks such as malware infection, system instability, license violation or legal issues.
-
The best way to get IObit Driver Booster 6 PRO for free is to participate in the official promotion or giveaway events. You can follow the official website or social media accounts of IObit to get the latest news and offers. You may also find some coupons or discounts from some third-party websites or platforms.
7b8c122e87
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Cabelas Big Game Hunter Pro Hunts Crack Only Free Download and Installation Guide.md b/spaces/raedeXanto/academic-chatgpt-beta/Cabelas Big Game Hunter Pro Hunts Crack Only Free Download and Installation Guide.md
deleted file mode 100644
index 06c9c3a6b6412e1e829e7a73c351da062ac75ee5..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Cabelas Big Game Hunter Pro Hunts Crack Only Free Download and Installation Guide.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
Cabelas Big Game Hunter Pro Hunts Crack Only: How to Download and Play the Game for Free
-
If you are a fan of hunting games, you might have heard of Cabelas Big Game Hunter Pro Hunts, a realistic and immersive simulation that lets you hunt various animals across North America. The game features stunning graphics, realistic physics, diverse environments, and expert guidance from four renowned hunters. However, if you don't want to pay for the game or deal with its DRM restrictions, you might be looking for a way to play it with a crack. In this article, we will show you how to download and install Cabelas Big Game Hunter Pro Hunts crack only from a reliable source, and how to troubleshoot some common problems that might occur when using it. But before we do that, let's take a look at what this game is all about and why you might want to play it with a crack.
-
What is Cabelas Big Game Hunter Pro Hunts and why you might want to play it with a crack
-
Cabelas Big Game Hunter Pro Hunts is an action-packed hunting game that was released in 2014 by Activision. The game allows you to hunt various trophy animals in four different regions of North America: Southeast, Northeast, Southwest, and Northwest. You can choose from four different weapons: rifle, shotgun, bow, or handgun. You can also customize your gear, such as scopes, ammo, calls, decoys, etc. The game features realistic ballistics, animal AI, weather effects, and environmental sounds. You can also learn from four professional hunters who will give you tips and advice on how to track, scout, and target your prey.
Playing Cabelas Big Game Hunter Pro Hunts with a crack has some advantages over playing it legally. For one thing, you can save money by not buying the game or paying for any additional DLCs or updates. For another thing, you can bypass the DRM restrictions that might prevent you from playing the game offline or on multiple devices. You can also avoid any potential bugs or glitches that might occur in the official version of the game.
-
However, playing Cabelas Big Game Hunter Pro Hunts with a crack also has some disadvantages. For one thing, you will miss out on any updates or patches that might improve the game's performance or fix any issues. For another thing, you will not be able to access any online features or multiplayer modes that might enhance your gaming experience. You will also risk getting infected by malware or viruses that might be hidden in the crack file. Moreover, you will be violating the game's copyright and terms of service, which could result in legal actions or penalties. And last but not least, you will be depriving the developers of their hard work and deserved revenue.
-
The risks and legal issues of using a cracked game
-
As we mentioned above, using a cracked game is not without risks and legal issues. Here are some of them:
-
-
Malware and viruses: One of the biggest dangers of downloading a crack from an untrusted source is that it might contain malware or viruses that could harm your computer or steal your personal information. Some cracks might also contain adware or spyware that could display unwanted ads or monitor your online activity. To avoid this risk, you should always scan any file you download with an antivirus software before opening it.
-
Legal consequences: Another risk of using a cracked game is that you might face legal consequences for violating the game's copyright and terms of service. According to Activision's End User License Agreement (EULA), "You may not copy (except as expressly permitted by this license), decompile ... reverse engineer ... disassemble ... modify ... create derivative works based upon ... distribute ... sublicense ... rent ... lease ... lend ... transfer ... publicly display ... publicly perform ... transmit ... publish ... commercially exploit ... or otherwise use any portion of this Software except as expressly authorized by Activision." If you do any of these things without Activision's permission, you could be sued for damages or face criminal charges.
-
Ethical implications: Finally, using a cracked game also has ethical implications for depriving the developers of their hard work and deserved revenue. Developing a game like Cabelas Big Game Hunter Pro Hunts takes a lot of time 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Crack Ram Elements 13 1000 Days Why You Should Try the Cracked Version of Ram Elements.md b/spaces/raedeXanto/academic-chatgpt-beta/Crack Ram Elements 13 1000 Days Why You Should Try the Cracked Version of Ram Elements.md
deleted file mode 100644
index e65729189aba6568e718372d06b00be09001cbbb..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Crack Ram Elements 13 1000 Days Why You Should Try the Cracked Version of Ram Elements.md
+++ /dev/null
@@ -1,118 +0,0 @@
-
-
How to Crack RAM Elements 13 for 1000 Days
-
Are you looking for a way to crack RAM Elements 13 for 1000 days? If so, you are not alone. RAM Elements 13 is a powerful software for structural design and analysis that can help you create complex models, perform various types of analysis, and design different kinds of materials. However, it is also a costly software that requires a license to use. If you don't want to pay for a license or renew it every year, you might be tempted to crack RAM Elements 13 and use it for free.
-
But how can you crack RAM Elements 13 for 1000 days? Is it easy or hard? Is it safe or risky? Is it legal or illegal? In this article, we will answer these questions and show you how to crack RAM Elements 13 for 1000 days step by step. We will also give you some tips on how to use RAM Elements 13 for structural design and analysis effectively.
Before we get into how to crack RAM Elements 13 for 1000 days, let's first understand what RAM Elements 13 is and what it can do. RAM Elements 13 is a software developed by Bentley Systems that provides unequaled flexibility for the design and analysis of different types of structures containing linear members and shell elements. The types of analysis available are:
-
-
First order (Linear Analysis)
-
Second order (P-Delta Analysis)
-
Dynamic (Seismic Analysis)
-
-
In addition, RAM Elements 13 is also capable of designing hot-rolled or cold-formed steel members, wood (sawn lumber and glulam) and reinforced concrete members using various codes such as AISC, BS, AISI, AS, CSA, NDS, ACI and BS8110. The program also includes special modules for designing spread footings, combined footings, reinforced concrete columns, retaining walls, concrete walls, tilt-up walls, masonry walls, continuous beams and trusses.
-
Why do you need to crack RAM Elements 13?
-
RAM Elements 13 is a software that can help you with your structural design and analysis projects. It can save you time and money by allowing you to create models quickly, perform analysis accurately, and design materials efficiently. It can also help you present your results clearly with reports and graphics.
-
How to crack ram elements 13 for 1000 days free trial
-Ram elements 13 crack download with 1000 days license
-Crack ram elements 13 software and use it for 1000 days
-Ram elements 13 cracked version with 1000 days activation
-Crack ram elements 13 structural analysis and design software for 1000 days
-Ram elements 13 crack tutorial for 1000 days usage
-Crack ram elements 13 full version with 1000 days keygen
-Ram elements 13 crack patch with 1000 days serial number
-Crack ram elements 13 engineering software and get 1000 days access
-Ram elements 13 crack file with 1000 days registration code
-Crack ram elements 13 finite element analysis software for 1000 days
-Ram elements 13 crack online with 1000 days subscription
-Crack ram elements 13 professional software and enjoy it for 1000 days
-Ram elements 13 crack tool with 1000 days product key
-Crack ram elements 13 advanced software and use it for free for 1000 days
-Ram elements 13 crack generator with 1000 days activation code
-Crack ram elements 13 latest version with 1000 days license key
-Ram elements 13 crack installer with 1000 days validation code
-Crack ram elements 13 premium software and get unlimited features for 1000 days
-Ram elements 13 crack setup with 1000 days verification code
-Crack ram elements v13.00.03.12 and use it for free for a long time (1000 days)
-Ram elements v13.00.03.12 crack exe with a long-term license (1000 days)
-Crack ram elements v13.00.03.12 update and get extended trial period (1000 days)
-Ram elements v13.00.03.12 crack zip with a long-lasting activation (1000 days)
-Crack ram elements v13.00.03.12 release and enjoy it for a long duration (1000 days)
-Ram elements v13.00.03.12 crack rar with a long-term validation (1000 days)
-Crack Bentley ram elements v13 and use it for free for a long time (1000 days)
-Bentley ram elements v13 crack exe with a long-term license (1000 days)
-Crack Bentley ram elements v13 update and get extended trial period (1000 days)
-Bentley ram elements v13 crack zip with a long-lasting activation (1000 days)
-Crack Bentley ram elements v13 release and enjoy it for a long duration (1000 days)
-Bentley ram elements v13 crack rar with a long-term validation (1000 days)
-Crack Bentley Systems ram elements v13 and use it for free for a long time (1000 days)
-Bentley Systems ram elements v13 crack exe with a long-term license (1000 days)
-Crack Bentley Systems ram elements v13 update and get extended trial period (1000 days)
-Bentley Systems ram elements v13 crack zip with a long-lasting activation (1000 days)
-Crack Bentley Systems ram elements v13 release and enjoy it for a long duration (1000 days)
-Bentley Systems ram elements v13 crack rar with a long-term validation (1000 days)
-How to get cracked version of ram elements v13 for free for a long time (1000 days)
-Where to download cracked version of ram elements v13 with a long-term license (1000 days)
-How to install cracked version of ram elements v13 and get extended trial period (1000 days)
-Where to find cracked version of ram elements v13 with a long-lasting activation (1000 days)
-How to use cracked version of ram elements v13 and enjoy it for a long duration (1000 days)
-Where to get cracked version of ram elements v13 with a long-term validation (1000 days)
-How to activate cracked version of ram elements v13 for free for a long time (1000 days)
-Where to buy cracked version of ram elements v13 with a cheap price and a long-term license (1000 days)
-How to update cracked version of ram elements v13 and get more features for free for a long time (1000 days)
-Where to find the best cracked version of ram elements v13 with the latest updates and a long-lasting activation (1000 days)
-How to fix cracked version of ram elements v13 if it stops working or expires before the end of the trial period (1000 days)
-Where to get help or support for cracked version of ram elements v13 if you encounter any problems or errors during the usage or installation process
-
However, RAM Elements 13 is not a free software. It requires a license to use. A license can cost you hundreds or thousands of dollars depending on the type of license you choose. A license also expires after a certain period of time (usually one year) and needs to be renewed regularly.
-
If you don't want to pay for a license or renew it every year, you might want to crack RAM Elements 13 and use it for free. Cracking RAM Elements 13 means bypassing its license verification system and making it think that you have a valid license when you don't. By cracking RAM Elements 13 for 1000 days, you can use it without paying or renewing anything for almost three years.
-
What are the risks of cracking RAM Elements 13?
-the installation folder of RAM Elements 13. The installation folder is usually located in C:\Program Files (x86)\Bentley\Engineering\RAM Elements V8i (SELECTseries 4). You will need to replace the original file "IEGLicLib.dll" with the crack file that you downloaded. To do this, you will need to have administrator privileges on your computer.
-
Here are the steps to copy and paste the crack file into the installation folder:
-
-
Locate the crack file that you downloaded and right-click on it. Select "Copy" from the menu.
-
Go to the installation folder of RAM Elements 13 and find the file "IEGLicLib.dll". Right-click on it and select "Rename" from the menu. Change its name to something else, such as "IEGLicLib.bak". This will create a backup of the original file in case something goes wrong.
-
Right-click on an empty space in the installation folder and select "Paste" from the menu. This will paste the crack file into the folder.
-
Right-click on the crack file and select "Rename" from the menu. Change its name to "IEGLicLib.dll". This will make RAM Elements 13 recognize it as the license file.
-
-
Congratulations, you have successfully copied and pasted the crack file into the installation folder of RAM Elements 13.
-
Step 4: Run RAM Elements 13 and enjoy its features for 1000 days
-
The final step is to run RAM Elements 13 and enjoy its features for 1000 days. To do this, you will need to double-click on the shortcut icon of RAM Elements 13 on your desktop or start menu. This will launch the software and open its main window.
-
You will notice that RAM Elements 13 will not ask you for a license key or verification anymore. It will also show you that you have a trial license for 1000 days in the bottom right corner of the window. This means that you have successfully cracked RAM Elements 13 for 1000 days.
-
You can now use RAM Elements 13 for structural design and analysis without any limitations or restrictions. You can create complex models, perform various types of analysis, and design different kinds of materials. You can also use the special modules for foundations, walls, beams, and trusses. You can also generate reports and graphics to present your results clearly.
-
Enjoy using RAM Elements 13 for 1000 days!
-
How to use RAM Elements 13 for structural design and analysis?
-
Now that you have cracked RAM Elements 13 for 1000 days, you might be wondering how to use it for structural design and analysis. In this section, we will give you an overview of RAM Elements 13 features and some tips and tricks for using it efficiently.
-
Overview of RAM Elements 13 features
-
RAM Elements 13 is a software that provides unequaled flexibility for the design and analysis of different types of structures containing linear members and shell elements. It has many features that can help you with your structural design and analysis projects. Some of these features are:
-
Linear and nonlinear analysis
-
RAM Elements 13 can perform linear and nonlinear analysis of structures using various methods such as first order, second order, or dynamic analysis. You can choose the type of analysis that suits your needs and objectives. You can also define different types of loads such as dead, live, wind, seismic, temperature, or user-defined loads. You can also apply different types of boundary conditions such as supports, restraints, springs, or releases. You can also perform modal analysis to find the natural frequencies and mode shapes of your structure.
-
Design of steel, wood, concrete, and masonry members
-rectangles, circles, or user-defined sections. You can also design connections such as bolts, welds, or plates. You can also check the strength, stability, and serviceability of your members using various criteria such as stress, deflection, buckling, or cracking.
-
Special modules for foundations, walls, beams, and trusses
-
RAM Elements 13 also includes special modules for designing foundations, walls, beams, and trusses. These modules allow you to create and design these elements quickly and easily. You can choose from different types of foundations such as spread footings, combined footings, or pile caps. You can also choose from different types of walls such as reinforced concrete walls, tilt-up walls, masonry walls, or retaining walls. You can also choose from different types of beams such as continuous beams or trusses. You can also design these elements using various codes and criteria.
-
Tips and tricks for using RAM Elements 13 efficiently
-
RAM Elements 13 is a software that can help you with your structural design and analysis projects. However, to use it efficiently and effectively, you need to know some tips and tricks that can save you time and effort. Here are some of them:
-
Use templates and wizards to create models quickly
-
RAM Elements 13 provides templates and wizards that can help you create models quickly and easily. You can use these templates and wizards to define the geometry, properties, loads, and boundary conditions of your structure. You can also modify these templates and wizards to suit your needs and preferences. You can also save your own templates and wizards for future use.
-
Use parametric design tools to optimize your design
-
RAM Elements 13 provides parametric design tools that can help you optimize your design. You can use these tools to change the parameters of your structure such as dimensions, sections, materials, loads, or boundary conditions. You can also use these tools to perform sensitivity analysis or optimization analysis to find the best solution for your structure.
-
Use reports and graphics to present your results clearly
-
RAM Elements 13 provides reports and graphics that can help you present your results clearly and professionally. You can use these reports and graphics to display the input data, output data, analysis results, design results, or summary of your structure. You can also customize these reports and graphics to suit your needs and preferences. You can also export these reports and graphics to various formats such as PDF, Excel, Word, or DXF.
-
Conclusion
-
In this article, we have shown you how to crack RAM Elements 13 for 1000 days step by step. We have also given you an overview of RAM Elements 13 features and some tips and tricks for using it efficiently. We hope that this article has been helpful and informative for you.
-unethical, and unfair. It could cause you legal, security, performance, and ethical problems. It could also harm your computer or data. It could also disrespect the work and effort of the developer and the other users. Therefore, we do not recommend or endorse cracking RAM Elements 13 for 1000 days. We suggest that you use RAM Elements 13 legally and responsibly by paying for a license or renewing it regularly.
-
If you have any questions or comments about this article, please feel free to contact us. We would love to hear from you.
-
FAQs
-
Here are some frequently asked questions about cracking RAM Elements 13 for 1000 days:
-
Q: Is cracking RAM Elements 13 for 1000 days easy or hard?
-
A: Cracking RAM Elements 13 for 1000 days is not very hard if you follow the steps that we have shown you in this article. However, it is not very easy either. You will need to download and install RAM Elements 13 from the official website, download and install the crack file from a reliable source, copy and paste the crack file into the installation folder, and run RAM Elements 13. You will also need to have administrator privileges on your computer and an antivirus program to scan the crack file.
-
Q: Is cracking RAM Elements 13 for 1000 days safe or risky?
-
A: Cracking RAM Elements 13 for 1000 days is very risky. It could expose you to legal, security, performance, and ethical problems. You could face fines or lawsuits for violating the terms and conditions of the software and infringing the intellectual property rights of the developer. You could also get malware or viruses that could harm your computer or steal your data. You could also experience errors, crashes, or glitches in the software that could affect your work or results. You could also disrespect the work and effort of the developer and the other users who paid for the license and followed the rules.
-
Q: Is cracking RAM Elements 13 for 1000 days legal or illegal?
-
A: Cracking RAM Elements 13 for 1000 days is illegal. It violates the terms and conditions of the software and infringes the intellectual property rights of the developer. The developer has the right to protect their software from unauthorized use and distribution. The developer also has the right to charge a fee for using their software and providing updates, patches, or bug fixes. By cracking RAM Elements 13 for 1000 days, you are breaking the law and stealing from the developer.
-
Q: Is cracking RAM Elements 13 for 1000 days ethical or unethical?
-the developer who spent time and money to create the software. It is also unfair to the other users who paid for the license and followed the rules. By cracking RAM Elements 13 for 1000 days, you are disrespecting the work and effort of the developer and the other users. You are also taking advantage of their generosity and trust. You are also depriving them of the revenue that they deserve and need to maintain and improve the software.
-
Q: Is cracking RAM Elements 13 for 1000 days worth it or not?
-
A: Cracking RAM Elements 13 for 1000 days is not worth it. It could cause you more trouble than it's worth. It could expose you to legal, security, performance, and ethical problems that could outweigh the benefits of using the software for free. It could also harm your computer or data, affect your work or results, and disrespect the developer and the other users. It could also damage your reputation and credibility as a professional or a student. Therefore, we do not recommend or endorse cracking RAM Elements 13 for 1000 days. We suggest that you use RAM Elements 13 legally and responsibly by paying for a license or renewing it regularly.
0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Crystal ICR Software Crack 89 The Ultimate Solution for Image to Text Conversion.md b/spaces/raedeXanto/academic-chatgpt-beta/Crystal ICR Software Crack 89 The Ultimate Solution for Image to Text Conversion.md
deleted file mode 100644
index 3c6892bfd13a0c4f2a20fac9c70a135afc7967c1..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Crystal ICR Software Crack 89 The Ultimate Solution for Image to Text Conversion.md
+++ /dev/null
@@ -1,154 +0,0 @@
-
-
Crystal ICR Software Crack 89: What You Need to Know
-
If you are looking for a way to convert images to text, you may have come across Crystal ICR software. This is a powerful and versatile software that can recognize text from any image format, such as JPG, PNG, BMP, TIFF, PDF, etc. It can also handle handwritten text, cursive text, low-quality text, and multiple languages. It can save you time and effort by converting images to editable text files, such as DOC, RTF, TXT, HTML, etc.
-
However, Crystal ICR software is not free. It requires a license key to activate and use. Some people may try to find a cracked version of Crystal ICR software online, hoping to get it for free or at a lower price. But is this a good idea? What are the risks and drawbacks of using a cracked version of Crystal ICR software? And how can you get a genuine version of Crystal ICR software without breaking the law or compromising your quality? In this article, we will answer these questions and more.
Crystal ICR software is based on intelligent character recognition (ICR) technology, which is an advanced form of optical character recognition (OCR) technology. OCR technology can scan printed text from images and convert it to digital text. However, OCR technology has some limitations when it comes to handwritten text, cursive text, low-quality text, or multiple languages. That's where ICR technology comes in.
-
ICR technology can analyze the shape, size, style, and context of each character in an image and compare it with a database of possible characters. It can then identify the most likely match and convert it to digital text. This way, it can handle more complex and diverse types of text than OCR technology. It can also learn from its mistakes and improve its accuracy over time.
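-
-As a concrete illustration of the plain-OCR side of this, the open-source Tesseract engine can be driven from Python through the pytesseract package. This is a generic OCR sketch, not Crystal ICR's own API, and the file name is a placeholder:
-
-```python
-from PIL import Image   # pip install pillow
-import pytesseract      # pip install pytesseract, plus the Tesseract engine itself
-
-# Recognize the printed text in a scanned page or photo.
-image = Image.open("scanned_page.png")
-text = pytesseract.image_to_string(image, lang="eng")
-
-# Save the result as an editable text file.
-with open("scanned_page.txt", "w", encoding="utf-8") as f:
-    f.write(text)
-```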
-
Crystal ICR software uses ICR technology to provide fast and accurate image to text conversion. It has several features and benefits that make it stand out from other similar software. Some of these features are:
-
-
It can process multiple images at once in batch mode.
-
It can automatically rotate, crop, resize, enhance, and clean up images before conversion.
-
It can recognize text from any image format, such as JPG, PNG, BMP, TIFF, PDF, etc.
-
It can handle handwritten text, cursive text, low-quality text, and multiple languages.
-
It can convert images to editable text files, such as DOC, RTF, TXT, HTML, etc.
-
It can preserve the original layout, formatting, fonts, colors, and images of the source document.
-
It can export the converted text files to various applications or platforms, such as Microsoft Word, Excel, PowerPoint, Google Docs, Drive, Sheets, Slides, etc.
-
It has a user-friendly interface that is easy to use and customize.
-
-
The Risks and Drawbacks of Using a Cracked Version of Crystal ICR Software
-
While it may be tempting to use a cracked version of Crystal ICR software instead of paying for a license key, you should be aware of the potential risks and drawbacks of doing so. Using a cracked version of Crystal ICR software can expose you to legal, ethical, and technical issues that can harm you or your work. Here are some of the common problems that may arise when using a cracked version of Crystal ICR software:
-
Legal Issues
-
Using a cracked version of Crystal ICR software is illegal. It violates the copyright and licensing terms of the software developer, which state that you must purchase a license key to activate and use the software. By using a cracked version, you are essentially stealing the intellectual property of the developer and depriving them of their rightful revenue. This is not only unfair, but also punishable by law. You may face legal action, such as fines, penalties, or even jail time, if you are caught using or distributing a cracked version of Crystal ICR software.
-
Ethical Issues
-
Using a cracked version of Crystal ICR software is unethical. It shows disrespect for the hard work and creativity of the software developer, who spent years developing and improving the software. It also shows disregard for the quality and integrity of your own work, as you are relying on an unreliable and untrustworthy source of information. By using a cracked version, you are not only hurting the developer, but also yourself and your reputation. You may lose credibility, trust, or respect from your clients, colleagues, or peers, if they find out that you are using or producing low-quality or inaccurate work because of using a cracked version of Crystal ICR software.
-
Technical Issues
-
Using a cracked version of Crystal ICR software is risky. It may contain viruses, malware, spyware, or other harmful programs that can infect your computer or device. It may also have bugs, errors, or glitches that can cause crashes, freezes, or data loss. It may not be compatible with your operating system or hardware specifications. It may not be updated with the latest features or security patches. It may not have customer support or technical assistance available in case you encounter any problems or issues. By using a cracked version, you are putting your computer or device at risk of damage or malfunction. You are also compromising the quality and accuracy of your output by using an outdated or defective version of Crystal ICR software.
-
How to Get a Genuine Version of Crystal ICR Software
-
The best way to avoid all these risks and drawbacks is to get a genuine version of Crystal ICR software from the official website or authorized distributors. This way, you can enjoy all the features and benefits of the software without breaking the law or compromising your quality. Here are some steps on how to get a genuine version of Crystal ICR software:
-
crystal icr software free download with crack 89
-how to install crystal icr software crack 89 on windows 10
-crystal icr software crack 89 license key generator
-crystal icr software crack 89 full version for mac
-crystal icr software crack 89 tutorial pdf
-crystal icr software crack 89 review and ratings
-crystal icr software crack 89 alternative and comparison
-crystal icr software crack 89 features and benefits
-crystal icr software crack 89 system requirements and compatibility
-crystal icr software crack 89 customer support and feedback
-crystal icr software crack 89 discount code and coupon
-crystal icr software crack 89 online demo and trial
-crystal icr software crack 89 update and patch
-crystal icr software crack 89 error and bug fix
-crystal icr software crack 89 refund policy and guarantee
-crystal icr software crack 89 pros and cons
-crystal icr software crack 89 testimonials and case studies
-crystal icr software crack 89 best practices and tips
-crystal icr software crack 89 faq and help center
-crystal icr software crack 89 video and audio converter
-crystal icr software crack 89 image and text recognition
-crystal icr software crack 89 document and pdf conversion
-crystal icr software crack 89 data extraction and analysis
-crystal icr software crack 89 automation and workflow
-crystal icr software crack 89 integration and api
-crystal icr software crack 89 security and privacy
-crystal icr software crack 89 performance and speed
-crystal icr software crack 89 quality and accuracy
-crystal icr software crack 89 customization and flexibility
-crystal icr software crack 89 scalability and reliability
-crystal icr software crack 89 pricing and plans
-crystal icr software crack 89 payment methods and options
-crystal icr software crack 89 delivery and installation
-crystal icr software crack 89 activation and registration
-crystal icr software crack 89 backup and restore
-crystal icr software crack 89 uninstallation and removal
-crystal icr software crack 89 troubleshooting and solutions
-crystal icr software crack 89 feedback form and survey
-crystal icr software crack 89 affiliate program and commission
-crystal icr software crack 89 reseller program and partnership
-crystal icr software crack 89 referral program and rewards
-crystal icr software crack 89 contest and giveaway
-crystal icr software crack 89 webinar and training
-crystal icr software crack 89 ebook and guide
-crystal icr software crack 89 blog and newsletter
-crystal icr software crack 89 forum and community
-crystal icr software crack 89 social media and contact us
-crystal icr software crack 89 legal terms and conditions
-
Pricing and Plans
-
The first step is to choose a pricing plan that suits your needs and budget. Crystal ICR software offers different pricing plans depending on how many pages you want to convert per month. The basic plan costs $29 per month and allows you to convert up to 500 pages per month. The standard plan costs $49 per month and allows you to convert up to 1000 pages per month. and allows you to convert up to 2500 pages per month. The enterprise plan costs $199 per month and allows you to convert up to 5000 pages per month. You can also request a custom plan if you need more pages or features. You can pay by credit card, PayPal, or bank transfer. You can also cancel or change your plan at any time.
-
Installation and Activation
-
The second step is to download and install Crystal ICR software on your computer or device. You can download the software from the official website or from the email confirmation that you will receive after purchasing a license key. The software is compatible with Windows XP, Vista, 7, 8, 10, and Mac OS X. The installation process is simple and straightforward. You just need to follow the instructions on the screen and enter your license key when prompted. The license key will activate your software and allow you to use it for as long as your subscription is valid.
-
Customer Support and Updates
-
The third step is to enjoy the customer support and updates available for Crystal ICR software users. You can access the customer support by email, phone, or live chat. The customer support team is available 24/7 and can help you with any questions or issues that you may have regarding the software. You can also access the updates by checking the official website or the software itself. The updates will provide you with the latest features, improvements, and security patches for Crystal ICR software. You can also request new features or give feedback to the developer through the customer support or the official website.
-
Alternatives to Crystal ICR Software
-
If you are not satisfied with Crystal ICR software or if you want to try some other image to text conversion software, you have some alternatives to choose from. There are many other programs that can perform similar functions to Crystal ICR software, but with different features, prices, and quality. Some of these alternatives are:
-
Free Online Tools
-
If you don't want to download or install anything on your computer or device, you can use some free online tools that can convert images to text without any hassle. Some of these tools are:
-
-
Online OCR: This is a free online tool that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document.
-
Free OCR: This is another free online tool that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document.
-
OCR Space: This is a free online tool that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document.
-
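If you would rather script the conversion than use a web page, the short sketch below shows the general idea with the open-source Tesseract engine through the pytesseract package. It is only an illustration and not one of the tools listed above: it assumes the Tesseract binary and the Python packages are installed locally, and the file name is a placeholder.

```python
# Illustrative OCR sketch (assumes the Tesseract binary is installed,
# plus `pip install pytesseract pillow`); "scanned_page.png" is a placeholder.
from PIL import Image
import pytesseract

image = Image.open("scanned_page.png")

# Extract plain text; lang can be any installed Tesseract language pack.
text = pytesseract.image_to_string(image, lang="eng")

# Save the result as an editable text file.
with open("scanned_page.txt", "w", encoding="utf-8") as f:
    f.write(text)

print(text[:300])  # quick preview of the recognized text
```

The same call can be looped over a folder of images for a rough batch mode, although dedicated tools handle layout and formatting far better than this bare-bones approach.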
-
Paid Desktop Software
-
If you want more features and customization options for your image to text conversion, you can use some paid desktop software that can convert images to text with more accuracy and efficiency. Some of these programs are:
-
-
ABBYY FineReader: This is a paid desktop software that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document. It has advanced features such as editing, annotation, collaboration, comparison, etc.
-
Nuance Power PDF: This is another paid desktop software that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document. It has advanced features such as editing, annotation, collaboration, comparison, etc.
-
Wondershare PDFelement: This is another paid desktop software that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document. It has advanced features such as editing, annotation, collaboration, comparison, etc.
-
-
Mobile Apps
-
If you want to convert images to text on the go with ease and convenience, you can use some mobile apps that can convert images to text with your smartphone or tablet camera. Some of these apps are:
-
-
Text Scanner [OCR]: This is a free mobile app that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document. It has features such as editing, sharing, copying, translating, etc.
-
Scanner App: Scan PDF Document: This is another free mobile app that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document. It has features such as editing, sharing, copying, translating, etc.
-
Microsoft Office Lens - PDF Scanner: This is another free mobile app that can convert images to text in various languages and formats. It can handle scanned documents, PDF files, screenshots, photos, etc. It can also preserve the layout and formatting of the original document. It has features such as editing, sharing, copying, translating, etc.
-
-
Conclusion
-
In conclusion, Crystal ICR software is a powerful and versatile program that can convert images to text with high accuracy and efficiency. However, using a cracked version of Crystal ICR software is illegal, unethical, and risky. It may expose you to legal action, moral dilemmas, and technical problems. The best way to avoid these issues is to get a genuine version of Crystal ICR software from the official website or authorized distributors. This way, you can enjoy all the features and benefits of the software without breaking the law or compromising the quality of your work. You can also try some alternatives to Crystal ICR software if you want more options for your image to text conversion needs.
-
FAQs
-
Here are some frequently asked questions about Crystal ICR software:
-
-
What is Crystal ICR software?
-
Crystal ICR software is a program that can convert images to text using intelligent character recognition (ICR) technology.
-
What are the benefits of using Crystal ICR software?
-
Some of the benefits of using Crystal ICR software are:
-
-
It can process multiple images at once in batch mode.
-
It can automatically rotate, crop, resize, enlarge, and clean up images before conversion.
-
It can recognize text from any image format, such as JPG, PNG, BMP, TIFF, PDF, etc.
-
It can handle handwritten text, cursive text, low-quality text, and multiple languages.
-
It can convert images to editable text files, such as DOC, RTF, TXT, HTML, etc.
-
It can preserve the original layout/formatting/fonts/colors/images of the source document.
-
It has a user-friendly interface that is easy to use and customize.
-
-
What are the risks of using a cracked version of Crystal ICR software?
-
Some of the risks of using a cracked version of Crystal ICR software are:
-
-
It may contain viruses, malware, spyware, and other harmful programs that may infect your computer/device.
-
It may have bugs/errors/glitches that may cause crashes/freezes/data loss.
-
It may not be compatible with your operating system/hardware specifications.
-
It may not be updated with the latest features/security patches.
-
It may not come with any customer support/technical assistance in case you encounter any problems/issues.
-
It may violate the copyright/licensing terms of the software developer and expose you to legal action.
-
It may show disrespect for the hard work/creativity of the software developer and deprive them of their rightful revenue.
-
It may show disregard for the quality/integrity of your own work and affect your credibility/trust/respect.
-
-
How to get a genuine version of Crystal ICR software?
-
To get a genuine version of Crystal ICR software, you need to:
-
-
Choose a pricing plan that suits your needs and budget from the official website or authorized distributors.
-
Download and install Crystal ICR software on your computer or device from the official website or the email confirmation that you will receive after purchasing a license key.
-
Enter your license key when prompted to activate and use the software for as long as your subscription is valid.
-
Enjoy the customer support and updates available for Crystal ICR software users.
-
-
What are some alternatives to Crystal ICR software?
-
Some alternatives to Crystal ICR software are:
-
-
Free online tools that can convert images to text without downloading or installing anything, such as Online OCR, Free OCR, and OCR Space.
-
Paid desktop software that can convert images to text with more features and customization options, such as ABBYY FineReader, Nuance Power PDF, and Wondershare PDFelement.
-
Mobile apps that can convert images to text on the go with ease and convenience, such as Text Scanner [OCR], Scanner App: Scan PDF Document, and Microsoft Office Lens - PDF Scanner.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Gadwin Screen Recorder 3.4 Keygen A Complete Guide to Using the Software.md b/spaces/raedeXanto/academic-chatgpt-beta/Gadwin Screen Recorder 3.4 Keygen A Complete Guide to Using the Software.md
deleted file mode 100644
index a220a1a831ef9f44525954da2a41515d4c0fb197..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Gadwin Screen Recorder 3.4 Keygen A Complete Guide to Using the Software.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
- [Article outline: key features (record the entire screen or a selected portion, zoom into any area, Auto-Pan, mouse highlighting, simultaneous video and audio recording); how to download and install Gadwin Screen Recorder 3.4; how to use the keygen; tips and tricks for effective use; conclusion; FAQs]
Gadwin Screen Recorder 3.4 keygen: A Comprehensive Guide
-
If you are looking for a simple yet powerful tool to record your screen activities, you might want to check out Gadwin Screen Recorder. This software allows you to capture cursor movements, menu selections, pop-up windows, layered windows, typing, sounds, and everything else you see on your screen. You can also edit your recordings with various tools and save them in different formats.
-
However, if you want to enjoy all the features and benefits of Gadwin Screen Recorder, you will need a valid serial key to activate it. Otherwise, you will be limited by a trial period and a watermark on your videos. That's why many people are searching for a way to get a free keygen for Gadwin Screen Recorder.
In this article, we will show you everything you need to know about Gadwin Screen Recorder 3.4 keygen. We will explain what it is, how it works, where to get it, and how to use it. We will also give you some tips and tricks for using Gadwin Screen Recorder effectively. By the end of this article, you will be able to record your screen like a pro with Gadwin Screen Recorder.
-
Features and benefits of Gadwin Screen Recorder
-
Gadwin Screen Recorder is a versatile and easy-to-use software that can help you create movies quickly and easily with automatic recording functions, mouse tracking, and more. Here are some of the features and benefits of Gadwin Screen Recorder:
-
Gadwin Screen Recorder 3.4 crack download
-How to get Gadwin Screen Recorder 3.4 for free
-Gadwin Screen Recorder 3.4 serial number generator
-Gadwin Screen Recorder 3.4 activation code
-Gadwin Screen Recorder 3.4 license key
-Gadwin Screen Recorder 3.4 full version
-Gadwin Screen Recorder 3.4 patch
-Gadwin Screen Recorder 3.4 torrent
-Gadwin Screen Recorder 3.4 registration key
-Gadwin Screen Recorder 3.4 product key
-Gadwin Screen Recorder 3.4 hacked version
-Gadwin Screen Recorder 3.4 keygen online
-Gadwin Screen Recorder 3.4 crack free download
-Gadwin Screen Recorder 3.4 serial key
-Gadwin Screen Recorder 3.4 unlock code
-Gadwin Screen Recorder 3.4 crack file
-Gadwin Screen Recorder 3.4 keygen software
-Gadwin Screen Recorder 3.4 crack serial number
-Gadwin Screen Recorder 3.4 keygen download link
-Gadwin Screen Recorder 3.4 crack activation code
-Gadwin Screen Recorder 3.4 keygen no survey
-Gadwin Screen Recorder 3.4 crack license key
-Gadwin Screen Recorder 3.4 keygen full version
-Gadwin Screen Recorder 3.4 crack patch
-Gadwin Screen Recorder 3.4 keygen torrent
-Gadwin Screen Recorder 3.4 crack registration key
-Gadwin Screen Recorder 3.4 keygen product key
-Gadwin Screen Recorder 3.4 crack hacked version
-Gadwin Screen Recorder 3.4 keygen online generator
-Gadwin Screen Recorder 3.4 crack serial key generator
-Gadwin Screen Recorder 3.4 keygen unlock code generator
-Gadwin Screen Recorder 3.4 crack file download
-Gadwin Screen Recorder 3.4 keygen software download
-Gadwin Screen Recorder 3.4 crack serial number generator online
-Gadwin Screen Recorder 3.4 keygen activation code generator online
-Gadwin Screen Recorder 3.4 crack license key generator online
-Gadwin Screen Recorder 3.4 keygen full version download link
-Gadwin Screen Recorder 3.4 crack patch download link
-Gadwin Screen Recorder 3.4 keygen torrent download link
-Gadwin Screen Recorder 3.4 crack registration key generator online
-Gadwin Screen Recorder 3.4 keygen product key generator online
-Gadwin Screen Recorder 3.4 crack hacked version download link
-How to install and use Gadwin Screen Recorder 3.4 keygen
-How to activate and register Gadwin Screen Recorder 3.4 with crack
-How to fix errors and bugs in Gadwin Screen Recorder 3.4 with patch
-How to uninstall and remove Gadwin Screen Recorder 3.4 with serial number
-How to update and upgrade Gadwin Screen Recorder 3.4 with license key
-How to backup and restore data in Gadwin Screen Recorder 3.4 with product key
-How to customize and optimize settings in Gadwin Screen Recorder 3.4 with activation code
-How to record and edit videos in Gadwin Screen Recorder 3.4 with registration key
-
-
Capture video directly to your hard drive: You don't need to worry about running out of disk space or losing your recordings due to power failure or system crash. Gadwin Screen Recorder saves your videos directly to your hard drive as you record.
-
Record the entire screen or any selected portion: You can choose to record your full screen or select a specific area of your screen with a resizable window. You can also record multiple monitors at once.
-
Zoom into any area of the recording: You can zoom in or out of any area of your recording to better show the details and focus the viewer's attention. You can also pan around the zoomed area with your mouse.
-
Auto-Pan feature: This feature automatically moves the recording window along with your cursor so that you don't have to adjust it manually.
-
Mouse highlighting: This feature spotlights the location of your cursor with a colored circle or a custom image. You can also add click sounds and visual effects to emphasize your mouse actions.
-
Record video and audio simultaneously: You can record audio from your microphone and speakers at the same time as your video. You can also adjust the volume levels and sync them with your video.
-
-
How to download and install Gadwin Screen Recorder 3.4
-
To download and install Gadwin Screen Recorder 3.4 on your computer, follow these steps:
-
-
Visit the official website of Gadwin Systems: Go to https://www.gadwin.com/download/ and find Gadwin ScreenRecorder in the list of products.
-
Choose the version that suits your system: Depending on whether you have a 32-bit or a 64-bit system, click on either "Download (32-bit)" or "Download (64-bit)" button.
-
Click on the download button and save the file: A new window will open with a download link. Click on it and save the file to your preferred location.
-
Run the setup file and follow the instructions: Locate the downloaded file and double-click on it to run it. Follow the instructions on the screen to complete the installation process.
-
Launch the program and enjoy: After installing Gadwin ScreenRecorder, you can launch it from your desktop or start menu. You will see a small icon in your system tray that indicates that Gadwin ScreenRecorder is running.
-
-
How to use Gadwin Screen Recorder 3.4 keygen
-
To use Gadwin Screen Recorder 3.4 keygen to activate your software, follow these steps:
-
-
Download the keygen from a reliable source: There are many websites that claim to offer free keygens for various software products, but not all of them are trustworthy. Some may contain viruses or malware that can harm your computer or steal your personal information. Therefore, be careful when choosing where to download your keygen from. One possible source is https://filecr.com/windows/gadwin-screenrecorder/ which provides a direct download link for Gadwin ScreenRecorder 4.5.0 Free Download along with a keygen.
-
Extract the zip file and run the keygen.exe file: After downloading the zip file from FileCR.com, extract it using any archive software such as WinRAR or 7-Zip. You will find two files inside: gadwinscreenrecorder450.exe (the setup file) and keygen.exe (the keygen file). Run the keygen.exe file by double-clicking on it.
-
Select Gadwin Screen Recorder from the list of products: A small window will open with a drop-down menu that lists various products from Gadwin Systems. Select "Gadwin ScreenRecorder" from this menu.
-
Click on the generate button and copy the serial key: After selecting "Gadwin ScreenRecorder", click on "Generate" button below it. A random serial key will be generated for you. Copy this serial key by clicking on "Copy" button next to it.
-
-Paste the serial key into the registration window of Gadwin ScreenRecorder and activate it: Launch Gadwin ScreenRecorder from your desktop or start menu. You will see a registration window that asks you to enter your name and serial number. Paste the serial key that you copied into the serial number field. Enter your name or any name you want into the name field. Click on the "Register" button to activate your software.
-
-
Tips and tricks for using Gadwin Screen Recorder effectively
-
Now that you have activated your Gadwin Screen Recorder with a keygen, you can start using it to record your screen activities. Here are some tips and tricks for using Gadwin Screen Recorder effectively:
-
-
Adjust the settings according to your preferences: Before you start recording, you can adjust the settings of Gadwin Screen Recorder to suit your needs. You can access the settings window by right-clicking on the icon in the system tray and choosing "Settings". You can change the hotkeys, video format, audio source, frame rate, quality, output folder, and more.
-
Use keyboard shortcuts to start, pause, resume, and stop recording: You don't have to use the mouse to control your recording. You can use keyboard shortcuts to start, pause, resume, and stop recording. The default hotkeys are F9 to start or resume recording, F10 to pause recording, and F11 to stop recording. You can change these hotkeys in the settings window.
-
Use the editing tools to trim, crop, rotate, add annotations, and more: After you finish recording, you can use the editing tools of Gadwin Screen Recorder to enhance your video. You can access the editing window by right-clicking on the icon in the system tray and choosing "Edit". You can trim, crop, rotate, add annotations, apply filters, adjust brightness and contrast, and more.
-
Save your recordings in different formats and quality levels: You can save your recordings in different formats and quality levels depending on your purpose. You can choose from AVI, MP4, WMV, MOV, FLV, MKV, and more. You can also choose from low, medium, high, or custom quality levels. You can change these options in the settings window.
-
Share your recordings on YouTube, Facebook, Twitter, or other platforms: You can easily share your recordings on YouTube, Facebook, Twitter, or other platforms with Gadwin Screen Recorder. You can access the sharing window by right-clicking on the icon in the system tray and choosing "Share". You can choose from various options such as uploading directly to YouTube or Facebook, sending via email or FTP, burning to DVD or CD, or copying to clipboard.
-
-
Conclusion
-
Gadwin Screen Recorder is a simple yet powerful tool that can help you record your screen activities with ease. It has many features and benefits that make it a great choice for anyone who wants to create movies quickly and easily. However, if you want to enjoy all the features and benefits of Gadwin Screen Recorder without paying for it, you will need a keygen to activate it.
-
In this article, we showed you how to use Gadwin Screen Recorder 3.4 keygen to get a free serial key for your software. We also gave you some tips and tricks for using Gadwin Screen Recorder effectively. We hope that this article was helpful and informative for you.
-
If you are interested in trying out Gadwin Screen Recorder for yourself, you can download it from https://www.gadwin.com/download/ and use the keygen from https://filecr.com/windows/gadwin-screenrecorder/ to activate it. You will be amazed by how easy and fun it is to record your screen with Gadwin Screen Recorder.
-
Thank you for reading this article. If you have any questions or comments about Gadwin Screen Recorder or its keygen, feel free to leave them below. We would love to hear from you.
-
FAQs
-
Here are some frequently asked questions about Gadwin Screen Recorder and its keygen:
-
Q1: Is Gadwin Screen Recorder safe to use?
-
-A1: Gadwin Screen Recorder is safe to use as long as you download it from its official website https://www.gadwin.com/download/. However, the keygen that we used in this article is not from the official source and may contain risks such as viruses or malware. Therefore, we recommend that you scan the keygen file with a reliable antivirus program before using it. We also advise you to use the keygen at your own risk and discretion.
-
Q2: How much does Gadwin Screen Recorder cost?
-
A2: Gadwin Screen Recorder costs $24.95 for a single-user license. You can purchase it from https://www.gadwin.com/purchase/. However, if you use the keygen that we used in this article, you can get a free serial key for your software and activate it without paying anything.
-
Q3: What are the system requirements for Gadwin Screen Recorder?
-
A3: The system requirements for Gadwin Screen Recorder are as follows:
-
-
Operating system: Windows 7/8/10 (32-bit or 64-bit)
-
Processor: Pentium IV or higher
-
RAM: 1 GB RAM (2 GB recommended)
-
Hard disk space: 200 MB or more
-
Sound card: compatible with Windows
-
-
Q4: How can I contact Gadwin Systems for support?
-
A4: You can contact Gadwin Systems for support by visiting their website https://www.gadwin.com/support/ and filling out a support request form. You can also email them at support@gadwin.com or call them at +1 (703) 759-0100.
-
Q5: What are some alternatives to Gadwin Screen Recorder?
-
A5: Some alternatives to Gadwin Screen Recorder are:
-
-
OBS Studio: A free and open-source software that can record and stream video and audio from multiple sources.
-
Camtasia: A professional software that can record and edit screen videos with various effects and transitions.
-
Bandicam: A lightweight software that can record high-quality screen videos with minimal impact on performance.
-
Screencast-O-Matic: A web-based software that can record and share screen videos online with ease.
-
Snagit: A versatile software that can capture screenshots and screen videos with annotations and editing tools.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/rajesh1729/text-summarization-gradio/app.py b/spaces/rajesh1729/text-summarization-gradio/app.py
deleted file mode 100644
index 386fe9e1a1051469d758cde209964db5344375d4..0000000000000000000000000000000000000000
--- a/spaces/rajesh1729/text-summarization-gradio/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-title = 'Text Summarization'
-text_ = "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."
-interface = gr.Interface.load("huggingface/facebook/bart-large-cnn",
-title = title,
-theme = "peach",
-examples = [[text_]]).launch()
\ No newline at end of file
diff --git a/spaces/ramki123/testing/README.md b/spaces/ramki123/testing/README.md
deleted file mode 100644
index 213a770cd7d5828a98d4754e1cf01ade16f1b474..0000000000000000000000000000000000000000
--- a/spaces/ramki123/testing/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Testing
-emoji: 🦀
-colorFrom: indigo
-colorTo: red
-sdk: gradio
-sdk_version: 3.44.4
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Histologia-De-Teresa-Fortoul-Pdf-26.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Histologia-De-Teresa-Fortoul-Pdf-26.md
deleted file mode 100644
index bed360ef45c1d664169fb9d5e1719545dbadb62f..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Histologia-De-Teresa-Fortoul-Pdf-26.md
+++ /dev/null
@@ -1,59 +0,0 @@
-## Histologia De Teresa Fortoul Pdf 26
-
-
-
- 
-
-
-
-**DOWNLOAD 🌟 [https://www.google.com/url?q=https%3A%2F%2Ftlniurl.com%2F2twEFB&sa=D&sntz=1&usg=AOvVaw2-X0AQLTaCR5AwwXrGBPoW](https://www.google.com/url?q=https%3A%2F%2Ftlniurl.com%2F2twEFB&sa=D&sntz=1&usg=AOvVaw2-X0AQLTaCR5AwwXrGBPoW)**
-
-
-
-# Histología y biología celular: A Review of Teresa Fortoul's Book
-
-
-
-Histología y biología celular is a book written by Teresa I. Fortoul van der Goes, a professor of histology and cell biology at the National Autonomous University of Mexico. The book, published by McGraw-Hill Interamericana de España S.L. in 2017, is aimed at medical students and professionals who want to learn about the structure and function of cells and tissues in the human body.
-
-
-
-The book consists of 18 chapters that cover topics such as microscopy techniques, cytology, blood, lymphatic system, cardiovascular system, respiratory system, skin, digestive system, urinary system, reproductive system, endocrine system, ear, and eye. Each chapter includes clear and concise explanations, diagrams, photographs, tables, and clinical cases that illustrate the relevance of histology and cell biology for medicine.
-
-
-
-The book also features online resources such as animations, videos, quizzes, and flashcards that complement the printed material and enhance the learning experience. The book is available in both Spanish and English versions.
-
-
-
-Histología y biología celular is a comprehensive and updated text that provides a solid foundation for understanding the microscopic anatomy and physiology of the human body. It is a valuable resource for anyone interested in histology and cell biology.
-
-
-
-In this article, we will review some of the main concepts and topics covered in Histología y biología celular by Teresa Fortoul. We will also highlight some of the features and benefits of the book for students and teachers of histology and cell biology.
-
-
-
-## Microscopy Techniques
-
-
-
-The first chapter of the book introduces the different types of microscopy techniques that are used to study cells and tissues. These include light microscopy, electron microscopy, fluorescence microscopy, confocal microscopy, and scanning probe microscopy. The chapter explains the principles, advantages, limitations, and applications of each technique. It also provides examples of images obtained with each technique and how they can be interpreted.
-
-
-
-## Cytology
-
-
-
-The second chapter of the book focuses on cytology, the study of the structure and function of cells. The chapter describes the main components of the cell, such as the plasma membrane, cytoplasm, organelles, nucleus, and cytoskeleton. It also explains how cells communicate with each other and with their environment through signal transduction pathways, cell junctions, and extracellular matrix. The chapter also discusses the cell cycle, cell division, apoptosis, and stem cells.
-
-
-
-## Blood
-
-
-
-The third chapter of the book deals with blood, one of the most important fluids in the body. The chapter explains the composition, functions, and properties of blood. It also describes the different types of blood cells, such as erythrocytes, leukocytes, and platelets. It also covers the processes of hematopoiesis, hemostasis, coagulation, and blood groups.
-
- 1b8d091108
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Toad For Oracle 11.5 Keygen !FREE!.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Toad For Oracle 11.5 Keygen !FREE!.md
deleted file mode 100644
index 2b15d28413a404b74520beb971c65824399262ab..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Toad For Oracle 11.5 Keygen !FREE!.md
+++ /dev/null
@@ -1,90 +0,0 @@
-## toad for oracle 11.5 keygen
-
-
-
-
-
- 
-
-
-
-
-
-**CLICK HERE ::: [https://soawresotni.blogspot.com/?d=2tyfT9](https://soawresotni.blogspot.com/?d=2tyfT9)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# What is Toad for Oracle 11.5 and Why You Should Use It
-
-
-
-Toad for Oracle 11.5 is a software tool that helps you work with Oracle databases more efficiently and productively. Whether you are a developer, a DBA, or an analyst, Toad for Oracle 11.5 can help you automate tasks, optimize SQL, compare schemas and data, and access online resources and community insights.
-
-
-
-In this article, we will highlight some of the key features and benefits of Toad for Oracle 11.5 and how it can make your life easier as a database professional.
-
-
-
-## Automation
-
-
-
-One of the main advantages of Toad for Oracle 11.5 is that it allows you to automate routine or repetitive tasks, saving you time and reducing human error. You can use Toad to create scripts, macros, workflows, and schedules that can run automatically or on demand. You can also use Toad to generate documentation, reports, code snippets, and test cases based on your database objects and code.
-
-
-
-## SQL Optimization
-
-
-
-Another benefit of Toad for Oracle 11.5 is that it helps you write and tune SQL code that runs faster and more efficiently in the database. Toad's patented SQL Optimization engine can rewrite your SQL statements to find alternative versions that perform better. You can also use Toad to analyze execution plans, identify bottlenecks, trace sessions, monitor performance metrics, and perform what-if analysis.
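As a rough illustration of what execution-plan analysis involves outside of a GUI tool, the sketch below asks Oracle for a plan with EXPLAIN PLAN and prints it through DBMS_XPLAN using the python-oracledb driver. This is not Toad and not its optimization engine; the connection details, schema, and query are placeholders for illustration only.

```python
# Hypothetical sketch: inspect an Oracle execution plan with python-oracledb.
# User, password, DSN, and table/column names are placeholders.
import oracledb

conn = oracledb.connect(user="demo_user", password="demo_pass", dsn="localhost/XEPDB1")
cur = conn.cursor()

# Ask the optimizer to explain a candidate statement without executing it.
cur.execute(
    "EXPLAIN PLAN FOR SELECT * FROM employees WHERE department_id = :dept",
    dept=10,
)

# Read the formatted plan rows back from PLAN_TABLE.
cur.execute("SELECT plan_table_output FROM TABLE(DBMS_XPLAN.DISPLAY())")
for (line,) in cur:
    print(line)

cur.close()
conn.close()
```

Toad automates this kind of inspection and goes further by proposing rewritten statements, so the snippet is only meant to show what the raw building blocks look like.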
-
-
-
-## Schema/Data Compare
-
-
-
-Toad for Oracle 11.5 also enables you to compare and synchronize schemas and data across different databases or environments. You can use Toad to compare objects, properties, dependencies, privileges, data types, and data values. You can also use Toad to generate scripts or sync actions that can apply the changes to the target database.
-
-
-
-## Essential DB Admin Features
-
-
-
-If you are a DBA, Toad for Oracle 11.5 provides you with essential features that help you manage your Oracle databases more effectively. You can use Toad to perform tasks such as backup and recovery, space management, security management, user administration, session management, RMAN integration, ASM support, and more.
-
-
-
-## Social Intelligence
-
-
-
-A unique feature of Toad for Oracle 11.5 is that it integrates with online resources and community platforms that can enhance your knowledge and skills as a database professional. You can use Toad to access Project Lucy, an online analytical tool that lets you upload, analyze, and compare your performance metrics with other Toad users. You can also use Toad to access Toad World, an online resource for education, expertise, and collaboration with other database professionals.
-
-
-
-## Conclusion
-
-
-
-Toad for Oracle 11.5 is a powerful and comprehensive tool that can help you work with Oracle databases more efficiently and productively. It offers features such as automation, SQL optimization, schema/data compare, essential DB admin features, and social intelligence. If you want to try Toad for Oracle 11.5 for free for 30 days, you can download it from Quest Software's website[^1^].
-
- dfd1c89656
-
-
-
-
-
diff --git a/spaces/reha/Stick_Tech/inference_main.py b/spaces/reha/Stick_Tech/inference_main.py
deleted file mode 100644
index 825e791db86d37e955f42e8cb34323dbb248ed32..0000000000000000000000000000000000000000
--- a/spaces/reha/Stick_Tech/inference_main.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import io
-import logging
-import time
-from pathlib import Path
-
-import librosa
-import numpy as np
-import soundfile
-
-from inference import infer_tool
-from inference import slicer
-from inference.infer_tool import Svc
-
-logging.getLogger('numba').setLevel(logging.WARNING)
-chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")
-
-model_path = "logs/48k/G_174000-Copy1.pth"
-config_path = "configs/config.json"
-svc_model = Svc(model_path, config_path)
-infer_tool.mkdir(["raw", "results"])
-
-# Multiple wav files are supported; place them in the raw folder
-clean_names = ["君の知らない物語-src"]
-trans = [-5] # pitch adjustment in semitones; positive and negative values are supported
-spk_list = ['yunhao'] # speaker voices to synthesize in each run
-slice_db = -40 # default -40; use -30 for noisy audio, -50 to keep breaths in dry vocals
-wav_format = 'flac' # audio output format
-
-infer_tool.fill_a_to_b(trans, clean_names)
-for clean_name, tran in zip(clean_names, trans):
- raw_audio_path = f"raw/{clean_name}"
- if "." not in raw_audio_path:
- raw_audio_path += ".wav"
- infer_tool.format_wav(raw_audio_path)
- wav_path = Path(raw_audio_path).with_suffix('.wav')
- audio, sr = librosa.load(wav_path, mono=True, sr=None)
- wav_hash = infer_tool.get_md5(audio)
- if wav_hash in chunks_dict.keys():
- print("load chunks from temp")
- chunks = chunks_dict[wav_hash]["chunks"]
- else:
- chunks = slicer.cut(wav_path, db_thresh=slice_db)
- print(chunks)
- chunks_dict[wav_hash] = {"chunks": chunks, "time": int(time.time())}
- infer_tool.write_temp("inference/chunks_temp.json", chunks_dict)
- audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
-
- for spk in spk_list:
- audio = []
- for (slice_tag, data) in audio_data:
- print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
- length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
- raw_path = io.BytesIO()
- soundfile.write(raw_path, data, audio_sr, format="wav")
- raw_path.seek(0)
- if slice_tag:
- print('jump empty segment')
- _audio = np.zeros(length)
- else:
- out_audio, out_sr = svc_model.infer(spk, tran, raw_path)
- _audio = out_audio.cpu().numpy()
- audio.extend(list(_audio))
-
- res_path = f'./results/{clean_name}_{tran}key_{spk}.{wav_format}'
- soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format)
diff --git a/spaces/rgres/Seg2Sat/app.py b/spaces/rgres/Seg2Sat/app.py
deleted file mode 100644
index cca64144d7143c917c31903f628ca133a3d96f68..0000000000000000000000000000000000000000
--- a/spaces/rgres/Seg2Sat/app.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-from flask import Flask, request
-import requests
-from gradio_client import Client
-import base64
-
-
-base_gradio_url = os.getenv('URL_GRADIO', 'http://localhost:7860')
-client = None
-
-app = Flask(__name__, static_url_path='/static')
-
-@app.route('/')
-def index():
- return app.send_static_file('index.html')
-
-def save_base64_image(base64Image):
- image_data = base64.b64decode(base64Image)
- path = "input_image.jpg"
- with open(path, 'wb') as f:
- f.write(image_data)
- return path
-
-def encode_image_to_base64(filepath):
- with open(filepath, "rb") as image_file:
- encoded_image = base64.b64encode(image_file.read()).decode("utf-8")
- return encoded_image
-
-@app.route('/predict', methods=['POST'])
-def predict():
- data = request.get_json()
-
- base64Image = data['data'][0]
- prompt = data['data'][1]
- steps = data['data'][2]
- seed = data['data'][3]
-
- global client
- if not client:
- client = Client(base_gradio_url)
-
-
- b64meta, b64_data = base64Image.split(',')
-
- image_path = save_base64_image(b64_data)
-
- result = client.predict(
- image_path, prompt, steps, seed, fn_index=0
- )
-
-
- return b64meta + ',' + encode_image_to_base64(result)
-
-
-if __name__ == '__main__':
- app.run(host='0.0.0.0', port=int(
- os.environ.get('PORT', 8000)), debug=True)
diff --git a/spaces/rishi9440/remove-photo-background/src/models/modnet.py b/spaces/rishi9440/remove-photo-background/src/models/modnet.py
deleted file mode 100644
index 9e268e70ea1afd9ce40e27cc8a8ec9df4500ac82..0000000000000000000000000000000000000000
--- a/spaces/rishi9440/remove-photo-background/src/models/modnet.py
+++ /dev/null
@@ -1,255 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .backbones import SUPPORTED_BACKBONES
-
-
-#------------------------------------------------------------------------------
-# MODNet Basic Modules
-#------------------------------------------------------------------------------
-
-class IBNorm(nn.Module):
- """ Combine Instance Norm and Batch Norm into One Layer
- """
-
- def __init__(self, in_channels):
- super(IBNorm, self).__init__()
- in_channels = in_channels
- self.bnorm_channels = int(in_channels / 2)
- self.inorm_channels = in_channels - self.bnorm_channels
-
- self.bnorm = nn.BatchNorm2d(self.bnorm_channels, affine=True)
- self.inorm = nn.InstanceNorm2d(self.inorm_channels, affine=False)
-
- def forward(self, x):
- bn_x = self.bnorm(x[:, :self.bnorm_channels, ...].contiguous())
- in_x = self.inorm(x[:, self.bnorm_channels:, ...].contiguous())
-
- return torch.cat((bn_x, in_x), 1)
-
-
-class Conv2dIBNormRelu(nn.Module):
- """ Convolution + IBNorm + ReLu
- """
-
- def __init__(self, in_channels, out_channels, kernel_size,
- stride=1, padding=0, dilation=1, groups=1, bias=True,
- with_ibn=True, with_relu=True):
- super(Conv2dIBNormRelu, self).__init__()
-
- layers = [
- nn.Conv2d(in_channels, out_channels, kernel_size,
- stride=stride, padding=padding, dilation=dilation,
- groups=groups, bias=bias)
- ]
-
- if with_ibn:
- layers.append(IBNorm(out_channels))
- if with_relu:
- layers.append(nn.ReLU(inplace=True))
-
- self.layers = nn.Sequential(*layers)
-
- def forward(self, x):
- return self.layers(x)
-
-
-class SEBlock(nn.Module):
- """ SE Block Proposed in https://arxiv.org/pdf/1709.01507.pdf
- """
-
- def __init__(self, in_channels, out_channels, reduction=1):
- super(SEBlock, self).__init__()
- self.pool = nn.AdaptiveAvgPool2d(1)
- self.fc = nn.Sequential(
- nn.Linear(in_channels, int(in_channels // reduction), bias=False),
- nn.ReLU(inplace=True),
- nn.Linear(int(in_channels // reduction), out_channels, bias=False),
- nn.Sigmoid()
- )
-
- def forward(self, x):
- b, c, _, _ = x.size()
- w = self.pool(x).view(b, c)
- w = self.fc(w).view(b, c, 1, 1)
-
- return x * w.expand_as(x)
-
-
-#------------------------------------------------------------------------------
-# MODNet Branches
-#------------------------------------------------------------------------------
-
-class LRBranch(nn.Module):
- """ Low Resolution Branch of MODNet
- """
-
- def __init__(self, backbone):
- super(LRBranch, self).__init__()
-
- enc_channels = backbone.enc_channels
-
- self.backbone = backbone
- self.se_block = SEBlock(enc_channels[4], enc_channels[4], reduction=4)
- self.conv_lr16x = Conv2dIBNormRelu(enc_channels[4], enc_channels[3], 5, stride=1, padding=2)
- self.conv_lr8x = Conv2dIBNormRelu(enc_channels[3], enc_channels[2], 5, stride=1, padding=2)
- self.conv_lr = Conv2dIBNormRelu(enc_channels[2], 1, kernel_size=3, stride=2, padding=1, with_ibn=False, with_relu=False)
-
- def forward(self, img, inference):
- enc_features = self.backbone.forward(img)
- enc2x, enc4x, enc32x = enc_features[0], enc_features[1], enc_features[4]
-
- enc32x = self.se_block(enc32x)
- lr16x = F.interpolate(enc32x, scale_factor=2, mode='bilinear', align_corners=False)
- lr16x = self.conv_lr16x(lr16x)
- lr8x = F.interpolate(lr16x, scale_factor=2, mode='bilinear', align_corners=False)
- lr8x = self.conv_lr8x(lr8x)
-
- pred_semantic = None
- if not inference:
- lr = self.conv_lr(lr8x)
- pred_semantic = torch.sigmoid(lr)
-
- return pred_semantic, lr8x, [enc2x, enc4x]
-
-
-class HRBranch(nn.Module):
- """ High Resolution Branch of MODNet
- """
-
- def __init__(self, hr_channels, enc_channels):
- super(HRBranch, self).__init__()
-
- self.tohr_enc2x = Conv2dIBNormRelu(enc_channels[0], hr_channels, 1, stride=1, padding=0)
- self.conv_enc2x = Conv2dIBNormRelu(hr_channels + 3, hr_channels, 3, stride=2, padding=1)
-
- self.tohr_enc4x = Conv2dIBNormRelu(enc_channels[1], hr_channels, 1, stride=1, padding=0)
- self.conv_enc4x = Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1)
-
- self.conv_hr4x = nn.Sequential(
- Conv2dIBNormRelu(3 * hr_channels + 3, 2 * hr_channels, 3, stride=1, padding=1),
- Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
- Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, stride=1, padding=1),
- )
-
- self.conv_hr2x = nn.Sequential(
- Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
- Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, stride=1, padding=1),
- Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1),
- Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1),
- )
-
- self.conv_hr = nn.Sequential(
- Conv2dIBNormRelu(hr_channels + 3, hr_channels, 3, stride=1, padding=1),
- Conv2dIBNormRelu(hr_channels, 1, kernel_size=1, stride=1, padding=0, with_ibn=False, with_relu=False),
- )
-
- def forward(self, img, enc2x, enc4x, lr8x, inference):
- img2x = F.interpolate(img, scale_factor=1/2, mode='bilinear', align_corners=False)
- img4x = F.interpolate(img, scale_factor=1/4, mode='bilinear', align_corners=False)
-
- enc2x = self.tohr_enc2x(enc2x)
- hr4x = self.conv_enc2x(torch.cat((img2x, enc2x), dim=1))
-
- enc4x = self.tohr_enc4x(enc4x)
- hr4x = self.conv_enc4x(torch.cat((hr4x, enc4x), dim=1))
-
- lr4x = F.interpolate(lr8x, scale_factor=2, mode='bilinear', align_corners=False)
- hr4x = self.conv_hr4x(torch.cat((hr4x, lr4x, img4x), dim=1))
-
- hr2x = F.interpolate(hr4x, scale_factor=2, mode='bilinear', align_corners=False)
- hr2x = self.conv_hr2x(torch.cat((hr2x, enc2x), dim=1))
-
- pred_detail = None
- if not inference:
- hr = F.interpolate(hr2x, scale_factor=2, mode='bilinear', align_corners=False)
- hr = self.conv_hr(torch.cat((hr, img), dim=1))
- pred_detail = torch.sigmoid(hr)
-
- return pred_detail, hr2x
-
-
-class FusionBranch(nn.Module):
- """ Fusion Branch of MODNet
- """
-
- def __init__(self, hr_channels, enc_channels):
- super(FusionBranch, self).__init__()
- self.conv_lr4x = Conv2dIBNormRelu(enc_channels[2], hr_channels, 5, stride=1, padding=2)
-
- self.conv_f2x = Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, stride=1, padding=1)
- self.conv_f = nn.Sequential(
- Conv2dIBNormRelu(hr_channels + 3, int(hr_channels / 2), 3, stride=1, padding=1),
- Conv2dIBNormRelu(int(hr_channels / 2), 1, 1, stride=1, padding=0, with_ibn=False, with_relu=False),
- )
-
- def forward(self, img, lr8x, hr2x):
- lr4x = F.interpolate(lr8x, scale_factor=2, mode='bilinear', align_corners=False)
- lr4x = self.conv_lr4x(lr4x)
- lr2x = F.interpolate(lr4x, scale_factor=2, mode='bilinear', align_corners=False)
-
- f2x = self.conv_f2x(torch.cat((lr2x, hr2x), dim=1))
- f = F.interpolate(f2x, scale_factor=2, mode='bilinear', align_corners=False)
- f = self.conv_f(torch.cat((f, img), dim=1))
- pred_matte = torch.sigmoid(f)
-
- return pred_matte
-
-
-#------------------------------------------------------------------------------
-# MODNet
-#------------------------------------------------------------------------------
-
-class MODNet(nn.Module):
- """ Architecture of MODNet
- """
-
- def __init__(self, in_channels=3, hr_channels=32, backbone_arch='mobilenetv2', backbone_pretrained=True):
- super(MODNet, self).__init__()
-
- self.in_channels = in_channels
- self.hr_channels = hr_channels
- self.backbone_arch = backbone_arch
- self.backbone_pretrained = backbone_pretrained
-
- self.backbone = SUPPORTED_BACKBONES[self.backbone_arch](self.in_channels)
-
- self.lr_branch = LRBranch(self.backbone)
- self.hr_branch = HRBranch(self.hr_channels, self.backbone.enc_channels)
- self.f_branch = FusionBranch(self.hr_channels, self.backbone.enc_channels)
-
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- self._init_conv(m)
- elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):
- self._init_norm(m)
-
- if self.backbone_pretrained:
- self.backbone.load_pretrained_ckpt()
-
- def forward(self, img, inference):
- pred_semantic, lr8x, [enc2x, enc4x] = self.lr_branch(img, inference)
- pred_detail, hr2x = self.hr_branch(img, enc2x, enc4x, lr8x, inference)
- pred_matte = self.f_branch(img, lr8x, hr2x)
-
- return pred_semantic, pred_detail, pred_matte
-
- def freeze_norm(self):
- norm_types = [nn.BatchNorm2d, nn.InstanceNorm2d]
- for m in self.modules():
- for n in norm_types:
- if isinstance(m, n):
- m.eval()
- continue
-
- def _init_conv(self, conv):
- nn.init.kaiming_uniform_(
- conv.weight, a=0, mode='fan_in', nonlinearity='relu')
- if conv.bias is not None:
- nn.init.constant_(conv.bias, 0)
-
- def _init_norm(self, norm):
- if norm.weight is not None:
- nn.init.constant_(norm.weight, 1)
- nn.init.constant_(norm.bias, 0)
diff --git a/spaces/robin0307/MMOCR/configs/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py b/spaces/robin0307/MMOCR/configs/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py
deleted file mode 100644
index 44bbfcd55a2efc29f441e06fb33079a48de61905..0000000000000000000000000000000000000000
--- a/spaces/robin0307/MMOCR/configs/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py
+++ /dev/null
@@ -1,33 +0,0 @@
-_base_ = [
- '../../_base_/default_runtime.py',
- '../../_base_/schedules/schedule_sgd_1500e.py',
- '../../_base_/det_models/fcenet_r50dcnv2_fpn.py',
- '../../_base_/det_datasets/ctw1500.py',
- '../../_base_/det_pipelines/fcenet_pipeline.py'
-]
-
-train_list = {{_base_.train_list}}
-test_list = {{_base_.test_list}}
-
-train_pipeline_ctw1500 = {{_base_.train_pipeline_ctw1500}}
-test_pipeline_ctw1500 = {{_base_.test_pipeline_ctw1500}}
-
-data = dict(
- samples_per_gpu=6,
- workers_per_gpu=2,
- val_dataloader=dict(samples_per_gpu=1),
- test_dataloader=dict(samples_per_gpu=1),
- train=dict(
- type='UniformConcatDataset',
- datasets=train_list,
- pipeline=train_pipeline_ctw1500),
- val=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline_ctw1500),
- test=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline_ctw1500))
-
-evaluation = dict(interval=10, metric='hmean-iou')
diff --git a/spaces/robin0307/MMOCR/configs/textrecog/crnn/crnn_toy_dataset.py b/spaces/robin0307/MMOCR/configs/textrecog/crnn/crnn_toy_dataset.py
deleted file mode 100644
index f61c68afe285e4d1943cbcbb8ede1fe965a99a4b..0000000000000000000000000000000000000000
--- a/spaces/robin0307/MMOCR/configs/textrecog/crnn/crnn_toy_dataset.py
+++ /dev/null
@@ -1,47 +0,0 @@
-_base_ = [
- '../../_base_/default_runtime.py',
- '../../_base_/recog_pipelines/crnn_pipeline.py',
- '../../_base_/recog_datasets/toy_data.py',
- '../../_base_/schedules/schedule_adadelta_5e.py'
-]
-
-label_convertor = dict(
- type='CTCConvertor', dict_type='DICT36', with_unknown=True, lower=True)
-
-model = dict(
- type='CRNNNet',
- preprocessor=None,
- backbone=dict(type='VeryDeepVgg', leaky_relu=False, input_channels=1),
- encoder=None,
- decoder=dict(type='CRNNDecoder', in_channels=512, rnn_flag=True),
- loss=dict(type='CTCLoss'),
- label_convertor=label_convertor,
- pretrained=None)
-
-train_list = {{_base_.train_list}}
-test_list = {{_base_.test_list}}
-
-train_pipeline = {{_base_.train_pipeline}}
-test_pipeline = {{_base_.test_pipeline}}
-
-data = dict(
- samples_per_gpu=32,
- workers_per_gpu=2,
- val_dataloader=dict(samples_per_gpu=1),
- test_dataloader=dict(samples_per_gpu=1),
- train=dict(
- type='UniformConcatDataset',
- datasets=train_list,
- pipeline=train_pipeline),
- val=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline),
- test=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline))
-
-evaluation = dict(interval=1, metric='acc')
-
-cudnn_benchmark = True
diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/losses/gfocal_loss.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/losses/gfocal_loss.py
deleted file mode 100644
index 0e8d26373f83f35ad032322d96cdbac995be2749..0000000000000000000000000000000000000000
--- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/losses/gfocal_loss.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import mmcv
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..builder import LOSSES
-from .utils import weighted_loss
-
-
-@mmcv.jit(derivate=True, coderize=True)
-@weighted_loss
-def quality_focal_loss(pred, target, beta=2.0):
- r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
- Qualified and Distributed Bounding Boxes for Dense Object Detection
- <https://arxiv.org/abs/2006.04388>`_.
-
- Args:
- pred (torch.Tensor): Predicted joint representation of classification
- and quality (IoU) estimation with shape (N, C), C is the number of
- classes.
- target (tuple([torch.Tensor])): Target category label with shape (N,)
- and target quality label with shape (N,).
- beta (float): The beta parameter for calculating the modulating factor.
- Defaults to 2.0.
-
- Returns:
- torch.Tensor: Loss tensor with shape (N,).
- """
- assert len(target) == 2, """target for QFL must be a tuple of two elements,
- including category label and quality label, respectively"""
- # label denotes the category id, score denotes the quality score
- label, score = target
-
- # negatives are supervised by 0 quality score
- pred_sigmoid = pred.sigmoid()
- scale_factor = pred_sigmoid
- zerolabel = scale_factor.new_zeros(pred.shape)
- loss = F.binary_cross_entropy_with_logits(
- pred, zerolabel, reduction='none') * scale_factor.pow(beta)
-
- # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
- bg_class_ind = pred.size(1)
- pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
- pos_label = label[pos].long()
- # positives are supervised by bbox quality (IoU) score
- scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
- loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
- pred[pos, pos_label], score[pos],
- reduction='none') * scale_factor.abs().pow(beta)
-
- loss = loss.sum(dim=1, keepdim=False)
- return loss
-
-
-@weighted_loss
-def quality_focal_loss_with_prob(pred, target, beta=2.0):
- r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
- Qualified and Distributed Bounding Boxes for Dense Object Detection
- <https://arxiv.org/abs/2006.04388>`_.
- Different from `quality_focal_loss`, this function accepts probability
- as input.
-
- Args:
- pred (torch.Tensor): Predicted joint representation of classification
- and quality (IoU) estimation with shape (N, C), C is the number of
- classes.
- target (tuple([torch.Tensor])): Target category label with shape (N,)
- and target quality label with shape (N,).
- beta (float): The beta parameter for calculating the modulating factor.
- Defaults to 2.0.
-
- Returns:
- torch.Tensor: Loss tensor with shape (N,).
- """
- assert len(target) == 2, """target for QFL must be a tuple of two elements,
- including category label and quality label, respectively"""
- # label denotes the category id, score denotes the quality score
- label, score = target
-
- # negatives are supervised by 0 quality score
- pred_sigmoid = pred
- scale_factor = pred_sigmoid
- zerolabel = scale_factor.new_zeros(pred.shape)
- loss = F.binary_cross_entropy(
- pred, zerolabel, reduction='none') * scale_factor.pow(beta)
-
- # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
- bg_class_ind = pred.size(1)
- pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
- pos_label = label[pos].long()
- # positives are supervised by bbox quality (IoU) score
- scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
- loss[pos, pos_label] = F.binary_cross_entropy(
- pred[pos, pos_label], score[pos],
- reduction='none') * scale_factor.abs().pow(beta)
-
- loss = loss.sum(dim=1, keepdim=False)
- return loss
-
-
-@mmcv.jit(derivate=True, coderize=True)
-@weighted_loss
-def distribution_focal_loss(pred, label):
- r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
- Qualified and Distributed Bounding Boxes for Dense Object Detection
- <https://arxiv.org/abs/2006.04388>`_.
-
- Args:
- pred (torch.Tensor): Predicted general distribution of bounding boxes
- (before softmax) with shape (N, n+1), n is the max value of the
- integral set `{0, ..., n}` in paper.
- label (torch.Tensor): Target distance label for bounding boxes with
- shape (N,).
-
- Returns:
- torch.Tensor: Loss tensor with shape (N,).
- """
- dis_left = label.long()
- dis_right = dis_left + 1
- weight_left = dis_right.float() - label
- weight_right = label - dis_left.float()
- loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
- + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
- return loss
-
-
-@LOSSES.register_module()
-class QualityFocalLoss(nn.Module):
- r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
- Learning Qualified and Distributed Bounding Boxes for Dense Object
- Detection <https://arxiv.org/abs/2006.04388>`_.
-
- Args:
- use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
- Defaults to True.
- beta (float): The beta parameter for calculating the modulating factor.
- Defaults to 2.0.
- reduction (str): Options are "none", "mean" and "sum".
- loss_weight (float): Loss weight of current loss.
- activated (bool, optional): Whether the input is activated.
- If True, it means the input has been activated and can be
- treated as probabilities. Else, it should be treated as logits.
- Defaults to False.
- """
-
- def __init__(self,
- use_sigmoid=True,
- beta=2.0,
- reduction='mean',
- loss_weight=1.0,
- activated=False):
- super(QualityFocalLoss, self).__init__()
- assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
- self.use_sigmoid = use_sigmoid
- self.beta = beta
- self.reduction = reduction
- self.loss_weight = loss_weight
- self.activated = activated
-
- def forward(self,
- pred,
- target,
- weight=None,
- avg_factor=None,
- reduction_override=None):
- """Forward function.
-
- Args:
- pred (torch.Tensor): Predicted joint representation of
- classification and quality (IoU) estimation with shape (N, C),
- C is the number of classes.
- target (tuple([torch.Tensor])): Target category label with shape
- (N,) and target quality label with shape (N,).
- weight (torch.Tensor, optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (int, optional): Average factor that is used to average
- the loss. Defaults to None.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- if self.use_sigmoid:
- if self.activated:
- calculate_loss_func = quality_focal_loss_with_prob
- else:
- calculate_loss_func = quality_focal_loss
- loss_cls = self.loss_weight * calculate_loss_func(
- pred,
- target,
- weight,
- beta=self.beta,
- reduction=reduction,
- avg_factor=avg_factor)
- else:
- raise NotImplementedError
- return loss_cls
-
-
-@LOSSES.register_module()
-class DistributionFocalLoss(nn.Module):
- r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
- Learning Qualified and Distributed Bounding Boxes for Dense Object
- Detection <https://arxiv.org/abs/2006.04388>`_.
-
- Args:
- reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
- loss_weight (float): Loss weight of current loss.
- """
-
- def __init__(self, reduction='mean', loss_weight=1.0):
- super(DistributionFocalLoss, self).__init__()
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred,
- target,
- weight=None,
- avg_factor=None,
- reduction_override=None):
- """Forward function.
-
- Args:
- pred (torch.Tensor): Predicted general distribution of bounding
- boxes (before softmax) with shape (N, n+1), n is the max value
- of the integral set `{0, ..., n}` in paper.
- target (torch.Tensor): Target distance label for bounding boxes
- with shape (N,).
- weight (torch.Tensor, optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (int, optional): Average factor that is used to average
- the loss. Defaults to None.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- loss_cls = self.loss_weight * distribution_focal_loss(
- pred, target, weight, reduction=reduction, avg_factor=avg_factor)
- return loss_cls
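
For readers skimming the deleted loss code above, here is a small self-contained sketch of the DFL weighting it implements (`dfl_sketch` is an illustrative name, not part of the file): the continuous regression target is split between its two neighbouring integer bins, and the two cross-entropy terms are weighted by how close the target is to each bin.

```python
import torch
import torch.nn.functional as F

def dfl_sketch(pred, label):
    # Split the continuous target between its two neighbouring integer bins.
    dis_left = label.long()                  # lower bin index
    dis_right = dis_left + 1                 # upper bin index
    weight_left = dis_right.float() - label  # affinity to the lower bin
    weight_right = label - dis_left.float()  # affinity to the upper bin
    # Cross-entropy against each neighbouring bin, linearly interpolated.
    return (F.cross_entropy(pred, dis_left, reduction='none') * weight_left
            + F.cross_entropy(pred, dis_right, reduction='none') * weight_right)

# Toy example: 2 boxes, distances discretised over the integral set {0, ..., 8},
# so the network predicts 9 logits per box (shape (N, n+1) with n = 8).
logits = torch.randn(2, 9)
targets = torch.tensor([3.2, 6.9])  # continuous distance labels, 0 <= y < 8
print(dfl_sketch(logits, targets))  # per-box losses, shape (2,)
```

The two interpolation weights sum to 1, so a target that lands exactly on an integer bin reduces to a plain cross-entropy against that single bin.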
diff --git a/spaces/rogman/Flamingo-Gradio-ImageDescribe/app.py b/spaces/rogman/Flamingo-Gradio-ImageDescribe/app.py
deleted file mode 100644
index 64cc005dabe013b1c9e4af644008622a88fa24df..0000000000000000000000000000000000000000
--- a/spaces/rogman/Flamingo-Gradio-ImageDescribe/app.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import os
-import gradio as gr
-import torch
-import PIL
-
-from flamingo_mini import FlamingoConfig, FlamingoModel, FlamingoProcessor
-
-
-
-EXAMPLES_DIR = 'examples'
-DEFAULT_PROMPT = ""
-
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-model = FlamingoModel.from_pretrained('dhansmair/flamingo-mini')
-model.to(device)
-model.eval()
-
-processor = FlamingoProcessor(model.config, load_vision_processor=True)
-
-# setup some example images
-examples = []
-if os.path.isdir(EXAMPLES_DIR):
- for file in os.listdir(EXAMPLES_DIR):
- path = EXAMPLES_DIR + "/" + file
- examples.append([path, DEFAULT_PROMPT])
-
-
-def predict_caption(image, prompt):
- assert isinstance(prompt, str)
-
- features = processor.extract_features(image).to(device)
- caption = model.generate_captions(processor,
- visual_features=features,
- prompt=prompt)
-
- if isinstance(caption, list):
- caption = caption[0]
-
- return caption
-
-
-iface = gr.Interface(fn=predict_caption,
- inputs=[gr.Image(type="pil"), gr.Textbox(value=DEFAULT_PROMPT, label="Prompt")],
- examples=examples,
- outputs="text")
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/ronig/protein_binding_search/credentials.py b/spaces/ronig/protein_binding_search/credentials.py
deleted file mode 100644
index 3258f7058a9d8719cf7b9e03b61e34170cc334e8..0000000000000000000000000000000000000000
--- a/spaces/ronig/protein_binding_search/credentials.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import os
-
-
-def get_token():
- return os.environ.get("TOKEN", True)
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Invisible.Secrets V4.6 Keygen Encrypt and Hide Your Files in Minutes.md b/spaces/rorallitri/biomedical-language-models/logs/Invisible.Secrets V4.6 Keygen Encrypt and Hide Your Files in Minutes.md
deleted file mode 100644
index d35533d5f6362ae429b6579712674c2d16bba080..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Invisible.Secrets V4.6 Keygen Encrypt and Hide Your Files in Minutes.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/roshithindia/image_classification/README.md b/spaces/roshithindia/image_classification/README.md
deleted file mode 100644
index dd485e469b703e1774ad4bacdfdd27a205e4858b..0000000000000000000000000000000000000000
--- a/spaces/roshithindia/image_classification/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Image Classification
-emoji: 🔥
-colorFrom: indigo
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.27.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/__init__.py b/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/__init__.py
deleted file mode 100644
index 939e7c6c8f94c4ea1141885c3c3295fe083b06aa..0000000000000000000000000000000000000000
--- a/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-# empty
diff --git a/spaces/rstallman/Westminster-AI-Sheet/README.md b/spaces/rstallman/Westminster-AI-Sheet/README.md
deleted file mode 100644
index e992a8c5d212f74e6b4e6f98291e778c1c05a44b..0000000000000000000000000000000000000000
--- a/spaces/rstallman/Westminster-AI-Sheet/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Westminster AI Sheet
-emoji: 📊
-colorFrom: blue
-colorTo: blue
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-duplicated_from: rstallman/westminster.ai.v2
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/ryoung41/HTML5Interactivity/style.css b/spaces/ryoung41/HTML5Interactivity/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/ryoung41/HTML5Interactivity/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/rzzgate/Stable-Diffusion-ControlNet-WebUI/app.py b/spaces/rzzgate/Stable-Diffusion-ControlNet-WebUI/app.py
deleted file mode 100644
index d39c29a6d749b6e7778a696ac419add423014933..0000000000000000000000000000000000000000
--- a/spaces/rzzgate/Stable-Diffusion-ControlNet-WebUI/app.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import gradio as gr
-
-from diffusion_webui.helpers import (
- CodeformerUpscalerGenerator,
- StableDiffusionControlInpaintNetDepthGenerator,
- StableDiffusionControlNetCannyGenerator,
- StableDiffusionControlNetDepthGenerator,
- StableDiffusionControlNetHEDGenerator,
- StableDiffusionControlNetInpaintCannyGenerator,
- StableDiffusionControlNetInpaintHedGenerator,
- StableDiffusionControlNetInpaintMlsdGenerator,
- StableDiffusionControlNetInpaintPoseGenerator,
- StableDiffusionControlNetInpaintScribbleGenerator,
- StableDiffusionControlNetInpaintSegGenerator,
- StableDiffusionControlNetMLSDGenerator,
- StableDiffusionControlNetPoseGenerator,
- StableDiffusionControlNetScribbleGenerator,
- StableDiffusionControlNetSegGenerator,
- StableDiffusionImage2ImageGenerator,
- StableDiffusionInpaintGenerator,
- StableDiffusionText2ImageGenerator,
-)
-
-
-def main():
- app = gr.Blocks()
- with app:
- with gr.Row():
- with gr.Column():
- with gr.Tab("Text2Img"):
- StableDiffusionText2ImageGenerator.app()
- with gr.Tab("Img2Img"):
- StableDiffusionImage2ImageGenerator.app()
- with gr.Tab("Inpaint"):
- StableDiffusionInpaintGenerator.app()
- with gr.Tab("ControlNet"):
- with gr.Tab("Canny"):
- StableDiffusionControlNetCannyGenerator.app()
- with gr.Tab("Depth"):
- StableDiffusionControlNetDepthGenerator.app()
- with gr.Tab("HED"):
- StableDiffusionControlNetHEDGenerator.app()
- with gr.Tab("MLSD"):
- StableDiffusionControlNetMLSDGenerator.app()
- with gr.Tab("Pose"):
- StableDiffusionControlNetPoseGenerator.app()
- with gr.Tab("Scribble"):
- StableDiffusionControlNetScribbleGenerator.app()
- with gr.Tab("Seg"):
- StableDiffusionControlNetSegGenerator.app()
- with gr.Tab("ControlNet Inpaint"):
- with gr.Tab("Canny"):
- StableDiffusionControlNetInpaintCannyGenerator.app()
- with gr.Tab("Depth"):
- StableDiffusionControlInpaintNetDepthGenerator.app()
- with gr.Tab("HED"):
- StableDiffusionControlNetInpaintHedGenerator.app()
- with gr.Tab("MLSD"):
- StableDiffusionControlNetInpaintMlsdGenerator.app()
- with gr.Tab("Pose"):
- StableDiffusionControlNetInpaintPoseGenerator.app()
- with gr.Tab("Scribble"):
- StableDiffusionControlNetInpaintScribbleGenerator.app()
- with gr.Tab("Seg"):
- StableDiffusionControlNetInpaintSegGenerator.app()
- with gr.Tab("Upscaler"):
- CodeformerUpscalerGenerator.app()
-
- app.queue(concurrency_count=1)
- app.launch(debug=True, enable_queue=True)
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/README.md b/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/README.md
deleted file mode 100644
index a866a6d7d9f776d943973bf5c73b5ece7882b165..0000000000000000000000000000000000000000
--- a/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Senh WizardVicuna Uncensored 3B 0719 GGML
-emoji: 🔥
-colorFrom: purple
-colorTo: red
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/sam-hq-team/sam-hq/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py b/spaces/sam-hq-team/sam-hq/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py
deleted file mode 100644
index f0cf9779b270e1aead32845006f8b881fcba37ad..0000000000000000000000000000000000000000
--- a/spaces/sam-hq-team/sam-hq/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from torch import Tensor, nn
-from torchvision.ops.boxes import nms
-from transformers import BertConfig, BertModel, BertPreTrainedModel
-from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
-
-
-class BertModelWarper(nn.Module):
- def __init__(self, bert_model):
- super().__init__()
- # self.bert = bert_model
-
- self.config = bert_model.config
- self.embeddings = bert_model.embeddings
- self.encoder = bert_model.encoder
- self.pooler = bert_model.pooler
-
- self.get_extended_attention_mask = bert_model.get_extended_attention_mask
- self.invert_attention_mask = bert_model.invert_attention_mask
- self.get_head_mask = bert_model.get_head_mask
-
- def forward(
- self,
- input_ids=None,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- inputs_embeds=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_values=None,
- use_cache=None,
- output_attentions=None,
- output_hidden_states=None,
- return_dict=None,
- ):
- r"""
- encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
- the model is configured as a decoder.
- encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
- the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
-
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
- (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
- instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
- use_cache (:obj:`bool`, `optional`):
- If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
- decoding (see :obj:`past_key_values`).
- """
- output_attentions = (
- output_attentions if output_attentions is not None else self.config.output_attentions
- )
- output_hidden_states = (
- output_hidden_states
- if output_hidden_states is not None
- else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if self.config.is_decoder:
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- else:
- use_cache = False
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- input_shape = input_ids.size()
- batch_size, seq_length = input_shape
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- batch_size, seq_length = input_shape
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- # past_key_values_length
- past_key_values_length = (
- past_key_values[0][0].shape[2] if past_key_values is not None else 0
- )
-
- if attention_mask is None:
- attention_mask = torch.ones(
- ((batch_size, seq_length + past_key_values_length)), device=device
- )
- if token_type_ids is None:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
-
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
- # ourselves in which case we just need to make it broadcastable to all heads.
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
- attention_mask, input_shape, device
- )
-
- # If a 2D or 3D attention mask is provided for the cross-attention
- # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
- if self.config.is_decoder and encoder_hidden_states is not None:
- encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
- encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
- if encoder_attention_mask is None:
- encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
- encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
- else:
- encoder_extended_attention_mask = None
- # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
- # import ipdb; ipdb.set_trace()
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape bsz x n_heads x N x N
- # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
- # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
- head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
-
- embedding_output = self.embeddings(
- input_ids=input_ids,
- position_ids=position_ids,
- token_type_ids=token_type_ids,
- inputs_embeds=inputs_embeds,
- past_key_values_length=past_key_values_length,
- )
-
- encoder_outputs = self.encoder(
- embedding_output,
- attention_mask=extended_attention_mask,
- head_mask=head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_extended_attention_mask,
- past_key_values=past_key_values,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- sequence_output = encoder_outputs[0]
- pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
-
- if not return_dict:
- return (sequence_output, pooled_output) + encoder_outputs[1:]
-
- return BaseModelOutputWithPoolingAndCrossAttentions(
- last_hidden_state=sequence_output,
- pooler_output=pooled_output,
- past_key_values=encoder_outputs.past_key_values,
- hidden_states=encoder_outputs.hidden_states,
- attentions=encoder_outputs.attentions,
- cross_attentions=encoder_outputs.cross_attentions,
- )
-
-
-class TextEncoderShell(nn.Module):
- def __init__(self, text_encoder):
- super().__init__()
- self.text_encoder = text_encoder
- self.config = self.text_encoder.config
-
- def forward(self, **kw):
- # feed into text encoder
- return self.text_encoder(**kw)
-
-
-def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):
- """Generate attention mask between each pair of special tokens
- Args:
- input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
- special_tokens_mask (list): special tokens mask.
- Returns:
- torch.Tensor: attention mask between each special tokens.
- """
- input_ids = tokenized["input_ids"]
- bs, num_token = input_ids.shape
- # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
- special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
- for special_token in special_tokens_list:
- special_tokens_mask |= input_ids == special_token
-
- # idxs: each row is a list of indices of special tokens
- idxs = torch.nonzero(special_tokens_mask)
-
- # generate attention mask and positional ids
- attention_mask = (
- torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
- )
- position_ids = torch.zeros((bs, num_token), device=input_ids.device)
- previous_col = 0
- for i in range(idxs.shape[0]):
- row, col = idxs[i]
- if (col == 0) or (col == num_token - 1):
- attention_mask[row, col, col] = True
- position_ids[row, col] = 0
- else:
- attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
- position_ids[row, previous_col + 1 : col + 1] = torch.arange(
- 0, col - previous_col, device=input_ids.device
- )
-
- previous_col = col
-
- # # padding mask
- # padding_mask = tokenized['attention_mask']
- # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
-
- return attention_mask, position_ids.to(torch.long)
-
-
-def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):
- """Generate attention mask between each pair of special tokens
- Args:
- input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
- special_tokens_mask (list): special tokens mask.
- Returns:
- torch.Tensor: attention mask between each special tokens.
- """
- input_ids = tokenized["input_ids"]
- bs, num_token = input_ids.shape
- # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
- special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
- for special_token in special_tokens_list:
- special_tokens_mask |= input_ids == special_token
-
- # idxs: each row is a list of indices of special tokens
- idxs = torch.nonzero(special_tokens_mask)
-
- # generate attention mask and positional ids
- attention_mask = (
- torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
- )
- position_ids = torch.zeros((bs, num_token), device=input_ids.device)
- cate_to_token_mask_list = [[] for _ in range(bs)]
- previous_col = 0
- for i in range(idxs.shape[0]):
- row, col = idxs[i]
- if (col == 0) or (col == num_token - 1):
- attention_mask[row, col, col] = True
- position_ids[row, col] = 0
- else:
- attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
- position_ids[row, previous_col + 1 : col + 1] = torch.arange(
- 0, col - previous_col, device=input_ids.device
- )
- c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
- c2t_maski[previous_col + 1 : col] = True
- cate_to_token_mask_list[row].append(c2t_maski)
- previous_col = col
-
- cate_to_token_mask_list = [
- torch.stack(cate_to_token_mask_listi, dim=0)
- for cate_to_token_mask_listi in cate_to_token_mask_list
- ]
-
- # # padding mask
- # padding_mask = tokenized['attention_mask']
- # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
-
- return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list
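
To make the behaviour of the two mask helpers above concrete, here is a standalone toy sketch with made-up token ids (101/102/103 standing in for [CLS], '.' and [SEP]); it reproduces the core loop: tokens between two special tokens attend only within their own span, and their position ids restart at 0.

```python
import torch

# Made-up token ids purely for illustration: 101, 102 and 103 play the role of
# "special" separator tokens; the rest are ordinary text tokens.
input_ids = torch.tensor([[101, 5, 6, 102, 7, 8, 102, 103]])
special_tokens = [101, 102, 103]

bs, num_token = input_ids.shape
special_tokens_mask = torch.zeros((bs, num_token), dtype=torch.bool)
for tok in special_tokens:
    special_tokens_mask |= input_ids == tok

attention_mask = torch.eye(num_token, dtype=torch.bool).unsqueeze(0).repeat(bs, 1, 1)
position_ids = torch.zeros((bs, num_token), dtype=torch.long)

previous_col = 0
for row, col in torch.nonzero(special_tokens_mask).tolist():
    if col == 0 or col == num_token - 1:
        attention_mask[row, col, col] = True
        position_ids[row, col] = 0
    else:
        # Tokens strictly after the previous separator up to (and including) this
        # one may attend to each other; their position ids restart from 0.
        attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
        position_ids[row, previous_col + 1 : col + 1] = torch.arange(col - previous_col)
    previous_col = col

print(attention_mask[0].int())  # block-diagonal span structure
print(position_ids[0])          # per-span positions: 0, 1, 2, ... in each span
```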
diff --git a/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/bar_distribution.py b/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/bar_distribution.py
deleted file mode 100644
index c6e7a3a2aa5d08245971d2d72bbdd5d81ecf780e..0000000000000000000000000000000000000000
--- a/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/bar_distribution.py
+++ /dev/null
@@ -1,147 +0,0 @@
-
-import torch
-from torch import nn
-
-class BarDistribution(nn.Module):
- def __init__(self, borders: torch.Tensor): # here borders should start with min and end with max, where all values lie in (min,max) and are sorted
- # sorted list of borders
- super().__init__()
- assert len(borders.shape) == 1
- #self.borders = borders
- self.register_buffer('borders', borders)
- #self.bucket_widths = self.borders[1:] - self.borders[:-1]
- self.register_buffer('bucket_widths', self.borders[1:] - self.borders[:-1])
- full_width = self.bucket_widths.sum()
- assert (full_width - (self.borders[-1] - self.borders[0])).abs() < 1e-4, f'diff: {full_width - (self.borders[-1] - self.borders[0])}'
- assert (torch.argsort(borders) == torch.arange(len(borders))).all(), "Please provide sorted borders!"
- self.num_bars = len(borders) - 1
-
- def map_to_bucket_idx(self, y):
- target_sample = torch.searchsorted(self.borders, y) - 1
- target_sample[y == self.borders[0]] = 0
- target_sample[y == self.borders[-1]] = self.num_bars - 1
- return target_sample
-
- def forward(self, logits, y): # gives the negative log density (the _loss_), y: T x B, logits: T x B x self.num_bars
- target_sample = self.map_to_bucket_idx(y)
- assert (target_sample >= 0).all() and (target_sample < self.num_bars).all(), f'y {y} not in support set for borders (min_y, max_y) {self.borders}'
- assert logits.shape[-1] == self.num_bars, f'{logits.shape[-1]} vs {self.num_bars}'
-
- bucket_log_probs = torch.log_softmax(logits, -1)
- scaled_bucket_log_probs = bucket_log_probs - torch.log(self.bucket_widths)
-
- return -scaled_bucket_log_probs.gather(-1,target_sample.unsqueeze(-1)).squeeze(-1)
-
- def mean(self, logits):
- bucket_means = self.borders[:-1] + self.bucket_widths/2
- p = torch.softmax(logits, -1)
- return p @ bucket_means
-
- def quantile(self, logits, center_prob=.682):
- logits_shape = logits.shape
- logits = logits.view(-1, logits.shape[-1])
- side_prob = (1-center_prob)/2
- probs = logits.softmax(-1)
- flipped_probs = probs.flip(-1)
- cumprobs = torch.cumsum(probs, -1)
- flipped_cumprobs = torch.cumsum(flipped_probs, -1)
-
- def find_lower_quantile(probs, cumprobs, side_prob, borders):
- idx = (torch.searchsorted(cumprobs, side_prob)).clamp(0, len(cumprobs)-1) # this might not do the right thing for outliers
-
- left_prob = cumprobs[idx-1]
- rest_prob = side_prob - left_prob
- left_border, right_border = borders[idx:idx+2]
- return left_border + (right_border-left_border)*rest_prob/probs[idx]
-
- results = []
- for p,cp,f_p,f_cp in zip(probs, cumprobs, flipped_probs, flipped_cumprobs):
- r = find_lower_quantile(p, cp, side_prob, self.borders), find_lower_quantile(f_p, f_cp, side_prob, self.borders.flip(0))
- results.append(r)
-
- return torch.tensor(results).reshape(*logits_shape[:-1],2)
-
- def mode(self, logits):
- mode_inds = logits.argmax(-1)
- bucket_means = self.borders[:-1] + self.bucket_widths/2
- return bucket_means[mode_inds]
-
- def ei(self, logits, best_f, maximize=True): # logits: evaluation_points x batch x feature_dim
- bucket_means = self.borders[:-1] + self.bucket_widths/2
- if maximize:
- bucket_contributions = torch.tensor(
- [max((bucket_max + max(bucket_min, best_f)) / 2 - best_f,0) for
- bucket_min, bucket_max, bucket_mean in zip(self.borders[:-1], self.borders[1:], bucket_means)], dtype=logits.dtype, device=logits.device)
- else:
- bucket_contributions = torch.tensor(
- [-min((min(bucket_max,best_f) + bucket_min) / 2 - best_f,0) for # min on max instead of max on min, and compare min < instead of max >
- bucket_min, bucket_max, bucket_mean in zip(self.borders[:-1], self.borders[1:], bucket_means)], dtype=logits.dtype, device=logits.device)
- p = torch.softmax(logits, -1)
- return p @ bucket_contributions
-
-
-class FullSupportBarDistribution(BarDistribution):
- @staticmethod
- def halfnormal_with_p_weight_before(range_max,p=.5):
- s = range_max / torch.distributions.HalfNormal(torch.tensor(1.)).icdf(torch.tensor(p))
- return torch.distributions.HalfNormal(s)
-
- def forward(self, logits, y): # gives the negative log density (the _loss_), y: T x B, logits: T x B x self.num_bars
- assert self.num_bars > 1
- target_sample = self.map_to_bucket_idx(y)
- target_sample.clamp_(0,self.num_bars-1)
- assert logits.shape[-1] == self.num_bars
-
- bucket_log_probs = torch.log_softmax(logits, -1)
- scaled_bucket_log_probs = bucket_log_probs - torch.log(self.bucket_widths)
- #print(bucket_log_probs, logits.shape)
- log_probs = scaled_bucket_log_probs.gather(-1,target_sample.unsqueeze(-1)).squeeze(-1)
-
- side_normals = (self.halfnormal_with_p_weight_before(self.bucket_widths[0]), self.halfnormal_with_p_weight_before(self.bucket_widths[-1]))
-
-
- # TODO look over it again
- log_probs[target_sample == 0] += side_normals[0].log_prob((self.borders[1]-y[target_sample == 0]).clamp(min=.00000001)) + torch.log(self.bucket_widths[0])
- log_probs[target_sample == self.num_bars-1] += side_normals[1].log_prob(y[target_sample == self.num_bars-1]-self.borders[-2]) + torch.log(self.bucket_widths[-1])
-
-
- return -log_probs
-
- def mean(self, logits):
- bucket_means = self.borders[:-1] + self.bucket_widths / 2
- p = torch.softmax(logits, -1)
- side_normals = (self.halfnormal_with_p_weight_before(self.bucket_widths[0]),
- self.halfnormal_with_p_weight_before(self.bucket_widths[-1]))
- bucket_means[0] = -side_normals[0].mean + self.borders[1]
- bucket_means[-1] = side_normals[1].mean + self.borders[-2]
- return p @ bucket_means
-
-
-
-def get_bucket_limits(num_outputs:int, full_range:tuple=None, ys:torch.Tensor=None):
- assert (ys is not None) or (full_range is not None)
- if ys is not None:
- ys = ys.flatten()
- if len(ys) % num_outputs: ys = ys[:-(len(ys) % num_outputs)]
- print(f'Using {len(ys)} y evals to estimate {num_outputs} buckets. Cut off the last {len(ys) % num_outputs} ys.')
- ys_per_bucket = len(ys) // num_outputs
- if full_range is None:
- full_range = (ys.min(), ys.max())
- else:
- assert full_range[0] <= ys.min() and full_range[1] >= ys.max()
- full_range = torch.tensor(full_range)
- ys_sorted, ys_order = ys.sort(0)
- bucket_limits = (ys_sorted[ys_per_bucket-1::ys_per_bucket][:-1]+ys_sorted[ys_per_bucket::ys_per_bucket])/2
- print(full_range)
- bucket_limits = torch.cat([full_range[0].unsqueeze(0), bucket_limits, full_range[1].unsqueeze(0)],0)
-
- else:
- class_width = (full_range[1] - full_range[0]) / num_outputs
- bucket_limits = torch.cat([full_range[0] + torch.arange(num_outputs).float()*class_width, torch.tensor(full_range[1]).unsqueeze(0)], 0)
-
- assert len(bucket_limits) - 1 == num_outputs and full_range[0] == bucket_limits[0] and full_range[-1] == bucket_limits[-1]
- return bucket_limits
-
-
-
-
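
A short usage sketch for the module above; it assumes the deleted file is importable as `bar_distribution`, and the shapes follow the `forward` docstrings (y: T x B, logits: T x B x num_bars).

```python
import torch
# Assumes the file above is available on the import path as `bar_distribution`.
from bar_distribution import BarDistribution, get_bucket_limits

# Estimate 10 equal-mass bucket borders from 1000 sampled targets.
ys = torch.rand(1000) * 8.0 - 4.0                 # samples in (-4, 4)
borders = get_bucket_limits(num_outputs=10, full_range=(-5.0, 5.0), ys=ys)

dist = BarDistribution(borders)

# Shapes follow forward(): y is T x B, logits are T x B x num_bars.
logits = torch.randn(4, 2, dist.num_bars)
y = torch.rand(4, 2) * 8.0 - 4.0                  # targets inside the borders

nll = dist(logits, y)                             # negative log density per entry
print(nll.shape)                                  # torch.Size([4, 2])
print(dist.mean(logits).shape)                    # torch.Size([4, 2])
```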
diff --git a/spaces/scedlatioru/img-to-music/example/Download HOT Methodist Xhosa Hymn Book.md b/spaces/scedlatioru/img-to-music/example/Download HOT Methodist Xhosa Hymn Book.md
deleted file mode 100644
index 793aa162f7749ee54d030d9bf8c88c98d5167e0e..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Download HOT Methodist Xhosa Hymn Book.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-
How to Download Methodist Xhosa Hymn Book on Your Device
-
-
If you are a Methodist Church member or any other Christian who likes hymns, you may want to download Methodist Xhosa hymn book on your device for your convenience and enjoyment. Methodist Xhosa hymn book is a collection of hymns in Xhosa language that are suitable for various occasions and purposes. In this article, we will show you how to download Methodist Xhosa hymn book on your device using different methods and sources.
-
-
How to Download Methodist Xhosa Hymn Book from Google Play Store
-
-
One of the easiest and most popular ways to download Methodist Xhosa hymn book on your device is to use the Google Play Store app. Google Play Store is an online platform that offers various apps and games for Android devices. You can access Google Play Store from your device by tapping on its icon or by visiting its website.
To download Methodist Xhosa hymn book from Google Play Store, you need to follow these steps:
-
-
-
Open Google Play Store app on your device or visit its website.
-
Search for "MCSA Hymnals" or "Methodist Hymns (ZA)" in the search bar.
-
Select the app that you want to download from the search results. MCSA Hymnals is an app that makes the Holy Bible and Methodist Hymnal available digitally to any compatible handheld device for your convenience online or offline. It allows you to access the Holy Bible and Hymnal in IsiXhosa, Setswana, Sesotho and English at the touch of your fingertips. Methodist Hymns (ZA) is an app that contains Methodist Hymns in English, Xhosa, Sotho, and Other Languages appropriate for Methodist Church Members or any other Christians who like hymns.
-
Tap on the "Install" button to download and install the app on your device.
-
Wait for the app to be downloaded and installed on your device.
-
Open the app and enjoy the Methodist Xhosa hymn book on your device.
-
-
-
To download Methodist Xhosa hymn book from Google Play Store, you need to have a Google account and an internet connection. You may also need to have enough storage space on your device for the app. The apps are free to download and use, but they may contain ads or in-app purchases.
-
-
How to Download Methodist Xhosa Hymn Book from Archive.org
-
-
Another way to download Methodist Xhosa hymn book on your device is to use Archive.org. Archive.org is a website that provides free access to millions of books, movies, music, software and more. You can access Archive.org from any device by visiting its website.
-
-
To download Methodist Xhosa hymn book from Archive.org, you need to follow these steps:
-
-
-
Visit Archive.org website from your device.
-
Search for "The Methodist hymn-book" in the search bar.
-
Select the book that you want to download from the search results. The Methodist hymn-book is a book that contains hymns of the Methodist Church (Great Britain) in various languages, including Xhosa. It was published in 1954 by The Epworth Press.
-
Choose the format that you want to download from the options available. You can choose between PDF, EPUB, Kindle, Text or other formats depending on your preference and device compatibility.
-
Tap on the "Download" button to download the book on your device.
-
Wait for the book to be downloaded on your device.
-
Open the book and enjoy the Methodist Xhosa hymn book on your device.
-
-
-
To download Methodist Xhosa hymn book from Archive.org, you need to have an internet connection and a compatible device. You may also need to have a PDF reader or an e-book reader app on your device to open the book. The book is free to download and use, but it may be subject to copyright laws depending on your location.
-
-
-
Conclusion
-
-
In this article, we have shown you how to download Methodist Xhosa hymn book on your device using different methods and sources. We have also explained the features and benefits of each method and source. You can choose the one that suits your needs and preferences best.
-
-
Methodist Xhosa hymn book is a collection of hymns in Xhosa language that are suitable for various occasions and purposes. It can help you worship God, praise His name, express your faith, seek His guidance, comfort your soul and more. It can also help you learn more about the history, culture and traditions of the Methodist Church and the Xhosa people.
-
-
We hope this article was helpful and we invite you to discover the many possibilities offered by Methodist Xhosa hymn book on your device.
-
How to Download Methodist Xhosa Hymn Book from Other Sources
-
-
Besides Google Play Store and Archive.org, there may be other sources that offer Methodist Xhosa hymn book for download on your device. However, you need to be careful and cautious when using these sources as they may not be reliable, safe or legal. You may encounter some risks or problems such as:
-
-
-
The source may not have the latest or complete version of Methodist Xhosa hymn book.
-
The source may have a corrupted or infected file that can harm your device or compromise your data.
-
The source may have a fake or stolen serial number that can cause legal issues or activation problems.
-
The source may have a hidden fee or subscription that can charge you without your consent or knowledge.
-
The source may have a poor customer service or support that can leave you unsatisfied or frustrated.
-
-
-
To avoid these risks or problems, you need to do some research and verification before using any source that offers Methodist Xhosa hymn book for download on your device. You need to check the following aspects:
-
-
-
The reputation and credibility of the source. You can read reviews, ratings, comments and testimonials from other users who have used the source before.
-
The quality and security of the file. You can scan the file with an antivirus program or a file checker tool before downloading it on your device.
-
The legality and validity of the serial number. You can contact the Cabrilog team or the authorized reseller to confirm the serial number before using it to activate the software.
-
The terms and conditions of the service. You can read the fine print and understand the details of the service such as the price, duration, cancellation policy and more.
-
The availability and responsiveness of the customer service or support. You can test the contact information and see how fast and helpful they are in answering your questions or resolving your issues.
-
-
-
By doing these checks, you can ensure that you are using a trustworthy and reputable source that offers Methodist Xhosa hymn book for download on your device.
-
-
Conclusion
-
-
In this article, we have shown you how to download Methodist Xhosa hymn book on your device using different methods and sources. We have also explained the features and benefits of each method and source, as well as the risks and problems of using other sources. You can choose the one that suits your needs and preferences best.
-
-
Methodist Xhosa hymn book is a collection of hymns in Xhosa language that are suitable for various occasions and purposes. It can help you worship God, praise His name, express your faith, seek His guidance, comfort your soul and more. It can also help you learn more about the history, culture and traditions of the Methodist Church and the Xhosa people.
-
-
We hope this article was helpful and we invite you to discover the many possibilities offered by Methodist Xhosa hymn book on your device.
-
How to Use Methodist Xhosa Hymn Book on Your Device
-
-
After downloading Methodist Xhosa hymn book on your device, you may want to know how to use it effectively and efficiently. Methodist Xhosa hymn book is a software that allows you to access, read, listen and sing along to the hymns in Xhosa language. You can also use it for various purposes such as worship, prayer, meditation, study and more.
-
-
To use Methodist Xhosa hymn book on your device, you need to follow these steps:
-
-
-
Open the app that you downloaded from Google Play Store or Archive.org on your device.
-
Select the language that you want to use from the options available. You can choose between IsiXhosa, Setswana, Sesotho and English.
-
Select the hymn that you want to access from the list or the index. You can also search for a hymn by number, title or keyword.
-
Read the lyrics of the hymn on your screen or listen to the audio of the hymn by tapping on the play button.
-
Sing along to the hymn by following the lyrics and the tune. You can adjust the volume, speed and pitch of the audio according to your preference.
-
Learn more about the hymn by reading the information and the history of the hymn. You can also find related hymns or scriptures that are linked to the hymn.
-
Share the hymn with others by sending it via email, text message, social media or other apps. You can also print or save the hymn as a PDF file on your device.
-
-
-
To use Methodist Xhosa hymn book on your device, you need to have a compatible device and an internet connection. You may also need to have a speaker or a headphone to listen to the audio of the hymns. The apps are easy to use and user-friendly, but they may have some limitations or errors that can be fixed by contacting the developers or updating the apps.
-
-
Conclusion
-
-
In this article, we have shown you how to download Methodist Xhosa hymn book on your device using different methods and sources. We have also explained how to use Methodist Xhosa hymn book on your device for various purposes and occasions. We have also discussed the features and benefits of each method and source, as well as the risks and problems of using other sources.
-
-
Methodist Xhosa hymn book is a collection of hymns in Xhosa language that are suitable for various occasions and purposes. It can help you worship God, praise His name, express your faith, seek His guidance, comfort your soul and more. It can also help you learn more about the history, culture and traditions of the Methodist Church and the Xhosa people.
-
-
We hope this article was helpful and we invite you to discover the many possibilities offered by Methodist Xhosa hymn book on your device.
-
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/example/Minority Movies In Dual Audio Download PATCHED.md b/spaces/scedlatioru/img-to-music/example/Minority Movies In Dual Audio Download PATCHED.md
deleted file mode 100644
index f044acc5bb5a47d9be896fced003adaad7a165bf..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Minority Movies In Dual Audio Download PATCHED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
And then there is the question of who receives credit and under what conditions and with what degrees of support. Robert Allen, senior writer at National Public Radio, has developed a whole series of stories about these issues, and the technology that has fueled them, often in collaboration with data journalists and other writers at NPR. See his TED Talk for more on this. And see NPRs first audio interview. So, in that case, what Tom's was going to be was a bit like a video interview. But it's perhaps more like a psychotherapeutic interview, in that for the interviewee, it's very much a dialogue, not a series of questions and answers. In this case, Tom was told that he would only be able to answer questions about a specific situation; he would not be expected to discuss the subject at length, or to delve into his other interests. So he was quite surprised when he met the researcher at the pub, and realized that he was going to be interviewed. "We went to a bar. And I figured, I didnt have much choice there. I was going to talk to you, so you might as well be comfortable. And then the only other person there was a Scottish woman, so I was like, that was an odd pick. I just dont know that much about Scotland, but I know some Scottish, so maybe we can get some things going. And as it turned out, we got a good hang of each other, and just talked for hours, really.
-
In no way does this site go back to the first days of the Voyager project. All of the work on the site is my own, and Ive been collecting and editing data from the fall of 1980 to the present. The site is a very quick and easy to use site, and very, very fun. Like the Voyager project, Exotic Movies offers 3-4 films a day plus a collection of thousands of images. If the search feature is too broad for your taste, you may want to look at its image collections.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/seanghay/KLEA/khmer_phonemizer.py b/spaces/seanghay/KLEA/khmer_phonemizer.py
deleted file mode 100644
index 73f64558249a1b1918dbd39e8a7fd6bee03bf744..0000000000000000000000000000000000000000
--- a/spaces/seanghay/KLEA/khmer_phonemizer.py
+++ /dev/null
@@ -1,45 +0,0 @@
-r"""
-Khmer Phonemizer - A Free, Standalone and Open-Source Khmer Grapheme-to-Phonemes.
-"""
-import os
-import csv
-from g2p import PhonetisaurusGraph
-
-def _read_lexicon_file(file):
- lexicon = {}
- with open(file) as infile:
- for line in csv.reader(infile, delimiter="\t"):
- word, phonemes = line
- word, phonemes = word.strip(), phonemes.strip().split()
- lexicon[word] = phonemes
- return lexicon
-
-_graph_file = os.path.join(os.path.dirname(__file__), "km_phonemizer.npz")
-_lexicon_file = os.path.join(os.path.dirname(__file__), "km_lexicon.tsv")
-_lexicon_dict = _read_lexicon_file(_lexicon_file)
-_graph = PhonetisaurusGraph.load(_graph_file, preload=False)
-
-def _phoneticize(word: str, beam: int, min_beam: int, beam_scale: float):
- results = _graph.g2p_one(word, beam=beam, min_beam=min_beam, beam_scale=beam_scale)
- results = list(results)
- if len(results) == 0:
- return None
- return results[0]
-
-
-def phonemize_single(
- word,
- beam: int = 500,
- min_beam: int = 100,
- beam_scale: float = 0.6,
- use_lexicon: bool = True,
-):
- r"""
- Phonemize a single word. The word must match [a-zA-Z\u1780-\u17dd]+
- """
- if word is None:
- return None
- word = word.lower()
- if use_lexicon and word in _lexicon_dict:
- return _lexicon_dict[word]
- return _phoneticize(word, beam=beam, min_beam=min_beam, beam_scale=beam_scale)
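
A hedged usage sketch for the phonemizer above; it assumes the module is importable as `khmer_phonemizer` and that its `km_phonemizer.npz` and `km_lexicon.tsv` data files sit alongside it, as the module-level paths expect.

```python
# Assumes khmer_phonemizer.py and its data files are in the working directory.
from khmer_phonemizer import phonemize_single

# A lexicon hit returns the listed phonemes directly; otherwise the word is
# decoded by the Phonetisaurus graph with the given beam settings.
print(phonemize_single("ខ្មែរ"))                     # Khmer word
print(phonemize_single("hello", use_lexicon=False))  # force graph-based G2P
print(phonemize_single("នារី", beam=200, min_beam=50, beam_scale=0.5))
```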
diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/blocks.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/blocks.py
deleted file mode 100644
index 875053ab69bcf6353c970b6585a0e369c4d946b3..0000000000000000000000000000000000000000
--- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/blocks.py
+++ /dev/null
@@ -1,556 +0,0 @@
-"""Set of methods to create custom architecture."""
-
-from collections import Counter
-
-import torch
-
-from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
-from espnet.nets.pytorch_backend.conformer.encoder_layer import (
- EncoderLayer as ConformerEncoderLayer, # noqa: H301
-)
-
-from espnet.nets.pytorch_backend.nets_utils import get_activation
-
-from espnet.nets.pytorch_backend.transducer.causal_conv1d import CausalConv1d
-from espnet.nets.pytorch_backend.transducer.transformer_decoder_layer import (
- DecoderLayer, # noqa: H301
-)
-from espnet.nets.pytorch_backend.transducer.tdnn import TDNN
-from espnet.nets.pytorch_backend.transducer.vgg2l import VGG2L
-
-from espnet.nets.pytorch_backend.transformer.attention import (
- MultiHeadedAttention, # noqa: H301
- RelPositionMultiHeadedAttention, # noqa: H301
-)
-from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
-from espnet.nets.pytorch_backend.transformer.embedding import (
- PositionalEncoding, # noqa: H301
- ScaledPositionalEncoding, # noqa: H301
- RelPositionalEncoding, # noqa: H301
-)
-from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
- PositionwiseFeedForward, # noqa: H301
-)
-from espnet.nets.pytorch_backend.transformer.repeat import MultiSequential
-from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling
-
-
-def check_and_prepare(net_part, blocks_arch, input_layer):
- """Check consecutive block shapes match and prepare input parameters.
-
- Args:
- net_part (str): either 'encoder' or 'decoder'
- blocks_arch (list): list of blocks for network part (type and parameters)
- input_layer (str): input layer type
-
- Return:
- input_layer (str): input layer type
- input_layer_odim (int): output dim of input layer
- input_dropout_rate (float): dropout rate of input layer
- input_pos_dropout_rate (float): dropout rate of input layer positional enc.
- out_dim (int): output dim of last block
-
- """
- input_dropout_rate = sorted(
- Counter(
- b["dropout-rate"] for b in blocks_arch if "dropout-rate" in b
- ).most_common(),
- key=lambda x: x[0],
- reverse=True,
- )
-
- input_pos_dropout_rate = sorted(
- Counter(
- b["pos-dropout-rate"] for b in blocks_arch if "pos-dropout-rate" in b
- ).most_common(),
- key=lambda x: x[0],
- reverse=True,
- )
-
- input_dropout_rate = input_dropout_rate[0][0] if input_dropout_rate else 0.0
- input_pos_dropout_rate = (
- input_pos_dropout_rate[0][0] if input_pos_dropout_rate else 0.0
- )
-
- cmp_io = []
- has_transformer = False
- has_conformer = False
- for i in range(len(blocks_arch)):
- if "type" in blocks_arch[i]:
- block_type = blocks_arch[i]["type"]
- else:
- raise ValueError("type is not defined in the " + str(i + 1) + "th block.")
-
- if block_type == "transformer":
- if not {"d_hidden", "d_ff", "heads"}.issubset(blocks_arch[i]):
- raise ValueError(
- "Block "
- + str(i + 1)
- + "in "
- + net_part
- + ": Transformer block format is: {'type: transformer', "
- "'d_hidden': int, 'd_ff': int, 'heads': int, [...]}"
- )
-
- has_transformer = True
- cmp_io.append((blocks_arch[i]["d_hidden"], blocks_arch[i]["d_hidden"]))
- elif block_type == "conformer":
- if net_part != "encoder":
- raise ValueError(
- "Block " + str(i + 1) + ": conformer type is only for encoder part."
- )
-
- if not {
- "d_hidden",
- "d_ff",
- "heads",
- "macaron_style",
- "use_conv_mod",
- }.issubset(blocks_arch[i]):
- raise ValueError(
- "Block "
- + str(i + 1)
- + " in "
- + net_part
- + ": Conformer block format is {'type: conformer', "
- "'d_hidden': int, 'd_ff': int, 'heads': int, "
- "'macaron_style': bool, 'use_conv_mod': bool, [...]}"
- )
-
- if (
- blocks_arch[i]["use_conv_mod"] is True
- and "conv_mod_kernel" not in blocks_arch[i]
- ):
- raise ValueError(
- "Block "
- + str(i + 1)
- + ": 'use_conv_mod' is True but 'use_conv_kernel' is not specified"
- )
-
- has_conformer = True
- cmp_io.append((blocks_arch[i]["d_hidden"], blocks_arch[i]["d_hidden"]))
- elif block_type == "causal-conv1d":
- if not {"idim", "odim", "kernel_size"}.issubset(blocks_arch[i]):
- raise ValueError(
- "Block "
- + str(i + 1)
- + " in "
- + net_part
- + ": causal conv1d block format is: {'type: causal-conv1d', "
- "'idim': int, 'odim': int, 'kernel_size': int}"
- )
-
- if i == 0:
- input_layer = "c-embed"
-
- cmp_io.append((blocks_arch[i]["idim"], blocks_arch[i]["odim"]))
- elif block_type == "tdnn":
- if not {"idim", "odim", "ctx_size", "dilation", "stride"}.issubset(
- blocks_arch[i]
- ):
- raise ValueError(
- "Block "
- + str(i + 1)
- + " in "
- + net_part
- + ": TDNN block format is: {'type: tdnn', "
- "'idim': int, 'odim': int, 'ctx_size': int, "
- "'dilation': int, 'stride': int, [...]}"
- )
-
- cmp_io.append((blocks_arch[i]["idim"], blocks_arch[i]["odim"]))
- else:
- raise NotImplementedError(
- "Wrong type for block "
- + str(i + 1)
- + " in "
- + net_part
- + ". Currently supported: "
- "tdnn, causal-conv1d or transformer"
- )
-
- if has_transformer and has_conformer:
- raise NotImplementedError(
- net_part + ": transformer and conformer blocks "
- "can't be defined in the same net part."
- )
-
- for i in range(1, len(cmp_io)):
- if cmp_io[(i - 1)][1] != cmp_io[i][0]:
- raise ValueError(
- "Output/Input mismatch between blocks "
- + str(i)
- + " and "
- + str(i + 1)
- + " in "
- + net_part
- )
-
- if blocks_arch[0]["type"] in ("tdnn", "causal-conv1d"):
- input_layer_odim = blocks_arch[0]["idim"]
- else:
- input_layer_odim = blocks_arch[0]["d_hidden"]
-
- if blocks_arch[-1]["type"] in ("tdnn", "causal-conv1d"):
- out_dim = blocks_arch[-1]["odim"]
- else:
- out_dim = blocks_arch[-1]["d_hidden"]
-
- return (
- input_layer,
- input_layer_odim,
- input_dropout_rate,
- input_pos_dropout_rate,
- out_dim,
- )
-
-
-def get_pos_enc_and_att_class(net_part, pos_enc_type, self_attn_type):
- """Get positional encoding and self attention module class.
-
- Args:
- net_part (str): either 'encoder' or 'decoder'
- pos_enc_type (str): positional encoding type
- self_attn_type (str): self-attention type
-
- Return:
- pos_enc_class (torch.nn.Module): positional encoding class
- self_attn_class (torch.nn.Module): self-attention class
-
- """
- if pos_enc_type == "abs_pos":
- pos_enc_class = PositionalEncoding
- elif pos_enc_type == "scaled_abs_pos":
- pos_enc_class = ScaledPositionalEncoding
- elif pos_enc_type == "rel_pos":
- if net_part == "encoder" and self_attn_type != "rel_self_attn":
- raise ValueError("'rel_pos' is only compatible with 'rel_self_attn'")
- pos_enc_class = RelPositionalEncoding
- else:
- raise NotImplementedError(
- "pos_enc_type should be either 'abs_pos', 'scaled_abs_pos' or 'rel_pos'"
- )
-
- if self_attn_type == "rel_self_attn":
- self_attn_class = RelPositionMultiHeadedAttention
- else:
- self_attn_class = MultiHeadedAttention
-
- return pos_enc_class, self_attn_class
-
-
-def build_input_layer(
- input_layer,
- idim,
- odim,
- pos_enc_class,
- dropout_rate_embed,
- dropout_rate,
- pos_dropout_rate,
- padding_idx,
-):
- """Build input layer.
-
- Args:
- input_layer (str): input layer type
- idim (int): input dimension
- odim (int): output dimension
- pos_enc_class (class): positional encoding class
- dropout_rate_embed (float): dropout rate for embedding layer
- dropout_rate (float): dropout rate for input layer
- pos_dropout_rate (float): dropout rate for positional encoding
- padding_idx (int): padding index for embedding input layer (if specified)
-
- Returns:
- (torch.nn.*): input layer module
- subsampling_factor (int): subsampling factor
-
- """
- if pos_enc_class.__name__ == "RelPositionalEncoding":
- pos_enc_class_subsampling = pos_enc_class(odim, pos_dropout_rate)
- else:
- pos_enc_class_subsampling = None
-
- if input_layer == "linear":
- return (
- torch.nn.Sequential(
- torch.nn.Linear(idim, odim),
- torch.nn.LayerNorm(odim),
- torch.nn.Dropout(dropout_rate),
- torch.nn.ReLU(),
- pos_enc_class(odim, pos_dropout_rate),
- ),
- 1,
- )
- elif input_layer == "conv2d":
- return Conv2dSubsampling(idim, odim, dropout_rate, pos_enc_class_subsampling), 4
- elif input_layer == "vgg2l":
- return VGG2L(idim, odim, pos_enc_class_subsampling), 4
- elif input_layer == "embed":
- return (
- torch.nn.Sequential(
- torch.nn.Embedding(idim, odim, padding_idx=padding_idx),
- pos_enc_class(odim, pos_dropout_rate),
- ),
- 1,
- )
- elif input_layer == "c-embed":
- return (
- torch.nn.Sequential(
- torch.nn.Embedding(idim, odim, padding_idx=padding_idx),
- torch.nn.Dropout(dropout_rate_embed),
- ),
- 1,
- )
- else:
- raise NotImplementedError("Support: linear, conv2d, vgg2l and embed")
-
-
-def build_transformer_block(net_part, block_arch, pw_layer_type, pw_activation_type):
- """Build function for transformer block.
-
- Args:
- net_part (str): either 'encoder' or 'decoder'
- block_arch (dict): transformer block parameters
- pw_layer_type (str): positionwise layer type
- pw_activation_type (str): positionwise activation type
-
- Returns:
- (function): function to create transformer block
-
- """
- d_hidden = block_arch["d_hidden"]
- d_ff = block_arch["d_ff"]
- heads = block_arch["heads"]
-
- dropout_rate = block_arch["dropout-rate"] if "dropout-rate" in block_arch else 0.0
- pos_dropout_rate = (
- block_arch["pos-dropout-rate"] if "pos-dropout-rate" in block_arch else 0.0
- )
- att_dropout_rate = (
- block_arch["att-dropout-rate"] if "att-dropout-rate" in block_arch else 0.0
- )
-
- if pw_layer_type == "linear":
- pw_layer = PositionwiseFeedForward
- pw_activation = get_activation(pw_activation_type)
- pw_layer_args = (d_hidden, d_ff, pos_dropout_rate, pw_activation)
- else:
- raise NotImplementedError("Transformer block only supports linear yet.")
-
- if net_part == "encoder":
- transformer_layer_class = EncoderLayer
- elif net_part == "decoder":
- transformer_layer_class = DecoderLayer
-
- return lambda: transformer_layer_class(
- d_hidden,
- MultiHeadedAttention(heads, d_hidden, att_dropout_rate),
- pw_layer(*pw_layer_args),
- dropout_rate,
- )
-
-
-def build_conformer_block(
- block_arch,
- self_attn_class,
- pw_layer_type,
- pw_activation_type,
- conv_mod_activation_type,
-):
- """Build function for conformer block.
-
- Args:
- block_arch (dict): conformer block parameters
- self_attn_type (str): self-attention module type
- pw_layer_type (str): positionwise layer type
- pw_activation_type (str): positionwise activation type
- conv_mod_activation_type (str): convolutional module activation type
-
- Returns:
- (function): function to create conformer block
-
- """
- d_hidden = block_arch["d_hidden"]
- d_ff = block_arch["d_ff"]
- heads = block_arch["heads"]
- macaron_style = block_arch["macaron_style"]
- use_conv_mod = block_arch["use_conv_mod"]
-
- dropout_rate = block_arch["dropout-rate"] if "dropout-rate" in block_arch else 0.0
- pos_dropout_rate = (
- block_arch["pos-dropout-rate"] if "pos-dropout-rate" in block_arch else 0.0
- )
- att_dropout_rate = (
- block_arch["att-dropout-rate"] if "att-dropout-rate" in block_arch else 0.0
- )
-
- if pw_layer_type == "linear":
- pw_layer = PositionwiseFeedForward
- pw_activation = get_activation(pw_activation_type)
- pw_layer_args = (d_hidden, d_ff, pos_dropout_rate, pw_activation)
- else:
- raise NotImplementedError("Conformer block only supports linear yet.")
-
- if use_conv_mod:
- conv_layer = ConvolutionModule
- conv_activation = get_activation(conv_mod_activation_type)
- conv_layers_args = (d_hidden, block_arch["conv_mod_kernel"], conv_activation)
-
- return lambda: ConformerEncoderLayer(
- d_hidden,
- self_attn_class(heads, d_hidden, att_dropout_rate),
- pw_layer(*pw_layer_args),
- pw_layer(*pw_layer_args) if macaron_style else None,
- conv_layer(*conv_layers_args) if use_conv_mod else None,
- dropout_rate,
- )
-
-
-def build_causal_conv1d_block(block_arch):
- """Build function for causal conv1d block.
-
- Args:
- block_arch (dict): causal conv1d block parameters
-
- Returns:
- (function): function to create causal conv1d block
-
- """
- idim = block_arch["idim"]
- odim = block_arch["odim"]
- kernel_size = block_arch["kernel_size"]
-
- return lambda: CausalConv1d(idim, odim, kernel_size)
-
-
-def build_tdnn_block(block_arch):
- """Build function for tdnn block.
-
- Args:
- block_arch (dict): tdnn block parameters
-
- Returns:
- (function): function to create tdnn block
-
- """
- idim = block_arch["idim"]
- odim = block_arch["odim"]
- ctx_size = block_arch["ctx_size"]
- dilation = block_arch["dilation"]
- stride = block_arch["stride"]
-
- use_batch_norm = (
- block_arch["use-batch-norm"] if "use-batch-norm" in block_arch else False
- )
- use_relu = block_arch["use-relu"] if "use-relu" in block_arch else False
-
- dropout_rate = block_arch["dropout-rate"] if "dropout-rate" in block_arch else 0.0
-
- return lambda: TDNN(
- idim,
- odim,
- ctx_size=ctx_size,
- dilation=dilation,
- stride=stride,
- dropout_rate=dropout_rate,
- batch_norm=use_batch_norm,
- relu=use_relu,
- )
-
-
-def build_blocks(
- net_part,
- idim,
- input_layer,
- blocks_arch,
- repeat_block=0,
- self_attn_type="self_attn",
- positional_encoding_type="abs_pos",
- positionwise_layer_type="linear",
- positionwise_activation_type="relu",
- conv_mod_activation_type="relu",
- dropout_rate_embed=0.0,
- padding_idx=-1,
-):
- """Build block for customizable architecture.
-
- Args:
- net_part (str): either 'encoder' or 'decoder'
- idim (int): dimension of inputs
- input_layer (str): input layer type
- blocks_arch (list): list of blocks for network part (type and parameters)
- repeat_block (int): repeat provided blocks N times if N > 1
-        self_attn_type (str): self-attention module type
-        positional_encoding_type (str): positional encoding layer type
-        positionwise_layer_type (str): positionwise layer type
- positionwise_activation_type (str): positionwise activation type
- conv_mod_activation_type (str): convolutional module activation type
- dropout_rate_embed (float): dropout rate for embedding
- padding_idx (int): padding index for embedding input layer (if specified)
-
- Returns:
- in_layer (torch.nn.*): input layer
- all_blocks (MultiSequential): all blocks for network part
- out_dim (int): dimension of last block output
- conv_subsampling_factor (int): subsampling factor in frontend CNN
-
- """
- fn_modules = []
-
- (
- input_layer,
- input_layer_odim,
- input_dropout_rate,
- input_pos_dropout_rate,
- out_dim,
- ) = check_and_prepare(net_part, blocks_arch, input_layer)
-
- pos_enc_class, self_attn_class = get_pos_enc_and_att_class(
- net_part, positional_encoding_type, self_attn_type
- )
-
- in_layer, conv_subsampling_factor = build_input_layer(
- input_layer,
- idim,
- input_layer_odim,
- pos_enc_class,
- dropout_rate_embed,
- input_dropout_rate,
- input_pos_dropout_rate,
- padding_idx,
- )
-
- for i in range(len(blocks_arch)):
- block_type = blocks_arch[i]["type"]
-
- if block_type == "tdnn":
- module = build_tdnn_block(blocks_arch[i])
- elif block_type == "transformer":
- module = build_transformer_block(
- net_part,
- blocks_arch[i],
- positionwise_layer_type,
- positionwise_activation_type,
- )
- elif block_type == "conformer":
- module = build_conformer_block(
- blocks_arch[i],
- self_attn_class,
- positionwise_layer_type,
- positionwise_activation_type,
- conv_mod_activation_type,
- )
- elif block_type == "causal-conv1d":
- module = build_causal_conv1d_block(blocks_arch[i])
-
- fn_modules.append(module)
-
- if repeat_block > 1:
- fn_modules = fn_modules * repeat_block
-
- return (
- in_layer,
- MultiSequential(*[fn() for fn in fn_modules]),
- out_dim,
- conv_subsampling_factor,
- )
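A minimal sketch of how the builder above might be driven for an encoder made of one repeated conformer block. The dictionary keys mirror the ones read by build_conformer_block and build_blocks; the concrete values (feature dimension, input layer name, kernel size, activations) are illustrative assumptions only.

blocks_arch = [
    {
        "type": "conformer",
        "d_hidden": 256,
        "d_ff": 1024,
        "heads": 4,
        "macaron_style": True,
        "use_conv_mod": True,
        "conv_mod_kernel": 31,       # only read when use_conv_mod is True
        "dropout-rate": 0.1,
        "att-dropout-rate": 0.1,
    }
]

in_layer, encoder, out_dim, subsampling_factor = build_blocks(
    "encoder",
    idim=80,                         # e.g. 80-dim log-mel input features (assumed)
    input_layer="conv2d",            # assumed to be one of the types handled by build_input_layer
    blocks_arch=blocks_arch,
    repeat_block=12,                 # repeat the single block definition 12 times
    positionwise_activation_type="swish",
    conv_mod_activation_type="swish",
)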
diff --git a/spaces/segments/panoptic-segment-anything-api/segment_anything/segment_anything/modeling/__init__.py b/spaces/segments/panoptic-segment-anything-api/segment_anything/segment_anything/modeling/__init__.py
deleted file mode 100644
index 38e906243d898d7fc071c0fe218338c5cace3ea1..0000000000000000000000000000000000000000
--- a/spaces/segments/panoptic-segment-anything-api/segment_anything/segment_anything/modeling/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .sam import Sam
-from .image_encoder import ImageEncoderViT
-from .mask_decoder import MaskDecoder
-from .prompt_encoder import PromptEncoder
-from .transformer import TwoWayTransformer
diff --git a/spaces/shgao/EditAnything/ldm/models/diffusion/dpm_solver/dpm_solver.py b/spaces/shgao/EditAnything/ldm/models/diffusion/dpm_solver/dpm_solver.py
deleted file mode 100644
index 095e5ba3ce0b1aa7f4b3f1e2e5d8fff7cfe6dc8c..0000000000000000000000000000000000000000
--- a/spaces/shgao/EditAnything/ldm/models/diffusion/dpm_solver/dpm_solver.py
+++ /dev/null
@@ -1,1154 +0,0 @@
-import torch
-import torch.nn.functional as F
-import math
-from tqdm import tqdm
-
-
-class NoiseScheduleVP:
- def __init__(
- self,
- schedule='discrete',
- betas=None,
- alphas_cumprod=None,
- continuous_beta_0=0.1,
- continuous_beta_1=20.,
- ):
- """Create a wrapper class for the forward SDE (VP type).
- ***
-        Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
-            We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
- ***
-        The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
- We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
- Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
- log_alpha_t = self.marginal_log_mean_coeff(t)
- sigma_t = self.marginal_std(t)
- lambda_t = self.marginal_lambda(t)
- Moreover, as lambda(t) is an invertible function, we also support its inverse function:
- t = self.inverse_lambda(lambda_t)
- ===============================================================
- We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
- 1. For discrete-time DPMs:
- For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
- t_i = (i + 1) / N
- e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
- We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
- Args:
- betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
- alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
-                Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
- **Important**: Please pay special attention for the args for `alphas_cumprod`:
- The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
- q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
- Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
- alpha_{t_n} = \sqrt{\hat{alpha_n}},
- and
- log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
- 2. For continuous-time DPMs:
- We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
- schedule are the default settings in DDPM and improved-DDPM:
- Args:
- beta_min: A `float` number. The smallest beta for the linear schedule.
- beta_max: A `float` number. The largest beta for the linear schedule.
- cosine_s: A `float` number. The hyperparameter in the cosine schedule.
- cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
- T: A `float` number. The ending time of the forward process.
- ===============================================================
- Args:
- schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
- 'linear' or 'cosine' for continuous-time DPMs.
- Returns:
- A wrapper object of the forward SDE (VP type).
-
- ===============================================================
- Example:
- # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
- >>> ns = NoiseScheduleVP('discrete', betas=betas)
- # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
- >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
- # For continuous-time DPMs (VPSDE), linear schedule:
- >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
- """
-
- if schedule not in ['discrete', 'linear', 'cosine']:
- raise ValueError(
- "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
- schedule))
-
- self.schedule = schedule
- if schedule == 'discrete':
- if betas is not None:
- log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
- else:
- assert alphas_cumprod is not None
- log_alphas = 0.5 * torch.log(alphas_cumprod)
- self.total_N = len(log_alphas)
- self.T = 1.
- self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
- self.log_alpha_array = log_alphas.reshape((1, -1,))
- else:
- self.total_N = 1000
- self.beta_0 = continuous_beta_0
- self.beta_1 = continuous_beta_1
- self.cosine_s = 0.008
- self.cosine_beta_max = 999.
- self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
- 1. + self.cosine_s) / math.pi - self.cosine_s
- self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
- self.schedule = schedule
- if schedule == 'cosine':
- # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
- # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
- self.T = 0.9946
- else:
- self.T = 1.
-
- def marginal_log_mean_coeff(self, t):
- """
- Compute log(alpha_t) of a given continuous-time label t in [0, T].
- """
- if self.schedule == 'discrete':
- return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
- self.log_alpha_array.to(t.device)).reshape((-1))
- elif self.schedule == 'linear':
- return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
- elif self.schedule == 'cosine':
- log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
- log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
- return log_alpha_t
-
- def marginal_alpha(self, t):
- """
- Compute alpha_t of a given continuous-time label t in [0, T].
- """
- return torch.exp(self.marginal_log_mean_coeff(t))
-
- def marginal_std(self, t):
- """
- Compute sigma_t of a given continuous-time label t in [0, T].
- """
- return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
-
- def marginal_lambda(self, t):
- """
- Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
- """
- log_mean_coeff = self.marginal_log_mean_coeff(t)
- log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
- return log_mean_coeff - log_std
-
- def inverse_lambda(self, lamb):
- """
- Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
- """
- if self.schedule == 'linear':
- tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
- Delta = self.beta_0 ** 2 + tmp
- return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
- elif self.schedule == 'discrete':
- log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
- t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
- torch.flip(self.t_array.to(lamb.device), [1]))
- return t.reshape((-1,))
- else:
- log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
- t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
- 1. + self.cosine_s) / math.pi - self.cosine_s
- t = t_fn(log_alpha)
- return t
-
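A quick illustrative check of the relationships implemented above, using an assumed linear DDPM beta array (the values are not from this repository): lambda_t returned by marginal_lambda equals log(alpha_t) - log(sigma_t), and inverse_lambda recovers the time label.

import torch

betas = torch.linspace(1e-4, 2e-2, 1000)            # assumed DDPM-style linear betas
ns = NoiseScheduleVP('discrete', betas=betas)

t = torch.tensor([0.5])
alpha_t, sigma_t = ns.marginal_alpha(t), ns.marginal_std(t)
lambda_t = ns.marginal_lambda(t)

assert torch.allclose(lambda_t, torch.log(alpha_t) - torch.log(sigma_t), atol=1e-5)
assert torch.allclose(ns.inverse_lambda(lambda_t), t, atol=1e-4)   # lambda_t is invertible in t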
-
-def model_wrapper(
- model,
- noise_schedule,
- model_type="noise",
- model_kwargs={},
- guidance_type="uncond",
- condition=None,
- unconditional_condition=None,
- guidance_scale=1.,
- classifier_fn=None,
- classifier_kwargs={},
-):
- """Create a wrapper function for the noise prediction model.
- DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
- firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
- We support four types of the diffusion model by setting `model_type`:
- 1. "noise": noise prediction model. (Trained by predicting noise).
- 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
- 3. "v": velocity prediction model. (Trained by predicting the velocity).
- The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
- [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
- arXiv preprint arXiv:2202.00512 (2022).
- [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
- arXiv preprint arXiv:2210.02303 (2022).
-
- 4. "score": marginal score function. (Trained by denoising score matching).
- Note that the score function and the noise prediction model follows a simple relationship:
- ```
- noise(x_t, t) = -sigma_t * score(x_t, t)
- ```
- We support three types of guided sampling by DPMs by setting `guidance_type`:
- 1. "uncond": unconditional sampling by DPMs.
- The input `model` has the following format:
- ``
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
- ``
- 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
- The input `model` has the following format:
- ``
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
- ``
- The input `classifier_fn` has the following format:
- ``
- classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
- ``
- [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
- in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
- 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
- The input `model` has the following format:
- ``
- model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
- ``
- And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
- [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
- arXiv preprint arXiv:2207.12598 (2022).
-
- The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
- or continuous-time labels (i.e. epsilon to T).
- We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
- ``
- def model_fn(x, t_continuous) -> noise:
- t_input = get_model_input_time(t_continuous)
- return noise_pred(model, x, t_input, **model_kwargs)
- ``
- where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
- ===============================================================
- Args:
- model: A diffusion model with the corresponding format described above.
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
- model_type: A `str`. The parameterization type of the diffusion model.
- "noise" or "x_start" or "v" or "score".
- model_kwargs: A `dict`. A dict for the other inputs of the model function.
- guidance_type: A `str`. The type of the guidance for sampling.
- "uncond" or "classifier" or "classifier-free".
- condition: A pytorch tensor. The condition for the guided sampling.
- Only used for "classifier" or "classifier-free" guidance type.
- unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
- Only used for "classifier-free" guidance type.
- guidance_scale: A `float`. The scale for the guided sampling.
- classifier_fn: A classifier function. Only used for the classifier guidance.
- classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
- Returns:
- A noise prediction model that accepts the noised data and the continuous time as the inputs.
- """
-
- def get_model_input_time(t_continuous):
- """
- Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
- For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
- For continuous-time DPMs, we just use `t_continuous`.
- """
- if noise_schedule.schedule == 'discrete':
- return (t_continuous - 1. / noise_schedule.total_N) * 1000.
- else:
- return t_continuous
-
- def noise_pred_fn(x, t_continuous, cond=None):
- if t_continuous.reshape((-1,)).shape[0] == 1:
- t_continuous = t_continuous.expand((x.shape[0]))
- t_input = get_model_input_time(t_continuous)
- if cond is None:
- output = model(x, t_input, **model_kwargs)
- else:
- output = model(x, t_input, cond, **model_kwargs)
- if model_type == "noise":
- return output
- elif model_type == "x_start":
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
- dims = x.dim()
- return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
- elif model_type == "v":
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
- dims = x.dim()
- return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
- elif model_type == "score":
- sigma_t = noise_schedule.marginal_std(t_continuous)
- dims = x.dim()
- return -expand_dims(sigma_t, dims) * output
-
- def cond_grad_fn(x, t_input):
- """
- Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
- """
- with torch.enable_grad():
- x_in = x.detach().requires_grad_(True)
- log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
- return torch.autograd.grad(log_prob.sum(), x_in)[0]
-
- def model_fn(x, t_continuous):
- """
-        The noise prediction model function that is used for DPM-Solver.
- """
- if t_continuous.reshape((-1,)).shape[0] == 1:
- t_continuous = t_continuous.expand((x.shape[0]))
- if guidance_type == "uncond":
- return noise_pred_fn(x, t_continuous)
- elif guidance_type == "classifier":
- assert classifier_fn is not None
- t_input = get_model_input_time(t_continuous)
- cond_grad = cond_grad_fn(x, t_input)
- sigma_t = noise_schedule.marginal_std(t_continuous)
- noise = noise_pred_fn(x, t_continuous)
- return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
- elif guidance_type == "classifier-free":
- if guidance_scale == 1. or unconditional_condition is None:
- return noise_pred_fn(x, t_continuous, cond=condition)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t_continuous] * 2)
- c_in = torch.cat([unconditional_condition, condition])
- noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
- return noise_uncond + guidance_scale * (noise - noise_uncond)
-
-    assert model_type in ["noise", "x_start", "v", "score"]
- assert guidance_type in ["uncond", "classifier", "classifier-free"]
- return model_fn
-
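A hedged usage sketch of model_wrapper for classifier-free guidance. ToyEps and its tensor shapes are placeholders for a real conditional noise-prediction network; only the wiring of the wrapper mirrors the API above.

import torch
import torch.nn as nn

class ToyEps(nn.Module):
    """Stand-in for a trained conditional epsilon-prediction network (illustrative only)."""
    def __init__(self, dim=8):
        super().__init__()
        self.net = nn.Linear(dim * 2, dim)

    def forward(self, x, t, cond):
        # Ignore the time label for simplicity; condition by concatenation.
        return self.net(torch.cat([x, cond], dim=-1))

betas = torch.linspace(1e-4, 2e-2, 1000)             # assumed DDPM-style betas
ns = NoiseScheduleVP('discrete', betas=betas)

x = torch.randn(4, 8)
cond, uncond = torch.randn(4, 8), torch.zeros(4, 8)

model_fn = model_wrapper(
    ToyEps(), ns,
    model_type="noise",
    guidance_type="classifier-free",
    condition=cond,
    unconditional_condition=uncond,
    guidance_scale=3.0,
)
eps = model_fn(x, torch.tensor([0.5]))               # continuous time label in (0, 1]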
-
-class DPM_Solver:
- def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
- """Construct a DPM-Solver.
- We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
- If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
- If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
- In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
- The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
- Args:
- model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
- ``
- def model_fn(x, t_continuous):
- return noise
- ``
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
- predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
- thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
- max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
-
- [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
- """
- self.model = model_fn
- self.noise_schedule = noise_schedule
- self.predict_x0 = predict_x0
- self.thresholding = thresholding
- self.max_val = max_val
-
- def noise_prediction_fn(self, x, t):
- """
- Return the noise prediction model.
- """
- return self.model(x, t)
-
- def data_prediction_fn(self, x, t):
- """
- Return the data prediction model (with thresholding).
- """
- noise = self.noise_prediction_fn(x, t)
- dims = x.dim()
- alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
- x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
- if self.thresholding:
- p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
- s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
- s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
- x0 = torch.clamp(x0, -s, s) / s
- return x0
-
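The dynamic-thresholding branch above can be illustrated in isolation: clamp each sample of x0 to its 99.5th-percentile magnitude s (but at least max_val = 1) and rescale, so extreme predictions are pulled back into range. The tensor below is a made-up example, not data from this repository.

import torch

x0 = 3.0 * torch.randn(2, 3, 8, 8)                              # deliberately out-of-range predictions
s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), 0.995, dim=1)
s = torch.maximum(s, torch.ones_like(s))[:, None, None, None]   # same effect as expand_dims(..., dims=4)
x0_clamped = torch.clamp(x0, -s, s) / s                          # values now lie in [-1, 1]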
- def model_fn(self, x, t):
- """
- Convert the model to the noise prediction model or the data prediction model.
- """
- if self.predict_x0:
- return self.data_prediction_fn(x, t)
- else:
- return self.noise_prediction_fn(x, t)
-
- def get_time_steps(self, skip_type, t_T, t_0, N, device):
- """Compute the intermediate time steps for sampling.
- Args:
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
- - 'logSNR': uniform logSNR for the time steps.
-                - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
-                - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
- t_T: A `float`. The starting time of the sampling (default is T).
- t_0: A `float`. The ending time of the sampling (default is epsilon).
-            N: An `int`. The number of time step intervals; the returned tensor has N + 1 entries.
- device: A torch device.
- Returns:
- A pytorch tensor of the time steps, with the shape (N + 1,).
- """
- if skip_type == 'logSNR':
- lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
- lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
- logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
- return self.noise_schedule.inverse_lambda(logSNR_steps)
- elif skip_type == 'time_uniform':
- return torch.linspace(t_T, t_0, N + 1).to(device)
- elif skip_type == 'time_quadratic':
- t_order = 2
- t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
- return t
- else:
- raise ValueError(
- "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
-
- def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
- """
- Get the order of each step for sampling by the singlestep DPM-Solver.
- We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
- Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
- - If order == 1:
- We take `steps` of DPM-Solver-1 (i.e. DDIM).
- - If order == 2:
- - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
- - If steps % 2 == 0, we use K steps of DPM-Solver-2.
- - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
- - If order == 3:
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
- - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
- - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
- - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
- ============================================
- Args:
- order: A `int`. The max order for the solver (2 or 3).
- steps: A `int`. The total number of function evaluations (NFE).
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
- - 'logSNR': uniform logSNR for the time steps.
-                - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
-                - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
- t_T: A `float`. The starting time of the sampling (default is T).
- t_0: A `float`. The ending time of the sampling (default is epsilon).
- device: A torch device.
- Returns:
- orders: A list of the solver order of each step.
- """
- if order == 3:
- K = steps // 3 + 1
- if steps % 3 == 0:
- orders = [3, ] * (K - 2) + [2, 1]
- elif steps % 3 == 1:
- orders = [3, ] * (K - 1) + [1]
- else:
- orders = [3, ] * (K - 1) + [2]
- elif order == 2:
- if steps % 2 == 0:
- K = steps // 2
- orders = [2, ] * K
- else:
- K = steps // 2 + 1
- orders = [2, ] * (K - 1) + [1]
- elif order == 1:
- K = 1
- orders = [1, ] * steps
- else:
- raise ValueError("'order' must be '1' or '2' or '3'.")
- if skip_type == 'logSNR':
- # To reproduce the results in DPM-Solver paper
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
- else:
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
-                torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)]
- return timesteps_outer, orders
-
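A small worked example of the scheduling described above (illustrative only): with steps=10 and order=3 we get K = 10 // 3 + 1 = 4 outer steps, and since 10 % 3 == 1 the orders are [3, 3, 3, 1], which spends exactly 10 function evaluations.

import torch

solver = DPM_Solver(model_fn=lambda x, t: torch.zeros_like(x),   # dummy model; not called here
                    noise_schedule=NoiseScheduleVP('linear'))
timesteps_outer, orders = solver.get_orders_and_timesteps_for_singlestep_solver(
    steps=10, order=3, skip_type='logSNR', t_T=1., t_0=1e-3, device='cpu')
assert orders == [3, 3, 3, 1] and sum(orders) == 10
assert timesteps_outer.shape[0] == 5                             # K + 1 outer time points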
- def denoise_to_zero_fn(self, x, s):
- """
-        Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
- """
- return self.data_prediction_fn(x, s)
-
- def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
- """
- DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- model_s: A pytorch tensor. The model function evaluated at time `s`.
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
- return_intermediate: A `bool`. If true, also return the model value at time `s`.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- ns = self.noise_schedule
- dims = x.dim()
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
- h = lambda_t - lambda_s
- log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
- sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
- alpha_t = torch.exp(log_alpha_t)
-
- if self.predict_x0:
- phi_1 = torch.expm1(-h)
- if model_s is None:
- model_s = self.model_fn(x, s)
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- )
- if return_intermediate:
- return x_t, {'model_s': model_s}
- else:
- return x_t
- else:
- phi_1 = torch.expm1(h)
- if model_s is None:
- model_s = self.model_fn(x, s)
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- )
- if return_intermediate:
- return x_t, {'model_s': model_s}
- else:
- return x_t
-
- def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
- solver_type='dpm_solver'):
- """
- Singlestep solver DPM-Solver-2 from time `s` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- r1: A `float`. The hyperparameter of the second-order solver.
- model_s: A pytorch tensor. The model function evaluated at time `s`.
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
- return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if solver_type not in ['dpm_solver', 'taylor']:
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
- if r1 is None:
- r1 = 0.5
- ns = self.noise_schedule
- dims = x.dim()
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
- h = lambda_t - lambda_s
- lambda_s1 = lambda_s + r1 * h
- s1 = ns.inverse_lambda(lambda_s1)
- log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
- s1), ns.marginal_log_mean_coeff(t)
- sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
- alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
-
- if self.predict_x0:
- phi_11 = torch.expm1(-r1 * h)
- phi_1 = torch.expm1(-h)
-
- if model_s is None:
- model_s = self.model_fn(x, s)
- x_s1 = (
- expand_dims(sigma_s1 / sigma_s, dims) * x
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
- )
- model_s1 = self.model_fn(x_s1, s1)
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
- )
- elif solver_type == 'taylor':
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
- model_s1 - model_s)
- )
- else:
- phi_11 = torch.expm1(r1 * h)
- phi_1 = torch.expm1(h)
-
- if model_s is None:
- model_s = self.model_fn(x, s)
- x_s1 = (
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
- )
- model_s1 = self.model_fn(x_s1, s1)
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
- )
- elif solver_type == 'taylor':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
- )
- if return_intermediate:
- return x_t, {'model_s': model_s, 'model_s1': model_s1}
- else:
- return x_t
-
- def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
- return_intermediate=False, solver_type='dpm_solver'):
- """
- Singlestep solver DPM-Solver-3 from time `s` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- r1: A `float`. The hyperparameter of the third-order solver.
- r2: A `float`. The hyperparameter of the third-order solver.
- model_s: A pytorch tensor. The model function evaluated at time `s`.
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
- model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
- If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if solver_type not in ['dpm_solver', 'taylor']:
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
- if r1 is None:
- r1 = 1. / 3.
- if r2 is None:
- r2 = 2. / 3.
- ns = self.noise_schedule
- dims = x.dim()
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
- h = lambda_t - lambda_s
- lambda_s1 = lambda_s + r1 * h
- lambda_s2 = lambda_s + r2 * h
- s1 = ns.inverse_lambda(lambda_s1)
- s2 = ns.inverse_lambda(lambda_s2)
- log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
- s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
- sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
- s2), ns.marginal_std(t)
- alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
-
- if self.predict_x0:
- phi_11 = torch.expm1(-r1 * h)
- phi_12 = torch.expm1(-r2 * h)
- phi_1 = torch.expm1(-h)
- phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
- phi_2 = phi_1 / h + 1.
- phi_3 = phi_2 / h - 0.5
-
- if model_s is None:
- model_s = self.model_fn(x, s)
- if model_s1 is None:
- x_s1 = (
- expand_dims(sigma_s1 / sigma_s, dims) * x
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
- )
- model_s1 = self.model_fn(x_s1, s1)
- x_s2 = (
- expand_dims(sigma_s2 / sigma_s, dims) * x
- - expand_dims(alpha_s2 * phi_12, dims) * model_s
- + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
- )
- model_s2 = self.model_fn(x_s2, s2)
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
- )
- elif solver_type == 'taylor':
- D1_0 = (1. / r1) * (model_s1 - model_s)
- D1_1 = (1. / r2) * (model_s2 - model_s)
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
- x_t = (
- expand_dims(sigma_t / sigma_s, dims) * x
- - expand_dims(alpha_t * phi_1, dims) * model_s
- + expand_dims(alpha_t * phi_2, dims) * D1
- - expand_dims(alpha_t * phi_3, dims) * D2
- )
- else:
- phi_11 = torch.expm1(r1 * h)
- phi_12 = torch.expm1(r2 * h)
- phi_1 = torch.expm1(h)
- phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
- phi_2 = phi_1 / h - 1.
- phi_3 = phi_2 / h - 0.5
-
- if model_s is None:
- model_s = self.model_fn(x, s)
- if model_s1 is None:
- x_s1 = (
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
- )
- model_s1 = self.model_fn(x_s1, s1)
- x_s2 = (
- expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
- - expand_dims(sigma_s2 * phi_12, dims) * model_s
- - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
- )
- model_s2 = self.model_fn(x_s2, s2)
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
- )
- elif solver_type == 'taylor':
- D1_0 = (1. / r1) * (model_s1 - model_s)
- D1_1 = (1. / r2) * (model_s2 - model_s)
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
- - expand_dims(sigma_t * phi_1, dims) * model_s
- - expand_dims(sigma_t * phi_2, dims) * D1
- - expand_dims(sigma_t * phi_3, dims) * D2
- )
-
- if return_intermediate:
- return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
- else:
- return x_t
-
- def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
- """
- Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
- Args:
-            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
- model_prev_list: A list of pytorch tensor. The previous computed model values.
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if solver_type not in ['dpm_solver', 'taylor']:
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
- ns = self.noise_schedule
- dims = x.dim()
- model_prev_1, model_prev_0 = model_prev_list
- t_prev_1, t_prev_0 = t_prev_list
- lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
- t_prev_0), ns.marginal_lambda(t)
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
- alpha_t = torch.exp(log_alpha_t)
-
- h_0 = lambda_prev_0 - lambda_prev_1
- h = lambda_t - lambda_prev_0
- r0 = h_0 / h
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
- if self.predict_x0:
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(sigma_t / sigma_prev_0, dims) * x
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
- - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
- )
- elif solver_type == 'taylor':
- x_t = (
- expand_dims(sigma_t / sigma_prev_0, dims) * x
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
- )
- else:
- if solver_type == 'dpm_solver':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
- - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
- )
- elif solver_type == 'taylor':
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
- )
- return x_t
-
- def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
- """
- Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
- Args:
-            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
- model_prev_list: A list of pytorch tensor. The previous computed model values.
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- ns = self.noise_schedule
- dims = x.dim()
- model_prev_2, model_prev_1, model_prev_0 = model_prev_list
- t_prev_2, t_prev_1, t_prev_0 = t_prev_list
- lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
- t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
- alpha_t = torch.exp(log_alpha_t)
-
- h_1 = lambda_prev_1 - lambda_prev_2
- h_0 = lambda_prev_0 - lambda_prev_1
- h = lambda_t - lambda_prev_0
- r0, r1 = h_0 / h, h_1 / h
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
- D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
- D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
- D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
- if self.predict_x0:
- x_t = (
- expand_dims(sigma_t / sigma_prev_0, dims) * x
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
- - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
- )
- else:
- x_t = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
- - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
- )
- return x_t
-
- def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
- r2=None):
- """
- Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
- Args:
- x: A pytorch tensor. The initial value at time `s`.
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- r1: A `float`. The hyperparameter of the second-order or third-order solver.
- r2: A `float`. The hyperparameter of the third-order solver.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if order == 1:
- return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
- elif order == 2:
- return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
- solver_type=solver_type, r1=r1)
- elif order == 3:
- return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
- solver_type=solver_type, r1=r1, r2=r2)
- else:
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
-
- def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
- """
- Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
- Args:
-            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
- model_prev_list: A list of pytorch tensor. The previous computed model values.
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
- order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- Returns:
- x_t: A pytorch tensor. The approximated solution at time `t`.
- """
- if order == 1:
- return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
- elif order == 2:
- return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
- elif order == 3:
- return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
- else:
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
-
- def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
- solver_type='dpm_solver'):
- """
- The adaptive step size solver based on singlestep DPM-Solver.
- Args:
- x: A pytorch tensor. The initial value at time `t_T`.
- order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
- t_T: A `float`. The starting time of the sampling (default is T).
- t_0: A `float`. The ending time of the sampling (default is epsilon).
- h_init: A `float`. The initial step size (for logSNR).
- atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
- rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
- theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
- t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
- current time and `t_0` is less than `t_err`. The default setting is 1e-5.
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
-                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
- Returns:
- x_0: A pytorch tensor. The approximated solution at time `t_0`.
- [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
- """
- ns = self.noise_schedule
- s = t_T * torch.ones((x.shape[0],)).to(x)
- lambda_s = ns.marginal_lambda(s)
- lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
- h = h_init * torch.ones_like(s).to(x)
- x_prev = x
- nfe = 0
- if order == 2:
- r1 = 0.5
- lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
- solver_type=solver_type,
- **kwargs)
- elif order == 3:
- r1, r2 = 1. / 3., 2. / 3.
- lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
- return_intermediate=True,
- solver_type=solver_type)
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
- solver_type=solver_type,
- **kwargs)
- else:
- raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
- while torch.abs((s - t_0)).mean() > t_err:
- t = ns.inverse_lambda(lambda_s + h)
- x_lower, lower_noise_kwargs = lower_update(x, s, t)
- x_higher = higher_update(x, s, t, **lower_noise_kwargs)
- delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
- norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
- E = norm_fn((x_higher - x_lower) / delta).max()
- if torch.all(E <= 1.):
- x = x_higher
- s = t
- x_prev = x_lower
- lambda_s = ns.marginal_lambda(s)
- h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
- nfe += order
- print('adaptive solver nfe', nfe)
- return x
-
- def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
- method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
- atol=0.0078, rtol=0.05,
- ):
- """
- Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
- =====================================================
- We support the following algorithms for both noise prediction model and data prediction model:
- - 'singlestep':
- Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
- We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
- The total number of function evaluations (NFE) == `steps`.
- Given a fixed NFE == `steps`, the sampling procedure is:
- - If `order` == 1:
- - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
- - If `order` == 2:
- - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
- - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
- - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
- - If `order` == 3:
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
- - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
- - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
- - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
- - 'multistep':
- Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
- We initialize the first `order` values by lower order multistep solvers.
- Given a fixed NFE == `steps`, the sampling procedure is:
- Denote K = steps.
- - If `order` == 1:
- - We use K steps of DPM-Solver-1 (i.e. DDIM).
- - If `order` == 2:
- - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.
- - If `order` == 3:
- - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.
- - 'singlestep_fixed':
- Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
- We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
- - 'adaptive':
- Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
- We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
-                You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
- (NFE) and the sample quality.
- - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
- - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
- =====================================================
-        Some advice for choosing the algorithm:
- - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
- Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
- e.g.
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
- skip_type='time_uniform', method='singlestep')
- - For **guided sampling with large guidance scale** by DPMs:
- Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
- e.g.
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
- skip_type='time_uniform', method='multistep')
- We support three types of `skip_type`:
-            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
-            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
- - 'time_quadratic': quadratic time for the time steps.
- =====================================================
- Args:
- x: A pytorch tensor. The initial value at time `t_start`
- e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
- steps: A `int`. The total number of function evaluations (NFE).
- t_start: A `float`. The starting time of the sampling.
-                If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
- t_end: A `float`. The ending time of the sampling.
- If `t_end` is None, we use 1. / self.noise_schedule.total_N.
- e.g. if total_N == 1000, we have `t_end` == 1e-3.
- For discrete-time DPMs:
- - We recommend `t_end` == 1. / self.noise_schedule.total_N.
- For continuous-time DPMs:
- - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
- order: A `int`. The order of DPM-Solver.
- skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
- method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
- denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
- Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
-                This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
-                score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID of samples
-                drawn by diffusion SDEs for low-resolution images (such as CIFAR-10). However,
-                we observed that it does not matter for high-resolution images. As it needs an
-                additional NFE, we do not recommend it for high-resolution images.
- lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
- Only valid for `method=multistep` and `steps < 15`. We empirically find that
- this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
-                (especially for steps <= 10). So we recommend setting it to `True`.
- solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
- atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
- rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
- Returns:
- x_end: A pytorch tensor. The approximated solution at time `t_end`.
- """
- t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
- t_T = self.noise_schedule.T if t_start is None else t_start
- device = x.device
- if method == 'adaptive':
- with torch.no_grad():
- x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
- solver_type=solver_type)
- elif method == 'multistep':
- assert steps >= order
- timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
- assert timesteps.shape[0] - 1 == steps
- with torch.no_grad():
- vec_t = timesteps[0].expand((x.shape[0]))
- model_prev_list = [self.model_fn(x, vec_t)]
- t_prev_list = [vec_t]
- # Init the first `order` values by lower order multistep DPM-Solver.
- for init_order in tqdm(range(1, order), desc="DPM init order"):
- vec_t = timesteps[init_order].expand(x.shape[0])
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
- solver_type=solver_type)
- model_prev_list.append(self.model_fn(x, vec_t))
- t_prev_list.append(vec_t)
- # Compute the remaining values by `order`-th order multistep DPM-Solver.
- for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
- vec_t = timesteps[step].expand(x.shape[0])
- if lower_order_final and steps < 15:
- step_order = min(order, steps + 1 - step)
- else:
- step_order = order
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
- solver_type=solver_type)
- for i in range(order - 1):
- t_prev_list[i] = t_prev_list[i + 1]
- model_prev_list[i] = model_prev_list[i + 1]
- t_prev_list[-1] = vec_t
- # We do not need to evaluate the final model value.
- if step < steps:
- model_prev_list[-1] = self.model_fn(x, vec_t)
- elif method in ['singlestep', 'singlestep_fixed']:
- if method == 'singlestep':
- timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
- skip_type=skip_type,
- t_T=t_T, t_0=t_0,
- device=device)
- elif method == 'singlestep_fixed':
- K = steps // order
- orders = [order, ] * K
- timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
- for i, order in enumerate(orders):
- t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
- timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
- N=order, device=device)
- lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
- vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
- h = lambda_inner[-1] - lambda_inner[0]
- r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
- r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
- x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
- if denoise_to_zero:
- x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
- return x
-
-
-#############################################################
-# other utility functions
-#############################################################
-
-def interpolate_fn(x, xp, yp):
- """
- A piecewise linear function y = f(x), using xp and yp as keypoints.
- We implement f(x) in a differentiable way (i.e. applicable for autograd).
-    The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
- Args:
- x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
- xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
- yp: PyTorch tensor with shape [C, K].
- Returns:
- The function values f(x), with shape [N, C].
- """
- N, K = x.shape[0], xp.shape[1]
- all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
- sorted_all_x, x_indices = torch.sort(all_x, dim=2)
- x_idx = torch.argmin(x_indices, dim=2)
- cand_start_idx = x_idx - 1
- start_idx = torch.where(
- torch.eq(x_idx, 0),
- torch.tensor(1, device=x.device),
- torch.where(
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
- ),
- )
- end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
- start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
- end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
- start_idx2 = torch.where(
- torch.eq(x_idx, 0),
- torch.tensor(0, device=x.device),
- torch.where(
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
- ),
- )
- y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
- start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
- end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
- cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
- return cand
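A minimal usage sketch of `interpolate_fn`, assuming the function above is importable; the keypoints and query values are made up for illustration:

```python
import torch

# One channel (C = 1), three keypoints; queries inside and beyond the keypoint range.
xp = torch.tensor([[0.0, 1.0, 2.0]])        # [C, K]
yp = torch.tensor([[0.0, 10.0, 20.0]])      # [C, K]
x = torch.tensor([[0.5], [1.5], [3.0]])     # [N, C]
print(interpolate_fn(x, xp, yp))            # ~[[5.], [15.], [30.]]; 3.0 is linearly extrapolated
```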
-
-
-def expand_dims(v, dims):
- """
-    Expand the tensor `v` to have `dims` dimensions in total.
-    Args:
-        `v`: a PyTorch tensor with shape [N].
-        `dims`: an `int`, the target number of dimensions.
-    Returns:
-        a PyTorch tensor with shape [N, 1, 1, ..., 1] whose total number of dimensions is `dims`.
- """
- return v[(...,) + (None,) * (dims - 1)]
\ No newline at end of file
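A quick sketch of `expand_dims`, which is handy for broadcasting a per-sample scalar (e.g. a timestep coefficient) against an image batch; the shapes are illustrative:

```python
import torch

v = torch.tensor([0.1, 0.2])   # shape [N]
out = expand_dims(v, 4)        # shape [N, 1, 1, 1]
print(out.shape)               # torch.Size([2, 1, 1, 1])
```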
diff --git a/spaces/shibing624/ChatPDF/modules/models/modeling_moss.py b/spaces/shibing624/ChatPDF/modules/models/modeling_moss.py
deleted file mode 100644
index b7adea5bca857f7fdd6399dde7ce359f8f8cecfe..0000000000000000000000000000000000000000
--- a/spaces/shibing624/ChatPDF/modules/models/modeling_moss.py
+++ /dev/null
@@ -1,711 +0,0 @@
-""" PyTorch Moss model."""
-
-from typing import Optional, Tuple, Union
-
-import torch
-import torch.utils.checkpoint
-from torch import nn
-from torch.nn import CrossEntropyLoss
-
-from transformers.activations import ACT2FN
-from transformers.modeling_utils import PreTrainedModel
-from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
-from transformers.utils import (
- add_code_sample_docstrings,
- add_start_docstrings,
- add_start_docstrings_to_model_forward,
- logging
-)
-
-from .configuration_moss import MossConfig
-
-
-logger = logging.get_logger(__name__)
-
-_CHECKPOINT_FOR_DOC = "fnlp/moss-moon-003-base"
-_CONFIG_FOR_DOC = "MossConfig"
-
-
-MOSS_PRETRAINED_MODEL_ARCHIVE_LIST = [
- "fnlp/moss-moon-003-base",
- "fnlp/moss-moon-003-sft",
- "fnlp/moss-moon-003-sft-plugin",
-]
-
-
-# Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions
-def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
- inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
- sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.float), inv_freq).float()
- return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
-
-
-# Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
-def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
- x1 = x[:, :, :, ::2]
- x2 = x[:, :, :, 1::2]
- x = torch.stack((-x2, x1), dim=-1)
- return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
-
-
-# Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
-def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
- sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
- cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
- return (tensor * cos) + (rotate_every_two(tensor) * sin)
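A shape sketch of the two rotary helpers above, using small hypothetical sizes (batch 2, sequence 5, 4 heads, rotary_dim 8); only the shapes, not the config values, come from the code above:

```python
import torch

batch, seq, heads, rotary_dim = 2, 5, 4, 8
pos = create_sinusoidal_positions(num_pos=16, dim=rotary_dim)     # [16, rotary_dim]
position_ids = torch.arange(seq).unsqueeze(0).expand(batch, -1)   # [batch, seq]
sincos = pos[position_ids]                                        # [batch, seq, rotary_dim]
sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)     # each [batch, seq, rotary_dim // 2]
q = torch.randn(batch, seq, heads, rotary_dim)
q_rot = apply_rotary_pos_emb(q, sin, cos)                         # same shape as q
print(q_rot.shape)                                                # torch.Size([2, 5, 4, 8])
```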
-
-
-class MossAttention(nn.Module):
- def __init__(self, config):
- super().__init__()
-
- max_positions = config.max_position_embeddings
- self.register_buffer(
- "causal_mask",
- torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
- 1, 1, max_positions, max_positions
- ),
- )
-
- self.attn_dropout = nn.Dropout(config.attn_pdrop)
- self.resid_dropout = nn.Dropout(config.resid_pdrop)
-
- self.embed_dim = config.hidden_size
- self.num_attention_heads = config.num_attention_heads
- self.head_dim = self.embed_dim // self.num_attention_heads
- if self.head_dim * self.num_attention_heads != self.embed_dim:
- raise ValueError(
- f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
- f" `num_attention_heads`: {self.num_attention_heads})."
- )
- self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
- self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
-
- self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
- self.rotary_dim = config.rotary_dim
- pos_embd_dim = self.rotary_dim or self.embed_dim
- self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
-
- def _split_heads(self, x, n_head, dim_head, mp_num):
- reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
- reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
- return reshaped
-
- def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
- """
-        Merges the attn_head_size dim and num_attn_heads dim back into the hidden dim
- """
- if len(tensor.shape) == 5:
- tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
- elif len(tensor.shape) == 4:
- tensor = tensor.permute(0, 2, 1, 3).contiguous()
- else:
- raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
- new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
- return tensor.view(new_shape)
-
- def _attn(
- self,
- query,
- key,
- value,
- attention_mask=None,
- head_mask=None,
- ):
- # compute causal mask from causal mask buffer
- query_length, key_length = query.size(-2), key.size(-2)
- causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]
-
- # Keep the attention weights computation in fp32 to avoid overflow issues
- query = query.to(torch.float32)
- key = key.to(torch.float32)
-
- attn_weights = torch.matmul(query, key.transpose(-1, -2))
-
- attn_weights = attn_weights / self.scale_attn
- mask_value = torch.finfo(attn_weights.dtype).min
- # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
- # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
- mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
- attn_weights = torch.where(causal_mask, attn_weights, mask_value)
-
- if attention_mask is not None:
- # Apply the attention mask
- attn_weights = attn_weights + attention_mask
-
- attn_weights = nn.Softmax(dim=-1)(attn_weights)
- attn_weights = attn_weights.to(value.dtype)
- attn_weights = self.attn_dropout(attn_weights)
-
- # Mask heads if we want to
- if head_mask is not None:
- attn_weights = attn_weights * head_mask
-
- attn_output = torch.matmul(attn_weights, value)
-
- return attn_output, attn_weights
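A self-contained sketch of how `_attn` slices the cached `causal_mask` buffer during incremental decoding; the sizes are hypothetical:

```python
import torch

max_positions = 8
causal_mask = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
    1, 1, max_positions, max_positions
)
query_length, key_length = 1, 5   # one new token attending to 5 cached keys
mask = causal_mask[:, :, key_length - query_length:key_length, :key_length]
print(mask.shape, mask.all().item())   # torch.Size([1, 1, 1, 5]) True
```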
-
- def forward(
- self,
- hidden_states: Optional[torch.FloatTensor],
- layer_past: Optional[Tuple[torch.Tensor]] = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- use_cache: Optional[bool] = False,
- output_attentions: Optional[bool] = False,
- ) -> Union[
- Tuple[torch.Tensor, Tuple[torch.Tensor]],
- Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
- ]:
- qkv = self.qkv_proj(hidden_states)
- # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
- mp_num = 4
- qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
-
- local_dim = self.head_dim * self.num_attention_heads // mp_num
- query, value, key = torch.split(qkv_split, local_dim, dim=-1)
- query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
- key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
-
- value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
- value = value.permute(0, 2, 1, 3)
-
- embed_positions = self.embed_positions
- if embed_positions.device != position_ids.device:
- embed_positions = embed_positions.to(position_ids.device)
- self.embed_positions = embed_positions
-
- sincos = embed_positions[position_ids]
- sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
-
- if self.rotary_dim is not None:
- k_rot = key[:, :, :, : self.rotary_dim]
- k_pass = key[:, :, :, self.rotary_dim :]
-
- q_rot = query[:, :, :, : self.rotary_dim]
- q_pass = query[:, :, :, self.rotary_dim :]
-
- k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
- q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
-
- key = torch.cat([k_rot, k_pass], dim=-1)
- query = torch.cat([q_rot, q_pass], dim=-1)
- else:
- key = apply_rotary_pos_emb(key, sin, cos)
- query = apply_rotary_pos_emb(query, sin, cos)
-
- key = key.permute(0, 2, 1, 3)
- query = query.permute(0, 2, 1, 3)
-
- if layer_past is not None:
- past_key = layer_past[0]
- past_value = layer_past[1]
- key = torch.cat((past_key, key), dim=-2)
- value = torch.cat((past_value, value), dim=-2)
-
- if use_cache is True:
- present = (key, value)
- else:
- present = None
-
- # compute self-attention: V x Softmax(QK^T)
- attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
-
- attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
- attn_output = self.out_proj(attn_output)
- attn_output = self.resid_dropout(attn_output)
-
- outputs = (attn_output, present)
- if output_attentions:
- outputs += (attn_weights,)
-
- return outputs # a, present, (attentions)
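A shape sketch of the `mp_num`-grouped qkv split near the top of `forward` above; the dimensions here are made up for illustration (the real ones come from the config):

```python
import torch

batch, seq, embed_dim, n_head, mp_num = 1, 3, 16, 4, 4
head_dim = embed_dim // n_head                                   # 4
qkv = torch.randn(batch, seq, embed_dim * 3)                     # output of qkv_proj
qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))           # [1, 3, 4, 12]
local_dim = head_dim * n_head // mp_num                          # 4
query, value, key = torch.split(qkv_split, local_dim, dim=-1)    # each [1, 3, 4, 4]
```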
-
-
-# Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->Moss
-class MossMLP(nn.Module):
- def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
- super().__init__()
- embed_dim = config.n_embd
-
- self.fc_in = nn.Linear(embed_dim, intermediate_size)
- self.fc_out = nn.Linear(intermediate_size, embed_dim)
-
- self.act = ACT2FN[config.activation_function]
- self.dropout = nn.Dropout(config.resid_pdrop)
-
- def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
- hidden_states = self.fc_in(hidden_states)
- hidden_states = self.act(hidden_states)
- hidden_states = self.fc_out(hidden_states)
- hidden_states = self.dropout(hidden_states)
- return hidden_states
-
-
-# Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->Moss
-class MossBlock(nn.Module):
- def __init__(self, config):
- super().__init__()
- inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
- self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
- self.attn = MossAttention(config)
- self.mlp = MossMLP(inner_dim, config)
-
- def forward(
- self,
- hidden_states: Optional[torch.FloatTensor],
- layer_past: Optional[Tuple[torch.Tensor]] = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- use_cache: Optional[bool] = False,
- output_attentions: Optional[bool] = False,
- ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
- residual = hidden_states
- hidden_states = self.ln_1(hidden_states)
- attn_outputs = self.attn(
- hidden_states=hidden_states,
- layer_past=layer_past,
- attention_mask=attention_mask,
- position_ids=position_ids,
- head_mask=head_mask,
- use_cache=use_cache,
- output_attentions=output_attentions,
- )
- attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
- outputs = attn_outputs[1:]
-
- feed_forward_hidden_states = self.mlp(hidden_states)
- hidden_states = attn_output + feed_forward_hidden_states + residual
-
- if use_cache:
- outputs = (hidden_states,) + outputs
- else:
- outputs = (hidden_states,) + outputs[1:]
-
- return outputs # hidden_states, present, (attentions)
-
-
-class MossPreTrainedModel(PreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- config_class = MossConfig
- base_model_prefix = "transformer"
- supports_gradient_checkpointing = True
- _no_split_modules = ["MossBlock"]
-
- def __init__(self, *inputs, **kwargs):
- super().__init__(*inputs, **kwargs)
-
- def _init_weights(self, module):
- """Initialize the weights."""
- if isinstance(module, (nn.Linear,)):
- # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.Embedding):
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
- def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, MossModel):
- module.gradient_checkpointing = value
-
-
-MOSS_START_DOCSTRING = r"""
- This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
- it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
- behavior.
-
- Parameters:
- config ([`MossConfig`]): Model configuration class with all the parameters of the model.
- Initializing with a config file does not load the weights associated with the model, only the
- configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
-"""
-
-MOSS_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`torch.LongTensor` of shape `({0})`):
- Indices of input sequence tokens in the vocabulary.
-
-            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
-
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
-
- [What are attention masks?](../glossary#attention-mask)
- token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
- Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
- 1]`:
-
- - 0 corresponds to a *sentence A* token,
- - 1 corresponds to a *sentence B* token.
-
- [What are token type IDs?](../glossary#token-type-ids)
- position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
- Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
- config.n_positions - 1]`.
-
- [What are position IDs?](../glossary#position-ids)
- head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
- Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
-
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
-
- inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
- is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
- model's internal embedding lookup matrix.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
- tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-
-@add_start_docstrings(
- "The bare Moss Model transformer outputting raw hidden-states without any specific head on top.",
- MOSS_START_DOCSTRING,
-)
-class MossModel(MossPreTrainedModel):
- def __init__(self, config):
- super().__init__(config)
-
- self.embed_dim = config.n_embd
- self.vocab_size = config.vocab_size
- self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
- self.drop = nn.Dropout(config.embd_pdrop)
- self.h = nn.ModuleList([MossBlock(config) for _ in range(config.n_layer)])
- self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
- self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
-
- self.gradient_checkpointing = False
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.wte
-
- def set_input_embeddings(self, new_embeddings):
- self.wte = new_embeddings
-
- @add_start_docstrings_to_model_forward(MOSS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @add_code_sample_docstrings(
- checkpoint=_CHECKPOINT_FOR_DOC,
- output_type=BaseModelOutputWithPast,
- config_class=_CONFIG_FOR_DOC,
- )
- def forward(
- self,
- input_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, BaseModelOutputWithPast]:
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- input_shape = input_ids.size()
- input_ids = input_ids.view(-1, input_shape[-1])
- batch_size = input_ids.shape[0]
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- batch_size = inputs_embeds.shape[0]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- if token_type_ids is not None:
- token_type_ids = token_type_ids.view(-1, input_shape[-1])
-
- if position_ids is not None:
- position_ids = position_ids.view(-1, input_shape[-1]).long()
-
- if past_key_values is None:
- past_length = 0
- past_key_values = tuple([None] * len(self.h))
- else:
- past_length = past_key_values[0][0].size(-2)
-
- if position_ids is None:
- position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
- position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
-
- # Attention mask.
- if attention_mask is not None:
- if batch_size <= 0:
- raise ValueError("batch_size has to be defined and > 0")
- attention_mask = attention_mask.view(batch_size, -1)
- # We create a 3D attention mask from a 2D tensor mask.
- # Sizes are [batch_size, 1, 1, to_seq_length]
- # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
- # this attention mask is more simple than the triangular masking of causal attention
- # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
- attention_mask = attention_mask[:, None, None, :]
-
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
- # masked positions, this operation will create a tensor which is 0.0 for
- # positions we want to attend and the dtype's smallest value for masked positions.
- # Since we are adding it to the raw scores before the softmax, this is
- # effectively the same as removing these entirely.
- attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
- attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape bsz x num_attention_heads x N x N
- # head_mask has shape n_layer x batch x num_attention_heads x N x N
- head_mask = self.get_head_mask(head_mask, self.config.n_layer)
-
- if inputs_embeds is None:
- inputs_embeds = self.wte(input_ids)
-
- hidden_states = inputs_embeds
-
- if token_type_ids is not None:
- token_type_embeds = self.wte(token_type_ids)
- hidden_states = hidden_states + token_type_embeds
-
- hidden_states = self.drop(hidden_states)
-
- output_shape = input_shape + (hidden_states.size(-1),)
-
- if self.gradient_checkpointing and self.training:
- if use_cache:
- logger.warning_once(
- "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
- "`use_cache=False`..."
- )
- use_cache = False
-
- presents = () if use_cache else None
- all_self_attentions = () if output_attentions else None
- all_hidden_states = () if output_hidden_states else None
- for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- if self.gradient_checkpointing and self.training:
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- # None for past_key_value
- return module(*inputs, use_cache, output_attentions)
-
- return custom_forward
-
- outputs = torch.utils.checkpoint.checkpoint(
- create_custom_forward(block),
- hidden_states,
- None,
- attention_mask,
- position_ids,
- head_mask[i],
- )
- else:
- outputs = block(
- hidden_states=hidden_states,
- layer_past=layer_past,
- attention_mask=attention_mask,
- position_ids=position_ids,
- head_mask=head_mask[i],
- use_cache=use_cache,
- output_attentions=output_attentions,
- )
-
- hidden_states = outputs[0]
- if use_cache is True:
- presents = presents + (outputs[1],)
-
- if output_attentions:
- all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
-
- hidden_states = self.ln_f(hidden_states)
-
- hidden_states = hidden_states.view(output_shape)
- # Add last hidden state
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- if not return_dict:
- return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
-
- return BaseModelOutputWithPast(
- last_hidden_state=hidden_states,
- past_key_values=presents,
- hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- )
-
-
-@add_start_docstrings(
- """
- The Moss Model transformer with a language modeling head on top.
- """,
- MOSS_START_DOCSTRING,
-)
-class MossForCausalLM(MossPreTrainedModel):
- _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.causal_mask"]
-
- def __init__(self, config):
- super().__init__(config)
- self.transformer = MossModel(config)
- self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_output_embeddings(self):
- return self.lm_head
-
- def set_output_embeddings(self, new_embeddings):
- self.lm_head = new_embeddings
-
- def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
- token_type_ids = kwargs.get("token_type_ids", None)
- # only last token for inputs_ids if past is defined in kwargs
- if past_key_values:
- input_ids = input_ids[:, -1].unsqueeze(-1)
- if token_type_ids is not None:
- token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
-
- attention_mask = kwargs.get("attention_mask", None)
- position_ids = kwargs.get("position_ids", None)
-
- if attention_mask is not None and position_ids is None:
- # create position_ids on the fly for batch generation
- position_ids = attention_mask.long().cumsum(-1) - 1
- position_ids.masked_fill_(attention_mask == 0, 1)
- if past_key_values:
- position_ids = position_ids[:, -1].unsqueeze(-1)
-
- return {
- "input_ids": input_ids,
- "past_key_values": past_key_values,
- "use_cache": kwargs.get("use_cache"),
- "position_ids": position_ids,
- "attention_mask": attention_mask,
- "token_type_ids": token_type_ids,
- }
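The `position_ids` trick above (cumulative sum of the attention mask) is easiest to see on a tiny, made-up left-padded batch:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])       # two padding tokens on the left
position_ids = attention_mask.long().cumsum(-1) - 1     # [[-1, -1, 0, 1, 2]]
position_ids.masked_fill_(attention_mask == 0, 1)       # [[ 1,  1, 0, 1, 2]]
```

Padding positions receive a dummy value (1) but are masked out of attention anyway, so only the real tokens get meaningful positions.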
-
- @add_start_docstrings_to_model_forward(MOSS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @add_code_sample_docstrings(
- checkpoint=_CHECKPOINT_FOR_DOC,
- output_type=CausalLMOutputWithPast,
- config_class=_CONFIG_FOR_DOC,
- )
- def forward(
- self,
- input_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, CausalLMOutputWithPast]:
- r"""
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
-            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
-            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- transformer_outputs = self.transformer(
- input_ids,
- past_key_values=past_key_values,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = transformer_outputs[0]
-
- # make sure sampling in fp16 works correctly and
- # compute loss in fp32 to match with mesh-tf version
- # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
- lm_logits = self.lm_head(hidden_states).to(torch.float32)
-
- loss = None
- if labels is not None:
- # Shift so that tokens < n predict n
- shift_logits = lm_logits[..., :-1, :].contiguous()
- shift_labels = labels[..., 1:].contiguous()
- # Flatten the tokens
- loss_fct = CrossEntropyLoss()
- loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
-
- loss = loss.to(hidden_states.dtype)
-
- if not return_dict:
- output = (lm_logits,) + transformer_outputs[1:]
- return ((loss,) + output) if loss is not None else output
-
- return CausalLMOutputWithPast(
- loss=loss,
- logits=lm_logits,
- past_key_values=transformer_outputs.past_key_values,
- hidden_states=transformer_outputs.hidden_states,
- attentions=transformer_outputs.attentions,
- )
-
- @staticmethod
- def _reorder_cache(
- past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
- ) -> Tuple[Tuple[torch.Tensor]]:
- """
- This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
- [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
- beam_idx at every generation step.
- """
- return tuple(
- tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
- for layer_past in past_key_values
- )
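A minimal sketch of what `_reorder_cache` does for a single layer of a hypothetical cache (beam size 4, 2 heads, 7 cached tokens, head size 8):

```python
import torch

layer_past = (torch.randn(4, 2, 7, 8), torch.randn(4, 2, 7, 8))   # (key, value)
beam_idx = torch.tensor([2, 2, 0, 1])                              # surviving beam for each slot
reordered = tuple(t.index_select(0, beam_idx) for t in layer_past)
print(reordered[0].shape)                                          # torch.Size([4, 2, 7, 8])
```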
diff --git a/spaces/shivammittal274/LLM_CA/query_data.py b/spaces/shivammittal274/LLM_CA/query_data.py
deleted file mode 100644
index 4aa3fcd741ac3dc4076417d58e13bcd1ed510035..0000000000000000000000000000000000000000
--- a/spaces/shivammittal274/LLM_CA/query_data.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from langchain.prompts.prompt import PromptTemplate
-from langchain.llms import OpenAI
-from langchain.chains import ConversationalRetrievalChain, RetrievalQAWithSourcesChain
-
-
-_template = """You are a CA (Chartered Accountant) in India and you know all the tax rules: HRA, 80C, 80D, 80CCD1, 80CCD2, 80DDB. Try to answer the question from the relevant data; otherwise, you may also use your own knowledge.
-Chat History:
-{chat_history}
-Follow Up Input: {question}
-Standalone question:"""
-CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
-
-template = """You are an AI assistant for answering questions about the most recent state of the union address.
-You are given the following extracted parts of a long document and a question. Provide a conversational answer.
-If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
-If the question is not about the most recent state of the union, politely inform them that you are tuned to only answer questions about the most recent state of the union.
-Question: {question}
-=========
-{context}
-=========
-Answer in Markdown:"""
-QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
-
-
-def get_chain(llm, vectorstore):
- vectorstore = vectorstore.as_retriever(search_kwargs={'k': 4})
-
- qa_chain = ConversationalRetrievalChain.from_llm(
- llm,
- vectorstore,
- # qa_prompt=QA_PROMPT,
- # condense_question_prompt=CONDENSE_QUESTION_PROMPT,
- )
- # retriever = vectorstore.as_retriever(
- # search_type='similarity', search_kwargs={'k': 1})
-
- # qa_chain = RetrievalQAWithSourcesChain.from_chain_type(
- # llm, chain_type="stuff", retriever=retriever)
- return qa_chain
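A hypothetical usage sketch of `get_chain`, assuming the pre-0.1 LangChain API this file targets, a configured OpenAI API key, and the `faiss` package; the vector store contents and question are made up:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(
    ["Section 80C allows deductions of up to 1.5 lakh per financial year."],
    OpenAIEmbeddings(),
)
chain = get_chain(OpenAI(temperature=0), vectorstore)
result = chain({"question": "What is the 80C deduction limit?", "chat_history": []})
print(result["answer"])
```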
-
-
-
-
diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/registry.py b/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/registry.py
deleted file mode 100644
index 655753b3b9cbd0cfe73fe93a77cf1fcc3db6d827..0000000000000000000000000000000000000000
--- a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/registry.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Modified from: https://github.com/facebookresearch/fvcore/blob/master/fvcore/common/registry.py # noqa: E501
-
-
-class Registry():
- """
- The registry that provides name -> object mapping, to support third-party
- users' custom modules.
-
- To create a registry (e.g. a backbone registry):
-
- .. code-block:: python
-
- BACKBONE_REGISTRY = Registry('BACKBONE')
-
- To register an object:
-
- .. code-block:: python
-
- @BACKBONE_REGISTRY.register()
- class MyBackbone():
- ...
-
- Or:
-
- .. code-block:: python
-
- BACKBONE_REGISTRY.register(MyBackbone)
- """
-
- def __init__(self, name):
- """
- Args:
- name (str): the name of this registry
- """
- self._name = name
- self._obj_map = {}
-
- def _do_register(self, name, obj):
- assert (name not in self._obj_map), (f"An object named '{name}' was already registered "
- f"in '{self._name}' registry!")
- self._obj_map[name] = obj
-
- def register(self, obj=None):
- """
-        Register the given object under the name `obj.__name__`.
- Can be used as either a decorator or not.
- See docstring of this class for usage.
- """
- if obj is None:
- # used as a decorator
- def deco(func_or_class):
- name = func_or_class.__name__
- self._do_register(name, func_or_class)
- return func_or_class
-
- return deco
-
- # used as a function call
- name = obj.__name__
- self._do_register(name, obj)
-
- def get(self, name):
- ret = self._obj_map.get(name)
- if ret is None:
- raise KeyError(f"No object named '{name}' found in '{self._name}' registry!")
- return ret
-
- def __contains__(self, name):
- return name in self._obj_map
-
- def __iter__(self):
- return iter(self._obj_map.items())
-
- def keys(self):
- return self._obj_map.keys()
-
-
-DATASET_REGISTRY = Registry('dataset')
-ARCH_REGISTRY = Registry('arch')
-MODEL_REGISTRY = Registry('model')
-LOSS_REGISTRY = Registry('loss')
-METRIC_REGISTRY = Registry('metric')
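A small usage sketch of the registries defined above; `MyToyModel` is a made-up class for illustration:

```python
@MODEL_REGISTRY.register()
class MyToyModel:
    def __init__(self, scale=2):
        self.scale = scale

assert 'MyToyModel' in MODEL_REGISTRY
model_cls = MODEL_REGISTRY.get('MyToyModel')   # raises KeyError for unknown names
model = model_cls(scale=4)
```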
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/Revalver-4-Windows-Crack-LINK-132.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/Revalver-4-Windows-Crack-LINK-132.md
deleted file mode 100644
index 84a043b762bf2be141e57996bbdaa35874c0e816..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/Revalver-4-Windows-Crack-LINK-132.md
+++ /dev/null
@@ -1,78 +0,0 @@
-## Revalver 4 Windows Crack 132
-
-
-
-
-
-
-
-
-
-**CLICK HERE › [https://kneedacexbrew.blogspot.com/?d=2txiFe](https://kneedacexbrew.blogspot.com/?d=2txiFe)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# How to Crack ReValver 4 for Windows
-
-
-
-ReValver 4 is a revolutionary amp modeling software that simulates guitar amplifiers, stompboxes and effects at the component level, resulting in unmatched realism, dynamics and tone. ReValver 4 also features advanced cabinet modeling, instrument modeling, 3rd party plugin hosting, MIDI mapping, amp cloning and more.
-
-
-
-If you want to crack ReValver 4 for Windows and enjoy its full features for free, you will need to follow these steps:
-
-
-
-1. Download ReValver 4 from the official website [here](https://www.audiomediaresearch.com/) and install it on your computer.
-
-2. Download the crack file from [here](https://sway.office.com/CWYkz44Aku0oCPCL) and extract it to a folder.
-
-3. Copy the file ReValver4.dll from the crack folder and paste it into the installation directory of ReValver 4, usually C:\Program Files\Audio Media Research\ReValver 4.
-
-4. Run ReValver 4 as administrator and enjoy!
-
-
-
-Note: This crack is for version 4.160830 of ReValver 4. If you have a different version, you may need to find a different crack file or update your software.
-
-
-
-Disclaimer: This article is for educational purposes only. We do not condone piracy or illegal use of software. Please support the developers by purchasing ReValver 4 from their website if you like it.
-
-
-
-ReValver 4 is not just a simple amp simulator. It allows you to customize and tweak every aspect of your tone, from the components of the amp to the microphone placement and the room acoustics. You can also create your own amp models by using the ACT (Audio Cloning Technology) module, which analyzes the sound of any amp and reproduces it in ReValver 4.
-
-
-
-ReValver 4 also has a huge collection of stompboxes and effects that you can use to enhance your sound. You can choose from classic pedals like overdrive, distortion, chorus, flanger, delay and reverb, or experiment with more exotic effects like pitch shifter, harmonizer, ring modulator and looper. You can also load your own impulse responses to create custom cabinet and speaker simulations.
-
-
-
-ReValver 4 is not only a great tool for recording and producing guitar tracks, but also a powerful live performance solution. You can use the GIG mode to load up to 8 presets into memory and switch between them seamlessly with a MIDI controller. You can also use ReValver 4 as a plugin in your favorite DAW or host software, and integrate it with other instruments and effects.
-
-
-
-ReValver 4 is the ultimate guitar amp modeling software for any guitarist who wants to achieve realistic and versatile tones. Whether you are a beginner or a professional, ReValver 4 can help you create the sound of your dreams. You can explore hundreds of presets and modules, or create your own from scratch. You can also expand ReValver 4 with more content from the Amp Store, where you can purchase new amps, stompboxes, effects and ACT content.
-
-
-
-If you are looking for a software that can emulate any guitar amp and effect with stunning accuracy and flexibility, ReValver 4 is the perfect choice for you. Download it today and experience the power of component level modeling.
-
- 1b8d091108
-
-
-
-
-
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Build and Launch Your Own Rockets in Spaceflight Simulator - Download Now.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Build and Launch Your Own Rockets in Spaceflight Simulator - Download Now.md
deleted file mode 100644
index 921806aa5e916ca8e3a2e11a7b1dc9fc23f4a2c1..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Build and Launch Your Own Rockets in Spaceflight Simulator - Download Now.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-
Download Spaceflight Simulator: A Game That Lets You Explore the Solar System
-
Have you ever dreamed of building your own rocket and sending it to space? Have you ever wondered what it would be like to land on the Moon, Mars, or other planets? Have you ever wanted to experience the thrill of orbital mechanics and gravity assists? If you answered yes to any of these questions, then you should download Spaceflight Simulator, a game that lets you explore the solar system in a realistic and fun way.
Spaceflight Simulator is a game that simulates spaceflight using accurate physics and orbital mechanics. It is developed by Stefo Mai Morojna, an independent game developer from Romania. The game was first released in 2017 for Android devices, and later expanded to PC and online platforms. The game has received overwhelmingly positive reviews from players and critics alike, who praised its simplicity, realism, and creativity.
-
A game about building and launching rockets
-
In Spaceflight Simulator, you can build your own rocket from various parts, such as engines, fuel tanks, capsules, landing legs, solar panels, and more. You can design any rocket you want, from simple sounding rockets to complex interplanetary spacecraft. You can also customize the appearance of your rocket with different colors and skins.
-
A game with realistic physics and orbital mechanics
-
Once you have built your rocket, you can launch it and control it in flight. The game uses realistic physics to simulate the behavior of your rocket and its interaction with the environment. You can adjust the thrust, direction, and staging of your rocket as you fly. You can also see the effects of gravity, drag, atmospheric pressure, and temperature on your rocket.
-
The game also uses realistic orbital mechanics to simulate the motion of your rocket around different celestial bodies. You can see the orbit of your rocket on a map view, where you can also plan your trajectory and perform maneuvers. You can use gravity assists, orbital transfers, rendezvous, docking, and other techniques to reach your destination. You can also see the orbital parameters of your rocket, such as apoapsis, periapsis, inclination, eccentricity, and more.
-
How to download spaceflight simulator on PC
-Spaceflight simulator free download for Android
-Spaceflight simulator mod apk download
-Spaceflight simulator steam download
-Download spaceflight simulator latest version
-Spaceflight simulator download for Windows 10
-Spaceflight simulator download for Mac
-Spaceflight simulator download for iOS
-Spaceflight simulator download size
-Spaceflight simulator download link
-Download spaceflight simulator expansion pack
-Download spaceflight simulator full version
-Download spaceflight simulator game for free
-Download spaceflight simulator hack
-Download spaceflight simulator mod menu
-Download spaceflight simulator online
-Download spaceflight simulator offline
-Download spaceflight simulator premium apk
-Download spaceflight simulator pro version
-Download spaceflight simulator rocket blueprints
-Download spaceflight simulator update
-Download spaceflight simulator unlimited fuel mod
-Download spaceflight simulator with all planets unlocked
-Download spaceflight simulator with realistic graphics mod
-Download spaceflight simulator with sandbox mode
-Best website to download spaceflight simulator
-Can I download spaceflight simulator on Chromebook
-Can you download spaceflight simulator on PS4
-How do I download spaceflight simulator on laptop
-How long does it take to download spaceflight simulator
-How much does it cost to download spaceflight simulator
-Is it safe to download spaceflight simulator from third-party sources
-What are the system requirements to download spaceflight simulator
-What is the rating of spaceflight simulator on Google Play Store
-Where can I download spaceflight simulator for Linux
-Where to download spaceflight simulator mods
-Why can't I download spaceflight simulator on my device
-Spaceflight Simulator - Build and launch your own rocket game download
-Spaceflight Simulator - Explore the solar system game download
-Spaceflight Simulator - Learn about rocket physics game download
-Spaceflight Simulator - Recreate historic missions game download
-Spaceflight Simulator - Sandbox mode game download
-Spaceflight Simulator - Simulation game download
-Spaceflight Simulator - Steam game download
-Spaceflight Simulator - Unlimited fuel mod game download
-
A game with a variety of planets and moons to visit
-
The game features a realistically scaled solar system with several planets and moons to explore. You can visit Mercury, Venus, Earth, Moon, Mars, Phobos, Deimos, and more. Each world has its own characteristics, such as size, gravity, atmosphere, terrain, rotation, and tilt. You can land on these worlds and deploy payloads, such as rovers, satellites, flags, or anything else you can imagine.
-
How to download Spaceflight Simulator?
-
Spaceflight Simulator is available for download on various platforms. Here are some of the options:
-
Download from Steam for PC
-
If you want to play Spaceflight Simulator on your PC, you can download it from Steam for $9.99 USD. The PC version of the game offers improved graphics, performance, controls, and features compared to the mobile version. You can also access Steam Workshop to download user-created rockets and mods. To download Spaceflight Simulator from Steam, click [here].
-
Download from Google Play for Android
-
If you want to play Spaceflight Simulator on your Android device, you can download it from Google Play for free. The Android version of the game offers a simple and intuitive interface, as well as touch-screen controls. You can also purchase in-app items to unlock more parts, skins, and planets. To download Spaceflight Simulator from Google Play, click [here].
-
Download from Lagged for online play
-
If you want to play Spaceflight Simulator online, you can download it from Lagged for free. Lagged is a website that hosts thousands of free games that you can play on any device with a web browser. The online version of the game offers the same features as the mobile version, but with a smaller screen size. You can also save your progress and share your rockets with other players. To download Spaceflight Simulator from Lagged, click [here].
-
How to play Spaceflight Simulator?
-
Spaceflight Simulator is easy to learn but hard to master. Here are some tips on how to play the game:
-
Build your own rocket from parts
-
To build your rocket, you need to select the parts from the menu and drag them to the screen. You can rotate, resize, and snap the parts together to create your desired design. You can also use the mirror tool to duplicate parts symmetrically. You can save your rocket and load it later, or export it as a file and share it with others.
-
Launch your rocket and control it in flight
-
To launch your rocket, you need to press the launch button and then the start button. You can control your rocket by using the buttons on the screen or by tilting your device. You can adjust the throttle, direction, and staging of your rocket as you fly. You can also use the map view to see your orbit and trajectory.
-
Plan your trajectory and perform maneuvers
-
To plan your trajectory, you need to use the maneuver tool on the map view. You can drag the handles on your orbit to create a maneuver node, which shows you how much delta-v (change in velocity) you need to perform the maneuver. You can also see the time and direction of the maneuver burn. To perform the maneuver, you need to align your rocket with the blue marker on the navball and fire your engines at the right time.
-
Land on different worlds and deploy payloads
-
To land on different worlds, you need to slow down your rocket and aim for a flat spot. You can use parachutes, retro rockets, or aerobraking to reduce your speed. You can also use landing legs or wheels to cushion your landing. To deploy payloads, you need to detach them from your rocket and activate them. You can use rovers, satellites, flags, or anything else you can think of.
-
Why play Spaceflight Simulator?
-
Spaceflight Simulator is more than just a game. It is also a learning tool, a challenge, and a creative outlet. Here are some reasons why you should play Spaceflight Simulator:
-
Learn about rocket science and space exploration
-
Spaceflight Simulator teaches you about the basics of rocket science and space exploration in a fun and interactive way. You can learn about how rockets work, how orbits work, how gravity works, and how to navigate in space. You can also learn about the history and future of space exploration, as well as the current missions and projects of various space agencies.
-
Challenge yourself with realistic scenarios and missions
-
Spaceflight Simulator challenges you with realistic scenarios and missions that test your skills and knowledge. You can try to recreate historical missions, such as Apollo 11 or SpaceX Falcon 9. You can also try to achieve new goals, such as landing on Mars or building a space station. You can also compete with other players and compare your rockets and achievements.
-
Express your creativity and imagination
-
Spaceflight Simulator allows you to express your creativity and imagination by building any rocket you want. You can design rockets that are realistic or fantastical, simple or complex, elegant or absurd. You can also create your own scenarios and stories by using different payloads and destinations. You can also share your creations with other players and see what they have made.
-
Conclusion
-
Spaceflight Simulator is a game that lets you explore the solar system in a realistic and fun way. You can build your own rocket from parts, launch it and control it in flight, plan your trajectory and perform maneuvers, land on different worlds and deploy payloads, learn about rocket science and space exploration, challenge yourself with realistic scenarios and missions, and express your creativity and imagination. If you are interested in spaceflight or just looking for a fun game to play, you should download Spaceflight Simulator today. You can download it from Steam for PC, Google Play for Android, or Lagged for online play. Have fun and enjoy the game!
-
FAQs
-
Here are some frequently asked questions about Spaceflight Simulator:
-
Q: How realistic is Spaceflight Simulator?
-
A: Spaceflight Simulator is very realistic in terms of physics and orbital mechanics. The game uses the same equations and formulas that real rockets and spacecraft use. The game also features a realistically scaled solar system with accurate distances, sizes, and characteristics of the planets and moons. However, the game also simplifies some aspects of spaceflight, such as aerodynamics, heat, radiation, and communication.
-
Q: How hard is Spaceflight Simulator?
-
A: Spaceflight Simulator can be as easy or as hard as you want it to be. The game offers a sandbox mode where you can build and fly any rocket you want without any restrictions or objectives. The game also offers a career mode where you have to complete missions and earn money to unlock more parts and planets. The game also has a difficulty slider that lets you adjust the realism and challenge of the game.
-
Q: How long is Spaceflight Simulator?
-
A: Spaceflight Simulator does not have a fixed length or end. The game is open-ended and lets you play as long as you want. You can create your own goals and challenges, or just explore the solar system at your own pace. The game also updates regularly with new features and content, so there is always something new to discover and try.
-
Q: How much does Spaceflight Simulator cost?
-
A: Spaceflight Simulator is free to download and play on Android devices and online platforms. However, the game also offers in-app purchases that let you unlock more parts, skins, and planets. The game also costs $9.99 USD to download and play on PC devices via Steam.
-
Q: How can I share my rockets with other players?
-
A: Spaceflight Simulator lets you share your rockets with other players in different ways. You can export your rocket as a file and send it to others via email, social media, or other platforms. You can also upload your rocket to Steam Workshop if you are playing on PC, or to Lagged if you are playing online. You can also browse and download other players' rockets from these platforms.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/simsantonioii/MusicGen-Continuation/audiocraft/data/audio.py b/spaces/simsantonioii/MusicGen-Continuation/audiocraft/data/audio.py
deleted file mode 100644
index 2048df6f175d7303bcf5c7b931922fd297908ead..0000000000000000000000000000000000000000
--- a/spaces/simsantonioii/MusicGen-Continuation/audiocraft/data/audio.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Audio IO methods are defined in this module (info, read, write).
-We rely on the av library for faster reads when possible, otherwise on torchaudio.
-"""
-
-from dataclasses import dataclass
-from pathlib import Path
-import logging
-import typing as tp
-
-import numpy as np
-import soundfile
-import torch
-from torch.nn import functional as F
-import torchaudio as ta
-
-import av
-
-from .audio_utils import f32_pcm, i16_pcm, normalize_audio
-
-
-_av_initialized = False
-
-
-def _init_av():
- global _av_initialized
- if _av_initialized:
- return
- logger = logging.getLogger('libav.mp3')
- logger.setLevel(logging.ERROR)
- _av_initialized = True
-
-
-@dataclass(frozen=True)
-class AudioFileInfo:
- sample_rate: int
- duration: float
- channels: int
-
-
-def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
- _init_av()
- with av.open(str(filepath)) as af:
- stream = af.streams.audio[0]
- sample_rate = stream.codec_context.sample_rate
- duration = float(stream.duration * stream.time_base)
- channels = stream.channels
- return AudioFileInfo(sample_rate, duration, channels)
-
-
-def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
- info = soundfile.info(filepath)
- return AudioFileInfo(info.samplerate, info.duration, info.channels)
-
-
-def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
-    # torchaudio no longer returns useful duration information for some formats like mp3s.
- filepath = Path(filepath)
- if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info
- # ffmpeg has some weird issue with flac.
- return _soundfile_info(filepath)
- else:
- return _av_info(filepath)
-
-
-def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]:
- """FFMPEG-based audio file reading using PyAV bindings.
- Soundfile cannot read mp3 and av_read is more efficient than torchaudio.
-
- Args:
- filepath (str or Path): Path to audio file to read.
- seek_time (float): Time at which to start reading in the file.
- duration (float): Duration to read from the file. If set to -1, the whole file is read.
- Returns:
- Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate
- """
- _init_av()
- with av.open(str(filepath)) as af:
- stream = af.streams.audio[0]
- sr = stream.codec_context.sample_rate
- num_frames = int(sr * duration) if duration >= 0 else -1
- frame_offset = int(sr * seek_time)
- # we need a small negative offset otherwise we get some edge artifact
- # from the mp3 decoder.
- af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream)
- frames = []
- length = 0
- for frame in af.decode(streams=stream.index):
- current_offset = int(frame.rate * frame.pts * frame.time_base)
- strip = max(0, frame_offset - current_offset)
- buf = torch.from_numpy(frame.to_ndarray())
- if buf.shape[0] != stream.channels:
- buf = buf.view(-1, stream.channels).t()
- buf = buf[:, strip:]
- frames.append(buf)
- length += buf.shape[1]
- if num_frames > 0 and length >= num_frames:
- break
- assert frames
- # If the above assert fails, it is likely because we seeked past the end of file point,
- # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp.
- # This will need proper debugging, in due time.
- wav = torch.cat(frames, dim=1)
- assert wav.shape[0] == stream.channels
- if num_frames > 0:
- wav = wav[:, :num_frames]
- return f32_pcm(wav), sr
-
-
-def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,
- duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:
- """Read audio by picking the most appropriate backend tool based on the audio format.
-
- Args:
- filepath (str or Path): Path to audio file to read.
- seek_time (float): Time at which to start reading in the file.
- duration (float): Duration to read from the file. If set to -1, the whole file is read.
- pad (bool): Pad output audio if not reaching expected duration.
- Returns:
- Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate.
- """
- fp = Path(filepath)
- if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg
- # There is some bug with ffmpeg and reading flac
- info = _soundfile_info(filepath)
- frames = -1 if duration <= 0 else int(duration * info.sample_rate)
- frame_offset = int(seek_time * info.sample_rate)
- wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)
- assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}"
- wav = torch.from_numpy(wav).t().contiguous()
- if len(wav.shape) == 1:
- wav = torch.unsqueeze(wav, 0)
- elif (
- fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats()
- and duration <= 0 and seek_time == 0
- ):
- # Torchaudio is faster if we load an entire file at once.
- wav, sr = ta.load(fp)
- else:
- wav, sr = _av_read(filepath, seek_time, duration)
- if pad and duration > 0:
- expected_frames = int(duration * sr)
- wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))
- return wav, sr
-
-
-def audio_write(stem_name: tp.Union[str, Path],
- wav: torch.Tensor, sample_rate: int,
- format: str = 'wav', mp3_rate: int = 320, normalize: bool = True,
- strategy: str = 'peak', peak_clip_headroom_db: float = 1,
- rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
- loudness_compressor: bool = False,
- log_clipping: bool = True, make_parent_dir: bool = True,
- add_suffix: bool = True) -> Path:
- """Convenience function for saving audio to disk. Returns the filename the audio was written to.
-
- Args:
- stem_name (str or Path): Filename without extension which will be added automatically.
- format (str): Either "wav" or "mp3".
- mp3_rate (int): kbps when using mp3s.
- normalize (bool): if `True` (default), normalizes according to the prescribed
- strategy (see after). If `False`, the strategy is only used in case clipping
- would happen.
- strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
- i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
- with extra headroom to avoid clipping. 'clip' just clips.
- peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
- rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
- than the `peak_clip` one to avoid further clipping.
- loudness_headroom_db (float): Target loudness for loudness normalization.
-        loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.
-        log_clipping (bool): If True, basic logging on stderr when clipping still
-            occurs despite strategy (only for 'rms').
- make_parent_dir (bool): Make parent directory if it doesn't exist.
- Returns:
- Path: Path of the saved audio.
- """
- assert wav.dtype.is_floating_point, "wav is not floating point"
- if wav.dim() == 1:
- wav = wav[None]
- elif wav.dim() > 2:
- raise ValueError("Input wav should be at most 2 dimension.")
- assert wav.isfinite().all()
- wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,
- rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping,
- sample_rate=sample_rate, stem_name=str(stem_name))
- kwargs: dict = {}
- if format == 'mp3':
- suffix = '.mp3'
- kwargs.update({"compression": mp3_rate})
- elif format == 'wav':
- wav = i16_pcm(wav)
- suffix = '.wav'
- kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16})
- else:
- raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.")
- if not add_suffix:
- suffix = ''
- path = Path(str(stem_name) + suffix)
- if make_parent_dir:
- path.parent.mkdir(exist_ok=True, parents=True)
- try:
- ta.save(path, wav, sample_rate, **kwargs)
- except Exception:
- if path.exists():
- # we do not want to leave half written files around.
- path.unlink()
- raise
- return path
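
A minimal usage sketch of the `audio_write` helper above, assuming the function and its module-level helpers (`normalize_audio`, `i16_pcm`) are in scope; the synthetic sine tone is an illustrative input, not project data:

```python
# Illustrative only: generate one second of a 440 Hz tone and save it as 16-bit PCM WAV.
import torch

sample_rate = 16000
t = torch.arange(sample_rate) / sample_rate            # one second of time steps
wav = torch.sin(2 * torch.pi * 440.0 * t)[None, :]     # [channels, samples], float32

# Peak-normalize with 1 dB of headroom; 'demo_tone' gets the '.wav' suffix added for us.
out_path = audio_write("demo_tone", wav, sample_rate,
                       format='wav', strategy='peak', peak_clip_headroom_db=1)
print(out_path)  # -> demo_tone.wav
```
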
diff --git a/spaces/siya02/Konakni-TTS/ttsv/scripts/hifi/prepare_data.sh b/spaces/siya02/Konakni-TTS/ttsv/scripts/hifi/prepare_data.sh
deleted file mode 100644
index d620cfeb93d8de9b2f750ad9bd52a937b0b88c33..0000000000000000000000000000000000000000
--- a/spaces/siya02/Konakni-TTS/ttsv/scripts/hifi/prepare_data.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-input_wav_path='/home/harveen/en/iitm_data/english/wav_22k' #give multiple folders separated by comma(,)
-gender='male'
-
-output_data_path='../../data/hifi/'$gender
-
-valid_samples=100
-test_samples=10
-
-mkdir -p $output_data_path
-python ../../utils/hifi/prepare_iitm_data_hifi.py -i $input_wav_path -v $valid_samples -t $test_samples -d $output_data_path
diff --git a/spaces/skf15963/summary/fengshen/data/dreambooth_datasets/dreambooth_datasets.py b/spaces/skf15963/summary/fengshen/data/dreambooth_datasets/dreambooth_datasets.py
deleted file mode 100644
index 6f94216f3dadbd5423dfdb53fe1b2ff9382fb4d5..0000000000000000000000000000000000000000
--- a/spaces/skf15963/summary/fengshen/data/dreambooth_datasets/dreambooth_datasets.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# -*- encoding: utf-8 -*-
-'''
-Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@File : dreambooth_datasets.py
-@Time : 2022/11/10 00:20
-@Author : Gan Ruyi
-@Version : 1.0
-@Contact : ganruyi@idea.edu.cn
-@License : (C)Copyright 2022-2023, CCNL-IDEA
-'''
-from torch.utils.data import Dataset
-from torchvision import transforms
-from PIL import Image
-from pathlib import Path
-
-
-def add_data_args(parent_args):
- parser = parent_args.add_argument_group('taiyi stable diffusion data args')
- parser.add_argument(
- "--instance_data_dir",
- type=str,
- default=None,
- required=True,
- help="A folder containing the training data of instance images.",
- )
- parser.add_argument(
- "--class_data_dir",
- type=str,
- default=None,
- required=False,
- help="A folder containing the training data of class images.",
- )
- parser.add_argument(
- "--instance_prompt",
- type=str,
- default=None,
- help="The prompt with identifier specifying the instance",
- )
- parser.add_argument(
- "--class_prompt",
- type=str,
- default=None,
- help="The prompt to specify images in the same class as provided instance images.",
- )
- parser.add_argument(
- "--with_prior_preservation",
- default=False,
- action="store_true",
- help="Flag to add prior preservation loss.",
- )
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
- parser.add_argument(
- "--num_class_images",
- type=int,
- default=100,
- help=(
- "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
- " sampled with class_prompt."
- ),
- )
- parser.add_argument(
- "--resolution", type=int, default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop", action="store_true", default=False,
- help="Whether to center crop images before resizing to resolution"
- )
- parser.add_argument(
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
- )
- return parent_args
-
-
-class DreamBoothDataset(Dataset):
- """
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
- It pre-processes the images and tokenizes the prompts.
- """
-
- def __init__(
- self,
- instance_data_dir,
- instance_prompt,
- tokenizer,
- class_data_dir=None,
- class_prompt=None,
- size=512,
- center_crop=False,
- ):
- self.size = size
- self.center_crop = center_crop
- self.tokenizer = tokenizer
-
- self.instance_data_dir = Path(instance_data_dir)
- if not self.instance_data_dir.exists():
- raise ValueError("Instance images root doesn't exist.")
-
- self.instance_images_path = list(Path(instance_data_dir).iterdir())
- print(self.instance_images_path)
- self.num_instance_images = len(self.instance_images_path)
- self.instance_prompt = instance_prompt
- self._length = self.num_instance_images
-
- if class_data_dir is not None:
- self.class_data_dir = Path(class_data_dir)
- self.class_data_dir.mkdir(parents=True, exist_ok=True)
- self.class_images_path = list(self.class_data_dir.iterdir())
- self.num_class_images = len(self.class_images_path)
- self._length = max(self.num_class_images, self.num_instance_images)
- self.class_prompt = class_prompt
- else:
- self.class_data_dir = None
-
- self.image_transforms = transforms.Compose(
- [
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, index):
- example = {}
- instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
- if not instance_image.mode == "RGB":
- instance_image = instance_image.convert("RGB")
- example["instance_images"] = self.image_transforms(instance_image)
- example["instance_prompt_ids"] = self.tokenizer(
- self.instance_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=64,
- # max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- if self.class_data_dir:
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
- if not class_image.mode == "RGB":
- class_image = class_image.convert("RGB")
- example["class_images"] = self.image_transforms(class_image)
- example["class_prompt_ids"] = self.tokenizer(
- self.class_prompt,
- padding="do_not_pad",
- truncation=True,
- # max_length=self.tokenizer.model_max_length,
- max_length=64,
- ).input_ids
-
- return example
-
-
-class PromptDataset(Dataset):
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
- def __init__(self, prompt, num_samples):
- self.prompt = prompt
- self.num_samples = num_samples
-
- def __len__(self):
- return self.num_samples
-
- def __getitem__(self, index):
- example = {}
- example["prompt"] = self.prompt
- example["index"] = index
- return example
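
A small usage sketch for `DreamBoothDataset` above, assuming a CLIP tokenizer and a folder of instance images; the tokenizer checkpoint and the directory path are placeholders, not values taken from the file:

```python
# Illustrative only: instantiate the dataset and inspect one pre-processed example.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")  # assumed tokenizer
dataset = DreamBoothDataset(
    instance_data_dir="data/instance_images",   # hypothetical folder of training photos
    instance_prompt="a photo of sks dog",
    tokenizer=tokenizer,
    size=512,
    center_crop=False,
)

example = dataset[0]
print(example["instance_images"].shape)      # torch.Size([3, 512, 512]), normalized to [-1, 1]
print(len(example["instance_prompt_ids"]))   # number of token ids for the prompt (<= 64)
```

In real training, batches are usually assembled with a custom `collate_fn` that pads the prompt ids, since the dataset returns them unpadded.
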
diff --git a/spaces/skf15963/summary/fengshen/data/megatron_dataloader/blendable_dataset.py b/spaces/skf15963/summary/fengshen/data/megatron_dataloader/blendable_dataset.py
deleted file mode 100644
index ee24d4056b86333a13d4926e79283a0bc96bbea3..0000000000000000000000000000000000000000
--- a/spaces/skf15963/summary/fengshen/data/megatron_dataloader/blendable_dataset.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# coding=utf-8
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Blendable dataset."""
-
-import time
-
-import numpy as np
-import torch
-
-from fengshen.data.megatron_dataloader.utils import print_rank_0
-
-
-class BlendableDataset(torch.utils.data.Dataset):
-
- def __init__(self, datasets, weights):
-
- self.datasets = datasets
- num_datasets = len(datasets)
- assert num_datasets == len(weights)
-
- self.size = 0
- for dataset in self.datasets:
- self.size += len(dataset)
-
- # Normalize weights.
- weights = np.array(weights, dtype=np.float64)
- sum_weights = np.sum(weights)
- assert sum_weights > 0.0
- weights /= sum_weights
-
- # Build indices.
- start_time = time.time()
- assert num_datasets < 255
- self.dataset_index = np.zeros(self.size, dtype=np.uint8)
- self.dataset_sample_index = np.zeros(self.size, dtype=np.int64)
-
- from fengshen.data.megatron_dataloader import helpers
- helpers.build_blending_indices(self.dataset_index,
- self.dataset_sample_index,
- weights, num_datasets, self.size,
- torch.distributed.get_rank() == 0)
- print_rank_0('> elapsed time for building blendable dataset indices: '
- '{:.2f} (sec)'.format(time.time() - start_time))
-
- def __len__(self):
- return self.size
-
- def __getitem__(self, idx):
- dataset_idx = self.dataset_index[idx]
- sample_idx = self.dataset_sample_index[idx]
- return self.datasets[dataset_idx][sample_idx]
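
The index construction above is delegated to the compiled `helpers.build_blending_indices`. As a rough illustration of the idea only (not a drop-in replacement for the compiled helper), here is a pure-NumPy sketch that assigns each global sample index to the dataset currently lagging its target weight the most:

```python
import numpy as np

def build_blending_indices_py(weights, size):
    """Approximate, pure-Python version of the blending-index idea (illustrative)."""
    weights = np.asarray(weights, dtype=np.float64)
    weights = weights / weights.sum()
    dataset_index = np.zeros(size, dtype=np.uint8)
    dataset_sample_index = np.zeros(size, dtype=np.int64)
    counts = np.zeros(len(weights), dtype=np.int64)
    for i in range(size):
        # Pick the dataset whose realized count lags its target share the most.
        errors = weights * (i + 1) - counts
        d = int(np.argmax(errors))
        dataset_index[i] = d
        dataset_sample_index[i] = counts[d]
        counts[d] += 1
    return dataset_index, dataset_sample_index

idx, sample_idx = build_blending_indices_py([0.7, 0.3], size=10)
print(idx)         # which dataset each global index draws from
print(sample_idx)  # per-dataset running sample counter
```
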
diff --git a/spaces/skf15963/summary/fengshen/examples/longformer/README.md b/spaces/skf15963/summary/fengshen/examples/longformer/README.md
deleted file mode 100644
index ef4706898b87d2f10eff5df2db24ae3a182ce673..0000000000000000000000000000000000000000
--- a/spaces/skf15963/summary/fengshen/examples/longformer/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Longformer model (Chinese), one of the models of [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM).
-We replace Longformer's original positional encoding with rotary position embeddings and, starting from [chinese_roformer_L-12_H-768_A-12.zip](https://github.com/ZhuiyiTechnology/roformer), continue pre-training on 180G of data.
-
-## Usage
-The Longformer-base architecture is not included in [Transformers](https://github.com/huggingface/transformers); run the following code to get the Longformer implementation from [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM):
-
- ```shell
- git clone https://github.com/IDEA-CCNL/Fengshenbang-LM.git
- ```
-
-### Load Model
-```python
-from fengshen import LongformerModel
-from fengshen import LongformerConfig
-from transformers import BertTokenizer
-
-tokenizer = BertTokenizer.from_pretrained("IDEA-CCNL/Erlangshen-Longformer-110M")
-config = LongformerConfig.from_pretrained("IDEA-CCNL/Erlangshen-Longformer-110M")
-model = LongformerModel.from_pretrained("IDEA-CCNL/Erlangshen-Longformer-110M")
-```
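
As a possible next step (not part of the original README, and assuming the Fengshenbang Longformer mirrors the Hugging Face output objects), you can tokenize a sentence and run a forward pass:

```python
import torch

inputs = tokenizer("今天天气很好。", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```
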
-
-
-
-## Citation
-If you find this resource useful, please cite the following repository in your paper.
-
-```
-@misc{Fengshenbang-LM,
- title={Fengshenbang-LM},
- author={IDEA-CCNL},
- year={2021},
- howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}},
-}
-```
diff --git a/spaces/skf15963/summary/fengshen/examples/zen2_finetune/ner_zen2_large_cluener.sh b/spaces/skf15963/summary/fengshen/examples/zen2_finetune/ner_zen2_large_cluener.sh
deleted file mode 100644
index 07193e3f15ca69755853623a57fee0a573db6593..0000000000000000000000000000000000000000
--- a/spaces/skf15963/summary/fengshen/examples/zen2_finetune/ner_zen2_large_cluener.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=zen2_large_cluener # create a short name for your job
-#SBATCH --nodes=1 # node count
-#SBATCH --ntasks=1 # total number of tasks across all nodes
-#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks)
-#SBATCH --gres=gpu:1 # number of gpus per node
-#SBATCH --mail-type=ALL # send email when job begins, ends or failed etc.
-#SBATCH -o /cognitive_comp/ganruyi/experiments/ner_finetune/zen2_large_cluener/%x-%j.log # output and error file name (%x=job name, %j=job id)
-
-
-# export CUDA_VISIBLE_DEVICES='2'
-export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions
-
-MODEL_NAME=zen2_large
-
-TASK=cluener
-
-ZERO_STAGE=1
-STRATEGY=deepspeed_stage_${ZERO_STAGE}
-
-ROOT_DIR=/cognitive_comp/ganruyi/experiments/ner_finetune/${MODEL_NAME}_${TASK}
-if [ ! -d ${ROOT_DIR} ];then
- mkdir -p ${ROOT_DIR}
- echo ${ROOT_DIR} created!!!!!!!!!!!!!!
-else
- echo ${ROOT_DIR} exist!!!!!!!!!!!!!!!
-fi
-
-DATA_DIR=/cognitive_comp/lujunyu/data_zh/NER_Aligned/CLUENER/
-PRETRAINED_MODEL_PATH=/cognitive_comp/ganruyi/hf_models/zen/zh_zen_large_2.0
-
-CHECKPOINT_PATH=${ROOT_DIR}/ckpt/
-OUTPUT_PATH=${ROOT_DIR}/predict.json
-
-DATA_ARGS="\
- --data_dir $DATA_DIR \
- --train_data train.char.txt \
- --valid_data dev.char.txt \
- --test_data dev.char.txt \
- --train_batchsize 16 \
- --valid_batchsize 16 \
- --max_seq_length 256 \
- --task_name cluener \
- "
-
-MODEL_ARGS="\
- --learning_rate 3e-5 \
- --weight_decay 0.1 \
- --warmup_ratio 0.01 \
- --markup bio \
- --middle_prefix I- \
- "
-
-MODEL_CHECKPOINT_ARGS="\
- --monitor val_f1 \
- --save_top_k 3 \
- --mode max \
- --every_n_train_steps 100 \
- --save_weights_only True \
- --dirpath $CHECKPOINT_PATH \
- --filename model-{epoch:02d}-{val_f1:.4f} \
- "
-
-TRAINER_ARGS="\
- --max_epochs 30 \
- --gpus 1 \
- --check_val_every_n_epoch 1 \
- --val_check_interval 200 \
- --default_root_dir $ROOT_DIR \
- "
-
-
-options=" \
- --pretrained_model_path $PRETRAINED_MODEL_PATH \
- --vocab_file $PRETRAINED_MODEL_PATH/vocab.txt \
- --do_lower_case \
- --output_save_path $OUTPUT_PATH \
- $DATA_ARGS \
- $MODEL_ARGS \
- $MODEL_CHECKPOINT_ARGS \
- $TRAINER_ARGS \
-"
-SCRIPT_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/zen2_finetune/fengshen_token_level_ft_task.py
-/home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options
-
-# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif
-# python3 $SCRIPT_PATH $options
-# source activate base
-# singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options
-# /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options
-
diff --git a/spaces/sklearn-docs/Lasso_and_elasticnet_for_sparse_signals/README.md b/spaces/sklearn-docs/Lasso_and_elasticnet_for_sparse_signals/README.md
deleted file mode 100644
index c64d2e79789e7b1628b99ee0360a3159317eab72..0000000000000000000000000000000000000000
--- a/spaces/sklearn-docs/Lasso_and_elasticnet_for_sparse_signals/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Lasso And Elasticnet For Sparse Signals
-emoji: 👁
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: creativeml-openrail-m
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/songdaooi/ketsueki/swapper.py b/spaces/songdaooi/ketsueki/swapper.py
deleted file mode 100644
index f7f359961e465004fed3311b8dee0bf51c56b649..0000000000000000000000000000000000000000
--- a/spaces/songdaooi/ketsueki/swapper.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import cv2
-import numpy as np
-from insightface.utils import face_align
-from face_parsing.swap import swap_regions
-from utils import add_logo_to_image
-
-swap_options_list = [
- "All face",
- "Age less than",
- "Age greater than",
- "All Male",
- "All Female",
- "Specific Face",
-]
-
-
-def swap_face(whole_img, target_face, source_face, models):
- inswapper = models.get("swap")
- face_enhancer = models.get("enhance", None)
- face_parser = models.get("face_parser", None)
- fe_enable = models.get("enhance_sett", False)
-
- bgr_fake, M = inswapper.get(whole_img, target_face, source_face, paste_back=False)
- image_size = 128 if not fe_enable else 512
- aimg, _ = face_align.norm_crop2(whole_img, target_face.kps, image_size=image_size)
-
- if face_parser is not None:
- fp_enable, includes, smooth_mask, blur_amount = models.get("face_parser_sett")
- if fp_enable:
- bgr_fake = swap_regions(
- bgr_fake, aimg, face_parser, smooth_mask, includes=includes, blur=blur_amount
- )
-
- if fe_enable:
- _, bgr_fake, _ = face_enhancer.enhance(
- bgr_fake, paste_back=True, has_aligned=True
- )
- bgr_fake = bgr_fake[0]
- M /= 0.25
-
- IM = cv2.invertAffineTransform(M)
-
- img_white = np.full((aimg.shape[0], aimg.shape[1]), 255, dtype=np.float32)
- bgr_fake = cv2.warpAffine(
- bgr_fake, IM, (whole_img.shape[1], whole_img.shape[0]), borderValue=0.0
- )
- img_white = cv2.warpAffine(
- img_white, IM, (whole_img.shape[1], whole_img.shape[0]), borderValue=0.0
- )
- img_white[img_white > 20] = 255
- img_mask = img_white
- mask_h_inds, mask_w_inds = np.where(img_mask == 255)
- mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
- mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
- mask_size = int(np.sqrt(mask_h * mask_w))
-
- k = max(mask_size // 10, 10)
- img_mask = cv2.erode(img_mask, np.ones((k, k), np.uint8), iterations=1)
-
- k = max(mask_size // 20, 5)
- kernel_size = (k, k)
- blur_size = tuple(2 * i + 1 for i in kernel_size)
- img_mask = cv2.GaussianBlur(img_mask, blur_size, 0) / 255
-
- img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])
- fake_merged = img_mask * bgr_fake + (1 - img_mask) * whole_img.astype(np.float32)
- fake_merged = add_logo_to_image(fake_merged.astype("uint8"))
- return fake_merged
-
-
-def swap_face_with_condition(
- whole_img, target_faces, source_face, condition, age, models
-):
- swapped = whole_img.copy()
-
- for target_face in target_faces:
- if condition == "All face":
- swapped = swap_face(swapped, target_face, source_face, models)
- elif condition == "Age less than" and target_face["age"] < age:
- swapped = swap_face(swapped, target_face, source_face, models)
- elif condition == "Age greater than" and target_face["age"] > age:
- swapped = swap_face(swapped, target_face, source_face, models)
- elif condition == "All Male" and target_face["gender"] == 1:
- swapped = swap_face(swapped, target_face, source_face, models)
- elif condition == "All Female" and target_face["gender"] == 0:
- swapped = swap_face(swapped, target_face, source_face, models)
-
- return swapped
-
-
-def swap_specific(source_specifics, target_faces, whole_img, models, threshold=0.6):
- swapped = whole_img.copy()
-
- for source_face, specific_face in source_specifics:
- specific_embed = specific_face["embedding"]
- specific_embed /= np.linalg.norm(specific_embed)
-
- for target_face in target_faces:
- target_embed = target_face["embedding"]
- target_embed /= np.linalg.norm(target_embed)
- cosine_distance = 1 - np.dot(specific_embed, target_embed)
- if cosine_distance > threshold:
- continue
- swapped = swap_face(swapped, target_face, source_face, models)
-
- return swapped
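
For reference, the identity gate in `swap_specific` reduces to a cosine-distance threshold on L2-normalized embeddings. A standalone sketch of that check, using random placeholder vectors rather than real face embeddings:

```python
import numpy as np

def is_same_identity(emb_a, emb_b, threshold=0.6):
    """Return True when the cosine distance between two embeddings is within threshold."""
    a = emb_a / np.linalg.norm(emb_a)
    b = emb_b / np.linalg.norm(emb_b)
    return (1.0 - float(np.dot(a, b))) <= threshold

rng = np.random.default_rng(0)
specific = rng.normal(size=512)                   # placeholder for specific_face["embedding"]
candidate = specific + 0.1 * rng.normal(size=512) # a slightly perturbed copy
print(is_same_identity(specific, candidate))            # close vectors -> True
print(is_same_identity(specific, rng.normal(size=512))) # unrelated vector -> False (distance near 1)
```
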
diff --git a/spaces/spark-nlp/SparkNLP_NER/app.py b/spaces/spark-nlp/SparkNLP_NER/app.py
deleted file mode 100644
index 2834ea77ffd5d2baed8d56e17fc69722b989c260..0000000000000000000000000000000000000000
--- a/spaces/spark-nlp/SparkNLP_NER/app.py
+++ /dev/null
@@ -1,313 +0,0 @@
-import streamlit as st
-
-
-st.set_page_config(
- layout="centered", # Can be "centered" or "wide". In the future also "dashboard", etc.
- initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed"
- page_title='Spark NLP NER', # String or None. Strings get appended with "• Streamlit".
- page_icon='./favicon.png', # String, anything supported by st.image, or None.
-)
-
-
-import pandas as pd
-import numpy as np
-import json
-import os
-import sys
-sys.path.append(os.path.abspath('./'))
-import streamlit_apps_config as config
-from streamlit_ner_output import show_html2, jsl_display_annotations, get_color
-
-import sparknlp
-from sparknlp.base import *
-from sparknlp.annotator import *
-from pyspark.sql import functions as F
-from sparknlp_display import NerVisualizer
-from pyspark.ml import Pipeline
-from pyspark.sql.types import StringType
-
-spark= sparknlp.start()
-
-## Marking down NER Style
-st.markdown(config.STYLE_CONFIG, unsafe_allow_html=True)
-
-root_path = config.project_path
-
-########## To Remove the Main Menu Hamburger ########
-
-hide_menu_style = """
-
- """
-st.markdown(hide_menu_style, unsafe_allow_html=True)
-
-########## Side Bar ########
-
-## loading logo(newer version with href)
-import base64
-@st.cache(allow_output_mutation=True)
-def get_base64_of_bin_file(bin_file):
- with open(bin_file, 'rb') as f:
- data = f.read()
- return base64.b64encode(data).decode()
-
-@st.cache(allow_output_mutation=True)
-def get_img_with_href(local_img_path, target_url):
- img_format = os.path.splitext(local_img_path)[-1].replace('.', '')
- bin_str = get_base64_of_bin_file(local_img_path)
- html_code = f'''
-
-
- '''
- return html_code
-
-logo_html = get_img_with_href('./jsl-logo.png', 'https://www.johnsnowlabs.com/')
-st.sidebar.markdown(logo_html, unsafe_allow_html=True)
-
-
-
-#sidebar info
-model_name= ["nerdl_fewnerd_100d", "ner_conll_elmo", "ner_mit_movie_complex_distilbert_base_cased", "ner_conll_albert_large_uncased", "onto_100"]
-st.sidebar.title("Pretrained model to test")
-selected_model = st.sidebar.selectbox("", model_name)
-
-######## Main Page #########
-
-if selected_model == "nerdl_fewnerd_100d":
- app_title= "Detect up to 8 entity types in general domain texts"
- app_description= "Named Entity Recognition model aimed to detect up to 8 entity types from general domain texts. This model was trained on the Few-NERD/inter public dataset using Spark NLP, and it is available in Spark NLP Models hub. "
- st.title(app_title)
- st.markdown("
"+app_description+"
" , unsafe_allow_html=True)
- st.markdown("**`PERSON`** **,** **`ORGANIZATION`** **,** **`LOCATION`** **,** **`ART`** **,** **`BUILDING`** **,** **`PRODUCT`** **,** **`EVENT`** **,** **`OTHER`**", unsafe_allow_html=True)
-
-elif selected_model== "ner_conll_elmo":
- app_title= "Detect up to 4 entity types in general domain texts"
- app_description= "Named Entity Recognition model aimed to detect up to 4 entity types from general domain texts. This model was trained on the CoNLL 2003 text corpus using Spark NLP, and it is available in Spark NLP Models hub. "
- st.title(app_title)
- st.markdown("
"+app_description+"
" , unsafe_allow_html=True)
- st.markdown("**`PER`** **,** **`LOC`** **,** **`ORG`** **,** **`MISC` **", unsafe_allow_html=True)
-
-elif selected_model== "ner_mit_movie_complex_distilbert_base_cased":
- app_title= "Detect up to 12 entity types in movie domain texts"
- app_description= "Named Entity Recognition model aimed to detect up to 12 entity types from movie domain texts. This model was trained on the MIT Movie Corpus complex queries dataset to detect movie trivia using Spark NLP, and it is available in Spark NLP Models hub. "
- st.title(app_title)
- st.markdown("
"+app_description+"
" , unsafe_allow_html=True)
- st.markdown("""**`ACTOR`** **,** **`AWARD`** **,** **`CHARACTER_NAME`** **,** **`DIRECTOR`** **,** **`GENRE`** **,** **`OPINION`** **,** **`ORIGIN`** **,** **`PLOT`**,
- **`QUOTE`** **,** **`RELATIONSHIP`** **,** **`SOUNDTRACK`** **,** **`YEAR` **""", unsafe_allow_html=True)
-
-
-elif selected_model=="ner_conll_albert_large_uncased":
- app_title= "Detect up to 4 entity types in general domain texts"
- app_description= "Named Entity Recognition model aimed to detect up to 4 entity types from general domain texts. This model was trained on the CoNLL 2003 text corpus using Spark NLP, and it is available in Spark NLP Models hub. "
- st.title(app_title)
- st.markdown("
"+app_description+"
" , unsafe_allow_html=True)
- st.markdown("**`PER`** **,** **`LOC`** **,** **`ORG`** **,** **`MISC` **", unsafe_allow_html=True)
-
-elif selected_model=="onto_100":
- app_title= "Detect up to 18 entity types in general domain texts"
- app_description= "Named Entity Recognition model aimed to detect up to 18 entity types from general domain texts. This model was trained with GloVe 100d word embeddings using Spark NLP, so be sure to use same embeddings in the pipeline. It is available in Spark NLP Models hub. "
- st.title(app_title)
- st.markdown("
"+app_description+"
" , unsafe_allow_html=True)
- st.markdown("""**`CARDINAL`** **,** **`EVENT`** **,** **`WORK_OF_ART`** **,** **`ORG`** **,** **`DATE`** **,** **`GPE`** **,** **`PERSON`** **,** **`PRODUCT`**,
- **`NORP`** **,** **`ORDINAL`** **,** **`MONEY`** **,** **`LOC` **, **`FAC`** **,** **`LAW`** **,** **`TIME`** **,** **`PERCENT`** **,** **`QUANTITY`** **,** **`LANGUAGE` **""", unsafe_allow_html=True)
-
-
-st.subheader("")
-
-
-
-
-
-#caching the models in the dictionary
-@st.cache(allow_output_mutation=True, show_spinner=False)
-def load_sparknlp_models():
- ner_models_list= ["nerdl_fewnerd_100d", "ner_conll_elmo", "ner_mit_movie_complex_distilbert_base_cased",
- "ner_conll_albert_large_uncased", "onto_100"]
- embeddings_list= ["glove_100d", "elmo", "distilbert_base_cased", "albert_large_uncased", "glove_100d_for_onto"]
-
-
- documentAssembler = DocumentAssembler()\
- .setInputCol("text")\
- .setOutputCol("document")
-
- sentenceDetector= SentenceDetector()\
- .setInputCols(["document"])\
- .setOutputCol("sentence")
-
- tokenizer = Tokenizer()\
- .setInputCols(["sentence"])\
- .setOutputCol("token")
-
- ner_converter= NerConverter()\
- .setInputCols(["document", "token", "ner"])\
- .setOutputCol("ner_chunk")
-
- model_dict= {
- 'documentAssembler': documentAssembler,
- 'sentenceDetector': sentenceDetector,
- 'tokenizer': tokenizer,
- 'ner_converter': ner_converter
- }
-
- for embeddings_name, ner_model_name in zip(embeddings_list, ner_models_list):
-
- try:
- if embeddings_name=="glove_100d":
- model_dict[embeddings_name]= WordEmbeddingsModel.pretrained(embeddings_name, "en")\
- .setInputCols(["sentence", "token"])\
- .setOutputCol("embeddings")
-
- elif embeddings_name=="elmo":
- model_dict[embeddings_name]= ElmoEmbeddings.pretrained(embeddings_name, "en")\
- .setInputCols(["token", "document"])\
- .setOutputCol("embeddings")\
- .setPoolingLayer("elmo")
-
- elif embeddings_name=="distilbert_base_cased":
- model_dict[embeddings_name]= DistilBertEmbeddings\
- .pretrained(embeddings_name, 'en')\
- .setInputCols(["token", "document"])\
- .setOutputCol("embeddings")
-
- elif embeddings_name=="albert_large_uncased":
- model_dict[embeddings_name]= AlbertEmbeddings\
- .pretrained(embeddings_name, 'en')\
- .setInputCols(["document", "token"])\
- .setOutputCol("embeddings")
-
- elif embeddings_name=="glove_100d_for_onto":
- model_dict[embeddings_name]= WordEmbeddingsModel.pretrained("glove_100d", "en")\
- .setInputCols(["sentence", "token"])\
- .setOutputCol("embeddings")
-
-
- model_dict[ner_model_name]= NerDLModel.pretrained(ner_model_name, "en")\
- .setInputCols(["document", "token", "embeddings"])\
- .setOutputCol("ner")
-
-
- except:
- pass
- return model_dict
-
-
-
-placeholder_= st.empty()
-placeholder_.info("If you are launching the app for the first time, it may take some time (approximately 1 minute) for SparkNLP models to load...")
-nlp_dict= load_sparknlp_models()
-placeholder_.empty()
-
-
-
-
-if selected_model=="ner_conll_albert_large_uncased":
- text= st.text_input("Type here your text and press enter to run:", value="Mark Knopfler was born in Glasgow, Scotland. He is a British singer-songwriter, guitarist, and record producer. He became known as the lead guitarist, singer and songwriter of the rock band Dire Straits.")
-
-elif selected_model=="ner_mit_movie_complex_distilbert_base_cased":
- text= st.text_input("Type here your text and press enter to run:", value="It's only appropriate that Solaris, Russian filmmaker Andrei Tarkovsky's psychological sci-fi classic from 1972, contains an equally original and mind-bending score. Solaris explores the inadequacies of time and memory on an enigmatic planet below a derelict space station. To reinforce the film's chilling setting, Tarkovsky commissioned composer Eduard Artemiev to construct an electronic soundscape reflecting planet Solaris' amorphous and mysterious surface")
-
-elif selected_model=="ner_conll_elmo":
- text= st.text_input("Type here your text and press enter to run: ", value="Tottenham Hotspur Football Club, commonly referred to as Tottenham or Spurs, is an English professional football club based in Tottenham, London, that competes in the Premier League, the top flight of English football.")
-
-elif selected_model=="onto_100":
- text= st.text_input("Type here your text and press enter to run: ", value="William Henry Gates III (born October 28, 1955) is an American business magnate, software developer, investor, and philanthropist. He is best known as the co-founder of Microsoft Corporation. During his career at Microsoft, Gates held the positions of chairman, chief executive officer (CEO), president and chief software architect, while also being the largest individual shareholder until May 2014. He is one of the best-known entrepreneurs and pioneers of the microcomputer revolution of the 1970s and 1980s. Born and raised in Seattle, Washington, Gates co-founded Microsoft with childhood friend Paul Allen in 1975, in Albuquerque, New Mexico; it went on to become the world's largest personal computer software company. Gates led the company as chairman and CEO until stepping down as CEO in January 2000, but he remained chairman and became chief software architect.")
-
-else:
- text= st.text_input("Type here your text and press enter to run:", value="12 Corazones ('12 Hearts') is Spanish-language dating game show produced in the United States for the television network Telemundo since January 2005, based on its namesake Argentine TV show format. The show is filmed in Los Angeles and revolves around the twelve Zodiac signs that identify each contestant. In 2008, Ho filmed a cameo in the Steven Spielberg feature film The Cloverfield Paradox, as a news pundit.")
-
-
-
-def build_pipeline(text, model_name=selected_model):
-
- base_pipeline= Pipeline(stages=[
- nlp_dict["documentAssembler"],
- nlp_dict["sentenceDetector"],
- nlp_dict["tokenizer"]
- ])
-
- fewnerd_pipeline= Pipeline(stages=[
- base_pipeline,
- nlp_dict["glove_100d"],
- nlp_dict[model_name],
- nlp_dict["ner_converter"]
- ])
-
- elmo_pipeline= Pipeline(stages=[
- base_pipeline,
- nlp_dict["elmo"],
- nlp_dict[model_name],
- nlp_dict["ner_converter"]
- ])
-
- movie_pipeline= Pipeline(stages=[
- base_pipeline,
- nlp_dict["distilbert_base_cased"],
- nlp_dict[model_name],
- nlp_dict["ner_converter"]
- ])
-
- albert_pipeline= Pipeline(stages=[
- base_pipeline,
- nlp_dict["albert_large_uncased"],
- nlp_dict[model_name],
- nlp_dict["ner_converter"]
- ])
-
- onto_pipeline= Pipeline(stages=[
- base_pipeline,
- nlp_dict["glove_100d_for_onto"],
- nlp_dict[model_name],
- nlp_dict["ner_converter"]
- ])
-
-
- text_df = spark.createDataFrame([[text]]).toDF("text")
-
- if model_name=="nerdl_fewnerd_100d":
- pipeline_model= fewnerd_pipeline.fit(text_df)
-
- elif model_name=="ner_conll_elmo":
- pipeline_model= elmo_pipeline.fit(text_df)
-
- elif model_name=="ner_mit_movie_complex_distilbert_base_cased":
- pipeline_model= movie_pipeline.fit(text_df)
-
- elif model_name=="ner_conll_albert_large_uncased":
- pipeline_model= albert_pipeline.fit(text_df)
-
- elif model_name=="onto_100":
- pipeline_model= onto_pipeline.fit(text_df)
-
- result = pipeline_model.transform(text_df).toPandas()
-
- return result
-
-#placeholder for warning
-placeholder= st.empty()
-placeholder.info("Processing...")
-
-result= build_pipeline(text)
-placeholder.empty()
-
-df= pd.DataFrame({"ner_chunk": result["ner_chunk"].iloc[0]})
-
-labels_set = set()
-for i in df['ner_chunk'].values:
- labels_set.add(i[4]['entity'])
-labels_set = list(labels_set)
-
-labels = st.sidebar.multiselect(
- "NER Labels", options=labels_set, default=list(labels_set)
- )
-
-show_html2(text, df, labels, "Text annotated with identified Named Entities")
-
-try_link=""""""
-st.sidebar.title('')
-st.sidebar.markdown("
Try it yourself:
" , unsafe_allow_html=True)
-st.sidebar.markdown(try_link, unsafe_allow_html=True)
-
-st.sidebar.info("""Want to see more?
-- Check Spark NLP in action, including our Spark NLP for Healthcare & Spark OCR demos at [here](https://nlp.johnsnowlabs.com/demos)
-- Check our 4.4K+ models available in Spark NLP Models Hub [here](https://nlp.johnsnowlabs.com/models)""")
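
As a side note (not in the original app), single-string inference is usually faster through Spark NLP's `LightPipeline` than through `transform` on a one-row DataFrame. A sketch reusing a fitted pipeline such as the one built inside `build_pipeline` (the `pipeline_model` variable name is assumed):

```python
from sparknlp.base import LightPipeline

# pipeline_model is a fitted Spark ML Pipeline (e.g. fewnerd_pipeline.fit(text_df)).
light = LightPipeline(pipeline_model)
annotations = light.fullAnnotate("Tottenham Hotspur is an English football club based in London.")[0]
for chunk in annotations["ner_chunk"]:
    print(chunk.result, chunk.metadata["entity"])
```
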
diff --git a/spaces/sriramelango/Social_Classification_Public/utils/BPE/__init__.py b/spaces/sriramelango/Social_Classification_Public/utils/BPE/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/srush/minichain/bash.html b/spaces/srush/minichain/bash.html
deleted file mode 100644
index 1e09036901ec62d446b919418a018dbeee54d781..0000000000000000000000000000000000000000
--- a/spaces/srush/minichain/bash.html
+++ /dev/null
@@ -1,15153 +0,0 @@
-bash
CLIPrompt().show(
- {"question":"list the files in the directory"},
- """```bash\nls\n```""")
Out[3]:
CLIPrompt
-
-
-
Input:
-
-
{'question':'list the files in the directory'}
-
-
-
-
-
-
Full Prompt:
-
-
-
-
If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:
Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'"
I need to take the following actions: - List all files in the directory - Create a new directory - Copy the files from the first directory into the second directory ```bash ls mkdir myNewDirectory cp -r target/* myNewDirectory ```
f1d34948-3c10-43b1-993c-d3c55d3255b5
-
-b4113480-2253-4c08-be66-b4d54acd0911
- │ ├── input:
- │ │ └── question: "go up one directory and list the files in the directory"
- │ ├── prompt: If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:⏎
- │ │ ⏎
- │ │ Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'"⏎
- │ │ ⏎
- │ │ I need to take the following actions:⏎
- │ │ - List all files in the directory⏎
- │ │ - Create a new directory⏎
- │ │ - Copy the files from the first directory into the second directory⏎
- │ │ ```bash⏎
- │ │ ls⏎
- │ │ mkdir myNewDirectory⏎
- │ │ cp -r target/* myNewDirectory⏎
- │ │ ```⏎
- │ │ ⏎
- │ │ That is the format. Begin!⏎
- │ │ ⏎
- │ │ Question: "go up one directory and list the files in the directory"
- │ ├── result: ```bash⏎
- │ │ ls⏎
- │ │ ls⏎
- │ │ ls⏎
- │ │ ```
- │ │ └── returned:
- │ │ ├── 0: ls
- │ │ ├── 1: ls
- │ │ └── 2: ls
-
-bbfa93a2-07f9-4c2f-880e-8b4ea8cd5d66
- │ ├── input:
- │ │ ├── 0: ls
- │ │ ├── 1: ls
- │ │ └── 2: ls
- │ ├── prompt: ls;ls;ls
- │ ├── result: base.py⏎
- │ │ bash.html⏎
- │ │ bash.ipynb⏎
- │ │ bash.log⏎
- │ │ bash.pmpt.tpl⏎
- │ │ bash.pmpt.tpl~⏎
- │ │ bash.py⏎
- │ │ bash.py~⏎
- │ │ #parallel.py#⏎
- │ │ parallel.py⏎
- │ │ parallel.py~⏎
- │ │ selfask⏎
- │ │ selfask.html⏎
- │ │ selfask.ipynb⏎
- │ │ selfask.log⏎
- │ │ selfask.pmpt.tpl⏎
- │ │ selfask.py⏎
- │ │ story.py⏎
- │ │ story.py~⏎
- │ │ base.py⏎
- │ │ bash.html⏎
- │ │ bash.ipynb⏎
- │ │ bash.log⏎
- │ │ bash.pmpt.tpl⏎
- │ │ bash.pmpt.tpl~⏎
- │ │ bash.py⏎
- │ │ bash.py~⏎
- │ │ #parallel.py#⏎
- │ │ parallel.py⏎
- │ │ parallel.py~⏎
- │ │ selfask⏎
- │ │ selfask.html⏎
- │ │ selfask.ipynb⏎
- │ │ selfask.log⏎
- │ │ selfask.pmpt.tpl⏎
- │ │ selfask.py⏎
- │ │ story.py⏎
- │ │ story.py~⏎
- │ │ base.py⏎
- │ │ bash.html⏎
- │ │ bash.ipynb⏎
- │ │ bash.log⏎
- │ │ bash.pmpt.tpl⏎
- │ │ bash.pmpt.tpl~⏎
- │ │ bash.py⏎
- │ │ bash.py~⏎
- │ │ #parallel.py#⏎
- │ │ parallel.py⏎
- │ │ parallel.py~⏎
- │ │ selfask⏎
- │ │ selfask.html⏎
- │ │ selfask.ipynb⏎
- │ │ selfask.log⏎
- │ │ selfask.pmpt.tpl⏎
- │ │ selfask.py⏎
- │ │ story.py⏎
- │ │ story.py~⏎
- │ │
- │ │ └── returned: base.py⏎
- │ │ bash.html⏎
- │ │ bash.ipynb⏎
- │ │ bash.log⏎
- │ │ bash.pmpt.tpl⏎
- │ │ bash.pmpt.tpl~⏎
- │ │ bash.py⏎
- │ │ bash.py~⏎
- │ │ #parallel.py#⏎
- │ │ parallel.py⏎
- │ │ parallel.py~⏎
- │ │ selfask⏎
- │ │ selfask.html⏎
- │ │ selfask.ipynb⏎
- │ │ selfask.log⏎
- │ │ selfask.pmpt.tpl⏎
- │ │ selfask.py⏎
- │ │ story.py⏎
- │ │ story.py~⏎
- │ │ base.py⏎
- │ │ bash.html⏎
- │ │ bash.ipynb⏎
- │ │ bash.log⏎
- │ │ bash.pmpt.tpl⏎
- │ │ bash.pmpt.tpl~⏎
- │ │ bash.py⏎
- │ │ bash.py~⏎
- │ │ #parallel.py#⏎
- │ │ parallel.py⏎
- │ │ parallel.py~⏎
- │ │ selfask⏎
- │ │ selfask.html⏎
- │ │ selfask.ipynb⏎
- │ │ selfask.log⏎
- │ │ selfask.pmpt.tpl⏎
- │ │ selfask.py⏎
- │ │ story.py⏎
- │ │ story.py~⏎
- │ │ base.py⏎
- │ │ bash.html⏎
- │ │ bash.ipynb⏎
- │ │ bash.log⏎
- │ │ bash.pmpt.tpl⏎
- │ │ bash.pmpt.tpl~⏎
- │ │ bash.py⏎
- │ │ bash.py~⏎
- │ │ #parallel.py#⏎
- │ │ parallel.py⏎
- │ │ parallel.py~⏎
- │ │ selfask⏎
- │ │ selfask.html⏎
- │ │ selfask.ipynb⏎
- │ │ selfask.log⏎
- │ │ selfask.pmpt.tpl⏎
- │ │ selfask.py⏎
- │ │ story.py⏎
- │ │ story.py~⏎
- │ │
-
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Eydie Gorme And Los Panchos 24 Grandes Exitos Full Album Zip UPD.md b/spaces/stomexserde/gpt4-ui/Examples/Eydie Gorme And Los Panchos 24 Grandes Exitos Full Album Zip UPD.md
deleted file mode 100644
index adc9e58a1d9fc53dbe53952e01d80b8d053eaeec..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Eydie Gorme And Los Panchos 24 Grandes Exitos Full Album Zip UPD.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
e93f5a0c3f
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Fix Generator Samsung Clp 365 V..md b/spaces/stomexserde/gpt4-ui/Examples/Fix Generator Samsung Clp 365 V..md
deleted file mode 100644
index 38224901e088280f16243435e2e8c71dde6e69d6..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Fix Generator Samsung Clp 365 V..md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
How to Fix Generator Samsung Clp 365 V: A Step-by-Step Guide
-
If you own a Samsung Clp 365 V printer, you may encounter some issues with the generator, which is the part that transfers the toner from the cartridge to the paper. The generator may malfunction due to various reasons, such as dust, wear and tear, or electrical problems. When this happens, you may notice poor print quality, streaks, or blank pages. Fortunately, you can fix the generator yourself by following these simple steps.
-
What You Need
-
Before you start, make sure you have the following tools and materials:
A new generator (you can buy one online from Samsung Parts or other authorized dealers)
A Phillips screwdriver and a flathead screwdriver (used in Steps 3 and 4 to remove and reinstall the generator)
-
-
Step 1: Turn Off and Unplug the Printer
-
The first thing you need to do is to turn off and unplug the printer from the power source. This will prevent any electric shocks or damage to the printer. Wait for at least 10 minutes before proceeding to the next step.
-
Step 2: Open the Front Cover and Remove the Toner Cartridges
-
Next, you need to open the front cover of the printer by pulling it down gently. You will see four toner cartridges inside: black, cyan, magenta, and yellow. To remove them, press the release lever on each cartridge and pull it out carefully. Place them on a clean surface and avoid touching the toner.
-
Step 3: Remove the Generator
-
Now you need to remove the generator from the printer. The generator is a long metal bar that runs across the width of the printer. It has four contacts on each end that connect to the toner cartridges. To remove it, use a Phillips screwdriver to unscrew the two screws on each side of the generator. Then use a flathead screwdriver to pry off the plastic clips that hold it in place. Be careful not to damage the contacts or the wires. Once you have removed the generator, set it aside.
-
Step 4: Install the New Generator
-
To install the new generator, follow the reverse steps of removing the old one. Align the contacts of the new generator with the slots on each side of the printer. Push it in firmly until it snaps into place. Then secure it with the screws and clips that you removed earlier. Make sure there are no loose wires or connections.
-
Step 5: Reinstall the Toner Cartridges and Close the Front Cover
-
The final step is to reinstall the toner cartridges and close the front cover. To do this, insert each cartridge into its corresponding slot and push it in until it clicks. Then close the front cover by lifting it up gently until it locks.
-
-
Step 6: Test Your Printer
-
You have successfully fixed your generator Samsung Clp 365 V! Now you can test your printer by plugging it in and turning it on. Print a test page or a document and check if the print quality has improved. If not, you may need to contact Samsung customer service or a professional technician for further assistance.
e93f5a0c3f
-
-
\ No newline at end of file
diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/roles/test_ui.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/roles/test_ui.py
deleted file mode 100644
index 285bff3231d852e65df0109999f1701dc7cff099..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MetaGPT/tests/metagpt/roles/test_ui.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Date : 2023/7/22 02:40
-# @Author : stellahong (stellahong@fuzhi.ai)
-#
-from metagpt.software_company import SoftwareCompany
-from metagpt.roles import ProductManager
-
-from tests.metagpt.roles.ui_role import UI
-
-
-def test_add_ui():
- ui = UI()
- assert ui.profile == "UI Design"
-
-
-async def test_ui_role(idea: str, investment: float = 3.0, n_round: int = 5):
- """Run a startup. Be a boss."""
- company = SoftwareCompany()
- company.hire([ProductManager(), UI()])
- company.invest(investment)
- company.start_project(idea)
- await company.run(n_round=n_round)
diff --git a/spaces/subhc/Guess-What-Moves/mask_former/modeling/heads/mask_former_head.py b/spaces/subhc/Guess-What-Moves/mask_former/modeling/heads/mask_former_head.py
deleted file mode 100644
index 095b0e625975429e35276713af6974bf9846a12c..0000000000000000000000000000000000000000
--- a/spaces/subhc/Guess-What-Moves/mask_former/modeling/heads/mask_former_head.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-from copy import deepcopy
-from typing import Callable, Dict, List, Optional, Tuple, Union
-
-import fvcore.nn.weight_init as weight_init
-from torch import nn
-from torch.nn import functional as F
-
-from detectron2.config import configurable
-from detectron2.layers import Conv2d, ShapeSpec, get_norm
-from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
-
-from ..transformer.transformer_predictor import TransformerPredictor
-from .pixel_decoder import build_pixel_decoder
-
-
-@SEM_SEG_HEADS_REGISTRY.register()
-class MaskFormerHead(nn.Module):
-
- _version = 2
-
- def _load_from_state_dict(
- self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
- ):
- version = local_metadata.get("version", None)
- if version is None or version < 2:
- # Do not warn if train from scratch
- scratch = True
- logger = logging.getLogger(__name__)
- for k in list(state_dict.keys()):
- newk = k
- if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
- newk = k.replace(prefix, prefix + "pixel_decoder.")
- # logger.debug(f"{k} ==> {newk}")
- if newk != k:
- state_dict[newk] = state_dict[k]
- del state_dict[k]
- scratch = False
-
- if not scratch:
- logger.warning(
- f"Weight format of {self.__class__.__name__} has changed! "
- "Please upgrade your models. Applying automatic conversion now ..."
- )
-
- @configurable
- def __init__(
- self,
- input_shape: Dict[str, ShapeSpec],
- *,
- num_classes: int,
- pixel_decoder: nn.Module,
- loss_weight: float = 1.0,
- ignore_value: int = -1,
- # extra parameters
- transformer_predictor: nn.Module,
- transformer_in_feature: str,
- ):
- """
- NOTE: this interface is experimental.
- Args:
- input_shape: shapes (channels and stride) of the input features
- num_classes: number of classes to predict
- pixel_decoder: the pixel decoder module
- loss_weight: loss weight
- ignore_value: category id to be ignored during training.
- transformer_predictor: the transformer decoder that makes prediction
- transformer_in_feature: input feature name to the transformer_predictor
- """
- super().__init__()
- input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
- self.in_features = [k for k, v in input_shape]
- feature_strides = [v.stride for k, v in input_shape]
- feature_channels = [v.channels for k, v in input_shape]
-
- self.ignore_value = ignore_value
- self.common_stride = 4
- self.loss_weight = loss_weight
-
- self.pixel_decoder = pixel_decoder
- self.predictor = transformer_predictor
- self.transformer_in_feature = transformer_in_feature
-
- self.num_classes = num_classes
-
- @classmethod
- def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
- return {
- "input_shape": {
- k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
- },
- "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
- "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
- "pixel_decoder": build_pixel_decoder(cfg, input_shape),
- "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
- "transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
- "transformer_predictor": TransformerPredictor(
- cfg,
- cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
- if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder"
- else input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels,
- mask_classification=True,
- ),
- }
-
- def forward(self, features):
- return self.layers(features)
-
- def layers(self, features):
- mask_features, transformer_encoder_features = self.pixel_decoder.forward_features(features)
- if self.transformer_in_feature == "transformer_encoder":
- assert (
- transformer_encoder_features is not None
- ), "Please use the TransformerEncoderPixelDecoder."
- predictions = self.predictor(transformer_encoder_features, mask_features)
- else:
- predictions = self.predictor(features[self.transformer_in_feature], mask_features)
- # predictions['features'] = mask_features
- return predictions
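
To make the version-2 key migration in `_load_from_state_dict` concrete, here is a tiny standalone illustration of the renaming it performs; the checkpoint keys are invented for the example:

```python
# Hypothetical old-format keys: pixel-decoder weights used to live directly under
# "sem_seg_head." and are moved under "sem_seg_head.pixel_decoder." (predictor keys stay put).
prefix = "sem_seg_head."
state_dict = {
    "sem_seg_head.adapter_1.weight": 1,
    "sem_seg_head.predictor.query_embed.weight": 2,
}

for k in list(state_dict.keys()):
    if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
        newk = k.replace(prefix, prefix + "pixel_decoder.")
        if newk != k:
            state_dict[newk] = state_dict.pop(k)

print(sorted(state_dict))
# ['sem_seg_head.pixel_decoder.adapter_1.weight', 'sem_seg_head.predictor.query_embed.weight']
```
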
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Pesme Za Karaoke Van Basco Domace 36.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Pesme Za Karaoke Van Basco Domace 36.md
deleted file mode 100644
index 09e454aea3760606f97dccd99d2a8c62e79a99f4..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Pesme Za Karaoke Van Basco Domace 36.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
Grab free prepaid and postpaid mobile internet data in Turkey. Thanks Pesme za karaoke van basco domace free download. http://www.asiacgt.org/profile/Pesme-Za-Karaoke-Van-Basco-Domace-36/profile get data. Press question mark to get here.
http://tfarok2.com/en/tune/32/download/iframe.1_2017.html https://mixcloud.com/poketmakan/pesme-pe-cha-karaoke-kosomen-vanbasco-domace/ pesmecha pesmecha karaoke domace vanbasco dervichekadem fotoekst in new york ie6 erfahrung.
-
http://www.zeitvierzeichen.de/de/?tag=Pesme%20Za%20Karaoke%20Van%20Basco%20Domace%2036/ https://spoj.quirkol.info/download_pesme-za-karaoke-van-basco-domace-36-gubu.jpg itunes installation assistant pesme za karaoke domace basco vancity kontact 007 pro mod? pesme za karaoke van basco domace rapidshare rar domace runtime errr.
-
https://www.bbc.com/news/world-39169543 pesme za karaoke domace pasar marinero https://trello.com/c/enCFdsxX/44-pesme-za-karaoke-van-basco-domace-360-download https://sommerzaubermarkt.de/index.php/component/k2/item/200. https://trello.com/c/fNbdhGmY/39-pesme-zas-shen-karaoke-wusu-domace-dervichekadem-nisen-iray.
-
Includes a brief biographical essay. In the many years that followed, the record label continue to be one of the best known labels in the eastern part of Europe, and in the media. https://coub.com/stories/2963929-pesme-za-karaoke-van-basco-domace-rapidshare.
-
-
S. Pesme Za Karaoke Van Basco Domace Rapidshare Sortir Hors De Son Corps Akhena Pdf 60.06.2020 (9K) By Ralf Charest PDF. https://coub.com/stories/2963929-pesme-za-karaoke-van-basco-domace-rapidshare.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/RapportinoDiLavoroGiornalieropdf.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/RapportinoDiLavoroGiornalieropdf.md
deleted file mode 100644
index b63fcba6190cb3a7ebd61ea3c3cbde0d3898ca94..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/RapportinoDiLavoroGiornalieropdf.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
KomodoHackeur 7b17bfd26b tidak jemat. Merdeka-Makaskib 7b17bfd26b https://trello.com/c/PUvsJ7Zv/21-rapportinodilavorogiornalieropdf mochimochi 2022215. Results 1 - 26 of 26.. http://ramndiplomacy.website/download/rapportinodilavorogiornalieropdf. it`s all right 5b17bfd26b https://hackerone.com/reports/195866 http://www.vulnerable-application.com/rapportinodilavorogiornalieropdf/abay44-4019/abay44-4019. Address 1 = http://eracl_komodo.sky.in/profile/rapportinodilavorogiornalieropdf/profile; Address2=
-
Jonathan Funches 7b17bfd26b https://trello.com/c/IH5Nqm7t/23-rapportinodilavorogiornalieropdf abinl 5 months ago. There is a template for RapportinoDiLavoroGiornalieropdf.. https://lh3.googleusercontent.com/wL9ygG-8_Q9WPhfKK0m8NNBAslhKbch4HXd2i87Q8Ae_3Gh-jhEZXez1qAQkVLVxhVXksJZM7W7I0rw9VFUJn8I3EkBQAq7-5G7LHc3XeLfB6LuW8hS9y1A3i58j_I=w1920-h1080-rw-no 0 0 0
-
RapportinoDiLavoroGiornalieropdf and its free version.. It needs to be managed, entered and annotated by the users.. Drawing shapes (circles, boxes,.... https://www.google.com/search?q=RapportinoDiLavoroGiornalieropdf free desktop annotation app&rlz=1C1GLE_enUS89US89_orvUD9PUS898&ved=2ahUKEwi6y5nLm9pZAhU1eHJEMbAZYQQ0EoAQhYAw&biw=1366&bih=644&dpr=2.4
-
- d5da3c52bf
-
-
-
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Corporate Chanakya Pdf Download !!INSTALL!!.md b/spaces/terfces0erbo/CollegeProjectV2/Corporate Chanakya Pdf Download !!INSTALL!!.md
deleted file mode 100644
index f6e95458139191d7221d38f2b7f1ae7644ec26b1..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Corporate Chanakya Pdf Download !!INSTALL!!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
Download Chanakya Niti Book (Chanakya Niti) in PDF format. Chanakya was an Indian philosopher and polymath, a strategic advisor, author and statesman. His work Arthasastra refers to an entire body of texts known today as Arthashastra. The emperor Chandragupta Maurya, who himself wrote and was a great patron of the Arthashastra, called Chanakya "Neeti-Pradah". The original work, however, is known today as the "Chanakya Niti". In some extant versions, the title has been attributed to Chanakya, while others claim it was written by the statesman Varahamihira.
-
There were two rulers who took power after Chandragupta died without a known heir. the Haryanvi Prince Kharavela ended the Mauryan Empire, and had the greatest impact on the country of India and the Indian subcontinent. " The Arthashashastra is divided into many sections, as the title indicates, and is a guidebook, a treatise, for all the things that are necessary for a king, and a king should be the book of all. Kautilya was an author of the Arthashastra in the 5th century BCE, born in a Shakya family. The Arthashastra is a highly popular and influential text for both the early modern and contemporary Indian economies, influencing government policy, the military, and corporate management. Most of the practices highlighted by Kautilya have remained unchanged. Though the Arthashashtra has considerable influence, it is difficult to define Kautilya's influence in certain times and communities, as the text was written in Sanskrit and he also wrote a book on the policy of King Ashoka, the Edicts of Ashoka (Ashokan edicts).Kautilya's advice includes political and administrative prescriptions for a king. Chanakya Niti Pdf Download. Fourth 90 minutes for meeting with ministers. Fifth 90 minutes for correspondence. Sixth 90 minutes for lunch. Kautilya goes on to describe an exhausting schedule in which the king has roughly four and half hours to sleep and the rest of the time is almost entirely involved in running the kingdom. Sanskrit language. Oracle of the East Series, Volume 2. Manas Navghan, Hindavi Neeti. The Arthashastra is divided into many sections, as the title indicates, and is a guidebook, a treatise, for all the things that are necessary for a king, and a king should be the book of all. Chanakya was the author of Arthashashtra (4th century BC), a treatise on polity. " In the 4th century BC, Chanakya documented his ideas on leadership and strategy in the Arthashashtra. In its sections on Leadership, Management & Training, Corporate Chanakya applies Chanakya's Wisdom across a host of areas including business organisation, strategy, decision making, finance, time management and responsibilities of a leader. Gain from this guide and discover the leader in you...
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Culegere Matematica Ion Petrica Pdf Free.md b/spaces/terfces0erbo/CollegeProjectV2/Culegere Matematica Ion Petrica Pdf Free.md
deleted file mode 100644
index c54b6af0a6f1e154acc72902b5694f794a644609..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Culegere Matematica Ion Petrica Pdf Free.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Foxit PhantomPDF Business 9.7.1 Crack Patch [Latest]: A Complete PDF Solution for Business Users
-
-
PDF files are widely used in various fields and industries, such as education, business, law, medicine, and more. They are convenient, secure, and versatile, allowing users to create, view, edit, share, and sign documents with ease. However, not all PDF software are created equal. Some may lack the features, functions, or compatibility that users need. That's why many users look for a reliable and powerful PDF solution that can meet their demands and expectations.
-
-
One of the best PDF solutions in the market today is Foxit PhantomPDF Business 9.7.1. It is a comprehensive and advanced PDF software that offers a full range of features and tools for business users. It can help users create professional-looking PDF documents and forms, edit and modify PDF content, collaborate and review PDF files with others, secure and protect PDF data, convert PDF files to and from other formats, and more.
-
Foxit PhantomPDF Business 9.7.1 Crack Patch [Latest]
However, Foxit PhantomPDF Business 9.7.1 is not a free software. Users have to pay a license fee to use it legally and safely. But what if you want to enjoy the benefits of Foxit PhantomPDF Business 9.7.1 without paying anything? Is there a way to get Foxit PhantomPDF Business 9.7.1 for free?
-
-
The answer is yes. You can use Foxit PhantomPDF Business 9.7.1 crack patch [latest] to activate the full version of the software without paying any fees. A crack patch is a modified file that bypasses the license verification process of the software and allows you to use it as if you have purchased it.
-
-
In this article, we will show you how to download and install Foxit PhantomPDF Business 9.7.1 crack patch [latest] and what benefits and risks it can bring to your PDF experience.
-
-
How to download and install Foxit PhantomPDF Business 9.7.1 crack patch [latest]?
-
-
To download and install Foxit PhantomPDF Business 9.7.1 crack patch [latest], follow these steps:
-
-
-
Go to one of the websites that offer Foxit PhantomPDF Business 9.7.1 crack patch [latest], such as FileCR, The Pirate City, or SoundCloud.
-
Click on the download link and save the zip file to your computer.
-
Extract the zip file using a program like WinRAR or 7-Zip.
-
Run the setup.exe file and follow the installation wizard.
-
Copy the patch.exe file from the zip file and paste it into the installation folder of Foxit PhantomPDF Business 9.7.1. Usually, it is located at C:\Program Files\Foxit Software\Foxit PhantomPDF.
-
Run the patch.exe file as administrator and click on the patch button.
-
Launch Foxit PhantomPDF Business 9.7.1 and enjoy its features.
-
-
-
What are the benefits of using Foxit PhantomPDF Business 9.7.1 crack patch [latest]?
-
-
By using Foxit PhantomPDF Business 9.7.1 crack patch [latest], you can enjoy the following benefits:
-
-
-
You can save money by not paying for the license fee of Foxit PhantomPDF Business 9.7.1.
-
You can access all the features and functions of Foxit PhantomPDF Business 9.7.1 without any limitations or restrictions.
-
You can improve your PDF experience by using a comprehensive and advanced PDF software that supports various formats, devices, systems, and standards.
-
-
-
What are the risks of using Foxit PhantomPDF Business 9.7.1 crack patch [latest]?
-
-
However, using Foxit PhantomPDF Business 9.7.1 crack patch [latest] also comes with some risks, such as:
-
-
-
-
You may violate the copyright law and face legal consequences if you use cracked software without permission from the original developer.
-
You may expose your computer to viruses, malware, or spyware that may be hidden in the cracked software or the websites that offer it.
-
You may experience errors, bugs, or crashes that may affect the performance or functionality of Foxit PhantomPDF Business 9.7.1 or your PDF files.
-
You may not receive any updates, support, or warranty from Foxit Software if you use cracked software.
-
-
-
Therefore, we recommend that you use Foxit PhantomPDF Business 9.7.1 crack patch [latest] at your own risk and discretion. Alternatively, you can purchase the original version of Foxit PhantomPDF Business 9.7.1 from Foxit Software or its authorized dealers and enjoy its features legally and safely.
-
-
Conclusion
-
-
Foxit PhantomPDF Business 9.7.1 is a complete PDF solution that offers full-featured editing, security, and deployment support for business users. You can use Foxit PhantomPDF Business 9.7.1 crack patch [latest] to activate the full version of the software without paying any fees.
-
-
However, you should also be aware of the risks and consequences of using cracked software, such as legal issues, security threats, performance problems, and lack of updates and support.
-
-
We hope this article has helped you understand how to download and install Foxit PhantomPDF Business 9.7.1 crack patch [latest] and what benefits and risks it can bring to your PDF experience.
-
What are the features and functions of Foxit PhantomPDF Business 9.7.1?
-
-
Foxit PhantomPDF Business 9.7.1 is a comprehensive and advanced PDF software that offers a full range of features and tools for business users. Some of the main features and functions of Foxit PhantomPDF Business 9.7.1 are:
-
-
-
Create, edit, manage, and share PDF documents and forms with ease and efficiency.
-
Convert PDF files to and from other formats, such as Microsoft Word, Excel, PowerPoint, HTML, TXT, RTF, and more.
-
Collaborate and review PDF files with others using shared review, commenting, annotation, and markup tools.
-
Secure and protect PDF files with password, encryption, digital signature, redaction, watermark, and permission settings.
-
Optimize and compress PDF files to reduce file size and improve performance.
-
Create and validate PDF files that comply with various standards, such as PDF/A, PDF/E, PDF/X, ISO 32000-1:2008.
-
Add bates numbering to PDF files for easy identification and indexing.
-
Scan and OCR paper documents to create searchable and editable PDF files.
-
Edit text and images in scanned or image-based PDF files using OCR technology.
-
Insert objects, images, videos, links, bookmarks, headers, footers, page numbers, backgrounds, and more to enhance your PDF documents.
-
Create and fill out electronic forms using form design, recognition, and distribution tools.
-
Create and manage PDF portfolios that contain multiple file types.
-
Integrate with cloud services, such as Google Drive, Dropbox, Box, OneDrive, SharePoint, DocuSign, Evernote, etc.
-
-
-
How to use Foxit PhantomPDF Business 9.7.1 crack patch [latest] safely?
-
-
While using Foxit PhantomPDF Business 9.7.1 crack patch [latest] can save you money and give you access to all the features of the software, it also comes with some risks that you should be aware of. To use Foxit PhantomPDF Business 9.7.1 crack patch [latest] safely, you should follow these tips:
-
-
-
Download Foxit PhantomPDF Business 9.7.1 crack patch [latest] only from trusted sources that have positive reviews and feedback from other users.
-
Scan the downloaded file with a reliable antivirus or anti-malware program before opening or installing it.
-
Disable your internet connection or firewall before running the patch.exe file to avoid any interference or detection by the software or its developer.
-
Create a backup of your important PDF files before using Foxit PhantomPDF Business 9.7.1 crack patch [latest] in case of any errors or problems that may damage or corrupt your files.
-
Do not update Foxit PhantomPDF Business 9.7.1 after applying the crack patch [latest] as it may deactivate the software or cause some issues.
-
-
-
By following these tips, you can use Foxit PhantomPDF Business 9.7.1 crack patch [latest] safely and enjoy its features without any worries.
-
How to use Foxit PhantomPDF Business 9.7.1 for PDF creation and editing?
-
-
Foxit PhantomPDF Business 9.7.1 is a user-friendly and versatile PDF software that allows you to create and edit PDF files with ease and efficiency. Here are some steps on how to use Foxit PhantomPDF Business 9.7.1 for PDF creation and editing:
-
-
-
To create a PDF file from scratch, click on the File tab and select Create > Blank. You can also create a PDF file from other sources, such as Microsoft Office, Outlook, Visio, HTML, TXT, RTF, etc., by clicking on the File tab and selecting Create > From File.
-
To edit a PDF file, open it in Foxit PhantomPDF Business 9.7.1 and use the tools in the Edit tab to modify the text, images, objects, links, bookmarks, headers, footers, backgrounds, etc. You can also use the tools in the Organize tab to manage the pages of the PDF file, such as insert, delete, crop, rotate, split, merge, etc.
-
To save a PDF file, click on the File tab and select Save or Save As. You can also export a PDF file to other formats, such as Word, Excel, PowerPoint, HTML, TXT, RTF, etc., by clicking on the File tab and selecting Export > To.
-
-
-
By using Foxit PhantomPDF Business 9.7.1 for PDF creation and editing, you can produce professional-looking PDF documents and forms with ease and efficiency.
-
-
How to use Foxit PhantomPDF Business 9.7.1 for PDF collaboration and review?
-
-
Foxit PhantomPDF Business 9.7.1 is a collaborative and interactive PDF software that allows you to work with others on PDF files with ease and speed. Here are some steps on how to use Foxit PhantomPDF Business 9.7.1 for PDF collaboration and review:
-
-
-
To share a PDF file with others, click on the Share tab and select one of the options to send the PDF file via email or cloud services, such as Google Drive, Dropbox, Box, OneDrive, SharePoint, DocuSign, Evernote, etc.
-
To initiate a shared review of a PDF file with others, click on the Share tab and select Shared Review > Start Shared Review. You can choose to host the shared review on a network folder or a cloud service and invite reviewers by email or link.
-
To comment on a PDF file with others, click on the Comment tab and use the tools to add annotations, markups, stamps, drawings, shapes, etc., to the PDF file. You can also reply to or summarize comments from other reviewers.
-
To sign a PDF file with others, click on the Protect tab and select Sign & Certify > Place Signature or Place Certificate Signature. You can choose to sign the PDF file with an ink signature or a digital signature.
-
-
-
By using Foxit PhantomPDF Business 9.7.1 for PDF collaboration and review, you can work with others on PDF files with ease and speed.
-
Conclusion
-
-
Foxit PhantomPDF Business 9.7.1 is a complete PDF solution that offers full-featured editing, security, and deployment support for business users. You can use Foxit PhantomPDF Business 9.7.1 crack patch [latest] to activate the full version of the software without paying any fees.
-
-
However, you should also be aware of the risks and consequences of using cracked software, such as legal issues, security threats, performance problems, and lack of updates and support.
-
-
We hope this article has helped you understand how to download and install Foxit PhantomPDF Business 9.7.1 crack patch [latest] and how to use it for PDF creation, editing, collaboration, and review.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Install and Activate Microsoft Office for Mac Without Cracking It A Reddit Guide.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Install and Activate Microsoft Office for Mac Without Cracking It A Reddit Guide.md
deleted file mode 100644
index be30597ac26541c38af6c1d23e30d2f76f1047cc..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Install and Activate Microsoft Office for Mac Without Cracking It A Reddit Guide.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
How to Crack Microsoft Office for Mac: A Guide for Beginners
-
If you want to use Microsoft Office for Mac without paying for a subscription, you might be tempted to look for a cracked version online. However, this can be risky and illegal, as you might end up with malware or viruses on your computer. Moreover, you might not be able to get updates or support from Microsoft if you use a cracked version.
Fortunately, there is a way to install and activate Microsoft Office for Mac without cracking it. This method involves downloading the official software from Microsoft and using a volume license serializer to bypass the sign-in process. This way, you can enjoy the full features of Microsoft Office for Mac without breaking the law or compromising your security.
-
In this article, we will show you how to do this step by step. Before we begin, make sure you have a Mac computer with an internet connection and enough disk space to install Microsoft Office.
-
Step 1: Download Microsoft Office for Mac from MacAdmins
-
The first step is to download the official Microsoft Office for Mac software from a reputable source. We recommend using MacAdmins, which is a website that provides links to official, unaltered products from Microsoft and other vendors. You can choose which version of Office you want to download, such as 2019 or 2021.
-
-
To download Microsoft Office for Mac from MacAdmins, follow these steps:
Click on the link that corresponds to the version of Office you want to download. For example, if you want Office 2019, click on Office 2019 Installer.pkg.
-
A new tab will open with a download page. Click on the Download button and wait for the file to download.
-
Once the download is complete, locate the file in your Downloads folder and double-click on it to launch the installer.
-
Follow the on-screen instructions to install Microsoft Office for Mac on your computer. Do not open any of the Office apps yet.
-
-
Step 2: Download and Run a Volume License Serializer
-
The next step is to download and run a volume license serializer, which is a small file that allows you to activate Microsoft Office for Mac without signing in with a Microsoft account. A volume license serializer is usually used by organizations that have multiple computers that need to run Office.
-
You can find a volume license serializer for your version of Office on GitHub, which is a platform that hosts open-source projects. Here is how to do it:
Scroll down and find the file that matches your version of Office. For example, if you have Office 2019, look for Microsoft_Office_2019_VL_Serializer.pkg.
-
Click on the file name and then click on the Download button on the right side of the page.
-
Once the download is complete, locate the file in your Downloads folder and double-click on it to launch the serializer.
-
Follow the on-screen instructions to apply the volume license serializer to your Office installation. You might need to enter your administrator password.
-
-
Step 3: Enjoy Microsoft Office for Mac
-
The final step is to open any of the Office apps and enjoy using them without signing in. You should see a new button that says Skip Sign In, which means that the volume license serializer has worked. You can now use all the features of Microsoft Office for Mac without any limitations.
-
Note that this method does not allow you to update your Office apps automatically. If you want to get updates, you will need to repeat these steps with newer versions of the software and the volume license serializer.
ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Coolios Gangster Paradise - The Full Song Download You Need.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Coolios Gangster Paradise - The Full Song Download You Need.md
deleted file mode 100644
index cd2c844487ea613cfee672593a6cf4df4bd6d569..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Coolios Gangster Paradise - The Full Song Download You Need.md
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
How to Download Gangster Paradise Full Song by Coolio
-
Gangster Paradise is one of the most iconic rap songs of all time. It was released in 1995 by Coolio, featuring L.V., and was the theme song for the movie Dangerous Minds. The song tells the story of a young man who lives in a violent and hopeless urban environment, where he struggles to survive and find meaning in life. The song samples the chorus and music of Stevie Wonder's Pastime Paradise, and adds a dark and gritty twist to it.
If you are a fan of Gangster Paradise, or you want to discover this classic rap song, you might be wondering how to download it to your device. There are many ways to download Gangster Paradise full song, depending on your preferences and needs. In this article, we will show you how to download Gangster Paradise from three different sources: Internet Archive, YouTube, and Spotify. We will also explain the benefits of downloading Gangster Paradise, such as enjoying it offline, saving data, and supporting the artist.
-
Internet Archive
-
Internet Archive is a non-profit digital library that offers free access to millions of books, movies, music, software, and more. It also preserves historical and cultural artifacts for future generations. One of the treasures you can find on Internet Archive is the full album of Coolio's Gangsta's Paradise, which includes the title track and other songs by the rapper.
-
Here is how to download Gangster Paradise from Internet Archive:
-
-
Go to [1](https://archive.org/details/coolio-gangstas-paradise) on your browser.
-
Scroll down to find the track list and click on the song "Gangsta's Paradise (Ft. L.V.)".
-
A media player will appear on the right side of the screen. Click on the three dots icon below the player and select "Download".
-
A new tab will open with a list of download options. Choose the format you prefer, such as MP3 or OGG, and click on it.
-
The download will start automatically. You can then save the file to your device and enjoy listening to Gangster Paradise anytime.
-
-
YouTube
-
YouTube is the most popular video-sharing platform in the world. It allows users to upload, watch, comment, and share videos of various genres and topics. You can also find music videos on YouTube, including the official music video of Gangster Paradise by Coolio.
-
gangster paradise full song download mp3
-gangster paradise full song download free
-gangster paradise full song download 320kbps
-gangster paradise full song download pagalworld
-gangster paradise full song download mr jatt
-gangster paradise full song download coolio
-gangster paradise full song download audio
-gangster paradise full song download video
-gangster paradise full song download youtube
-gangster paradise full song download online
-gangster paradise full song download zip
-gangster paradise full song download ringtone
-gangster paradise full song download lyrics
-gangster paradise full song download instrumental
-gangster paradise full song download remix
-gangster paradise full song download hd
-gangster paradise full song download skull
-gangster paradise full song download wapking
-gangster paradise full song download djpunjab
-gangster paradise full song download djmaza
-gangster paradise full song download waploaded
-gangster paradise full song download fakaza
-gangster paradise full song download naijaloaded
-gangster paradise full song download tubidy
-gangster paradise full song download musicpleer
-gangster paradise full song download spotify
-gangster paradise full song download apple music
-gangster paradise full song download amazon music
-gangster paradise full song download soundcloud
-gangster paradise full song download gaana
-gangster paradise full song download wynk music
-gangster paradise full song download hungama music
-gangster paradise full song download jiosaavn
-gangster paradise full song download rhapsody
-gangster paradise full song download pandora
-gangster paradise full song download deezer
-gangster paradise full song download tidal
-gangster paradise full song download napster
-gangster paradise full song download iheartradio
-gangster paradise full song download shazam
-gangster paradise full song download musixmatch
-gangster paradise full song download genius lyrics
-gangster paradise full song download azlyrics
-gangster paradise full song download metrolyrics
-gangster paradise full song download lyricstranslate
-gangster paradise full song download karaoke version
-gangster paradise full song download backing track
-gangster paradise full song download sheet music
-gangster paradise full song download chords and tabs
-
Here is how to download Gangster Paradise from YouTube:
-
-
Go to [2](https://www.youtube.com/watch?v=fPO76Jlnz6c) on your browser.
-
Copy the URL of the video from the address bar.
-
Go to a YouTube video downloader website, such as [3](https://ytmp3.cc/en13/).
-
Paste the URL of the video into the input box and click on "Convert".
-
Wait for a few seconds until the conversion is done - The website will show you a preview of the video and a download button. Click on the download button and choose the quality and format you want, such as MP3 or MP4.
-
The download will start automatically. You can then save the file to your device and enjoy watching or listening to Gangster Paradise anytime.
-
-
Spotify
-
Spotify is one of the most popular music streaming services in the world. It allows users to access millions of songs, podcasts, and playlists from various artists and genres. You can also find Gangster Paradise by Coolio on Spotify, along with his other albums and songs.
-
Here is how to download Gangster Paradise from Spotify:
-
-
Go to [4](https://open.spotify.com/track/3u9fHuAtjMY1RW2mZfO4Cf) on your browser or open the Spotify app on your device.
-
If you are not a Spotify Premium subscriber, you will need to sign up for a free trial or a paid plan to download songs from Spotify. You can do this by clicking on the "Upgrade" button on the top right corner of the screen or following the instructions on the app.
-
Once you are a Spotify Premium subscriber, you can download Gangster Paradise by clicking on the heart icon next to the song title. This will add the song to your library.
-
Go to your library and find the song under "Liked Songs". Click on the download toggle switch next to the song title. The song will start downloading to your device.
-
You can then listen to Gangster Paradise offline anytime you want, as long as you have an active Spotify Premium subscription.
-
-
Conclusion
-
Gangster Paradise is a rap masterpiece that deserves to be in your music collection. It is a powerful and poignant song that reflects the harsh realities of life in the inner city. It is also a catchy and memorable tune that will make you nod your head and sing along.
-
There are many ways to download Gangster Paradise full song by Coolio, depending on your preferences and needs. You can use Internet Archive, YouTube, or Spotify to get the song for free or for a fee. Each option has its own advantages and disadvantages, so you can choose the one that suits you best.
-
What are you waiting for? Download Gangster Paradise today and enjoy the classic rap song that has stood the test of time. You won't regret it!
-
FAQs
-
What is the meaning of Gangster Paradise?
-
Gangster Paradise is a sarcastic term that refers to the miserable and dangerous living conditions of urban poor communities, where crime, violence, drugs, and death are rampant. The song uses irony and contrast to show how far from paradise these places are.
-
Who sings Gangster Paradise?
-
Gangster Paradise is sung by Coolio, an American rapper, actor, and producer. He is best known for his hit songs such as Fantastic Voyage, 1, 2, 3, 4 (Sumpin' New), and C U When U Get There. He has won several awards, including a Grammy, an MTV Video Music Award, and an American Music Award.
-
When was Gangster Paradise released?
-
Gangster Paradise was released in 1995 as the lead single from Coolio's second studio album, Gangsta's Paradise. It was also featured on the soundtrack of the movie Dangerous Minds, starring Michelle Pfeiffer. The song was a huge commercial success, reaching number one on several charts around the world.
-
Is Gangster Paradise based on a true story?
-
Gangster Paradise is not based on a specific true story, but it is inspired by Coolio's own experiences growing up in Compton, California. He has said that he wrote the song to express his feelings about the struggles and challenges he faced as a young black man in America. He also wanted to raise awareness and empathy for people who live in similar situations.
-
What are some other songs by Coolio?
-
Some other songs by Coolio are:
-
-
Fantastic Voyage: A funky and upbeat song that samples Lakeside's 1980 hit of the same name. It is about escaping from the troubles of life and having fun with friends.
-
1, 2, 3, 4 (Sumpin' New): A catchy and energetic song that samples Thelma Houston's Don't Leave Me This Way and The Gap Band's Outstanding. It is about enjoying life and dancing to the music.
-
C U When U Get There: A smooth and soulful song that samples Pachelbel's Canon in D and features 40 Thevz. It is about hoping to see a loved one again in heaven.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Bhatias Battery Of Performance Test Of Intelligence Pdf 673.md b/spaces/tioseFevbu/cartoon-converter/scripts/Bhatias Battery Of Performance Test Of Intelligence Pdf 673.md
deleted file mode 100644
index fd74206c81b51abfd3536bc0cd0087240d0c0bda..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Bhatias Battery Of Performance Test Of Intelligence Pdf 673.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
How to Use Bhatia's Battery of Performance Test of Intelligence Pdf 673
-
-
Bhatia's Battery of Performance Test of Intelligence (BBPTI) is a non-verbal test that measures the general intelligence of children and adolescents. It consists of five subtests: Koh's Block Design, Alexander's Pass-Along, Pattern Drawing, Picture Construction, and Immediate Memory. The test is suitable for language-incompatible or delayed subjects, as well as for literate and illiterate groups. It is easy to administer and score, and it has separate norms for boys and girls.
-
Bhatia's Battery Of Performance Test Of Intelligence Pdf 673
In this article, we will explain how to use Bhatia's Battery of Performance Test of Intelligence Pdf 673, which is a digital version of the test manual. You will learn how to prepare the test materials, administer the test, score the responses, and interpret the results.
-
-
Preparing the Test Materials
-
-
To use Bhatia's Battery of Performance Test of Intelligence Pdf 673, you will need the following materials:
-
-
-
A computer or tablet with a PDF reader software.
-
A printer and paper to print the test stimuli and scoring sheets.
-
A stopwatch or a timer to measure the time limit for each subtest.
-
A pencil and an eraser for the examiner and the examinee.
-
A quiet and well-lit room with a table and two chairs.
-
-
-
Before administering the test, you should print the test stimuli and scoring sheets from the PDF file. You should also familiarize yourself with the test instructions and scoring criteria for each subtest.
-
-
-
Administering the Test
-
-
To administer Bhatia's Battery of Performance Test of Intelligence Pdf 673, you should follow these steps:
-
-
-
Introduce yourself to the examinee and explain the purpose and nature of the test. Assure them that there are no right or wrong answers, and that they should try their best.
-
Start with Koh's Block Design subtest. Show the examinee a set of four wooden blocks with different colors on each face. Ask them to arrange the blocks to match a given pattern on a card. Give them one practice item and then 12 test items. Record their responses on the scoring sheet. The time limit for this subtest is 10 minutes.
-
Next, administer Alexander's Pass-Along subtest. Show the examinee a set of cards with different shapes on them. Ask them to pass along the cards one by one in a certain order according to a given rule. Give them one practice item and then 12 test items. Record their responses on the scoring sheet. The time limit for this subtest is 10 minutes.
-
Then, administer Pattern Drawing subtest. Show the examinee a set of cards with different patterns on them. Ask them to draw the patterns on a blank paper using a pencil. Give them one practice item and then 12 test items. Record their responses on the scoring sheet. The time limit for this subtest is 10 minutes.
-
After that, administer Picture Construction subtest. Show the examinee a set of cards with different parts of a picture on them. Ask them to arrange the cards to form a complete picture on a blank paper using a pencil. Give them one practice item and then 12 test items. Record their responses on the scoring sheet. The time limit for this subtest is 10 minutes.
-
Finally, administer the Immediate Memory subtest. Read out a series of sounds from the stimulus cards and ask the examinee to repeat them after you in the same order. Give them one practice item and then 12 test items. Record their responses on the scoring sheet. The time limit for this subtest is 10 minutes.
-
-
-
At the end of each subtest, praise the examinee for their effort and encourage them to continue with the next subtest.
-
-
Scoring the Responses
-
-
To score Bhatia's Battery of Performance Test of Intelligence Pdf 673, you should use the scoring criteria provided in the PDF file for each subtest. You should assign one point for each correct response and zero points for each incorrect or incomplete response. You should also note any errors or difficulties that the examinee had during the test.
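
To make the one-point-per-correct-item rule concrete, here is a minimal Python sketch of a raw-score tally. It is purely illustrative: the function name, the boolean per-item recording, and the example numbers are assumptions for this sketch, not part of the official BBPTI materials, and it does not attempt the IQ conversion, which requires the norm tables from the manual.

```python
# Hypothetical scoring helper for the BBPTI (illustration only, not from the manual).
# Assumes each subtest's item-level responses are recorded as booleans:
# True = correct (1 point), False = incorrect or incomplete (0 points).

SUBTESTS = [
    "Koh's Block Design",
    "Alexander's Pass-Along",
    "Pattern Drawing",
    "Picture Construction",
    "Immediate Memory",
]

def score_battery(responses: dict[str, list[bool]]) -> dict[str, int]:
    """Return the raw score per subtest plus a raw total (one point per correct item)."""
    scores = {name: sum(1 for item in responses.get(name, []) if item) for name in SUBTESTS}
    scores["Total"] = sum(scores[name] for name in SUBTESTS)
    return scores

# Example: an examinee who passed 9 of the 12 Block Design items, 7 Pass-Along items, and so on.
example = {
    "Koh's Block Design": [True] * 9 + [False] * 3,
    "Alexander's Pass-Along": [True] * 7 + [False] * 5,
    "Pattern Drawing": [True] * 10 + [False] * 2,
    "Picture Construction": [True] * 8 + [False] * 4,
    "Immediate Memory": [True] * 11 + [False] * 1,
}
print(score_battery(example))
```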
e93f5a0c3f
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Epson Px710w Adjustment Program.md b/spaces/tioseFevbu/cartoon-converter/scripts/Epson Px710w Adjustment Program.md
deleted file mode 100644
index 13e5a39b287a8c145da92df104ea65e67139b872..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Epson Px710w Adjustment Program.md
+++ /dev/null
@@ -1,166 +0,0 @@
-
-
-
-
What is Epson PX710W Adjustment Program?
-
-
-
If you own an Epson Stylus Photo PX710W printer, you may have encountered some problems that affect its performance or functionality. For example, you may see an error message saying that your printer's ink pads are at the end of their service life, or that your printer needs to be initialized. These problems can prevent you from printing or scanning your documents or photos.
Fortunately, there is a solution for these problems. It is called Epson PX710W Adjustment Program. This is a software tool that allows you to adjust and reset various settings and parameters of your printer. By using this tool, you can solve many common printer issues and improve your printer's performance.
-
-
-
Why do you need Epson PX710W Adjustment Program?
-
-
-
Epson PX710W Adjustment Program can help you with several printer problems that may occur over time. Some of these problems are:
-
-
Waste ink pad counter overflow. This happens when your printer's ink pads reach their maximum capacity and cannot absorb any more ink. When this happens, your printer will stop working and display an error message. To fix this problem, you need to reset the waste ink pad counter using Epson PX710W Adjustment Program.
-
Print head ID mismatch. This happens when your printer's print head ID does not match with the one stored in your printer's firmware. This can cause print quality issues or errors. To fix this problem, you need to set the print head ID using Epson PX710W Adjustment Program.
-
Printer initialization failure. This happens when your printer's firmware is corrupted or damaged. This can cause your printer to malfunction or not work at all. To fix this problem, you need to initialize the printer using Epson PX710W Adjustment Program.
-
-
By using Epson PX710W Adjustment Program, you can solve these problems and restore your printer's functionality. You can also save money and time by avoiding the need to replace your printer or take it to a service center.
-
-
-
How to download and install Epson PX710W Adjustment Program?
-
-
-
Downloading the program
-
To download Epson PX710W Adjustment Program, you need to find a reliable source that offers the program for free or for a reasonable price. One such source is Reset Printers, a website that provides various adjustment programs for different printer models. Here are the steps to download the program from this website:
Select the product that matches your printer model and click on "Add to cart".
-
Proceed to checkout and enter your payment details. You can pay with PayPal or credit card.
-
After completing the payment, you will receive an email with a download link and a license key for the program.
-
Click on the download link and save the program file on your computer.
-
-
-
-
Installing the program
-
To install Epson PX710W Adjustment Program, you need to follow these steps:
-
-
-
Extract the program file from the zip folder using a software like WinRAR or 7-Zip.
-
Disable your antivirus software temporarily, as it may interfere with the installation process.
-
Run the program file as administrator by right-clicking on it and selecting "Run as administrator".
-
Enter the license key that you received in your email and click on "OK".
-
Follow the instructions on the screen to complete the installation.
-
Enable your antivirus software again after the installation is done.
-
-
-
How to use Epson PX710W Adjustment Program?
-
-
-
Running the program
-
To run Epson PX710W Adjustment Program, you need to follow these steps:
-
-
Make sure your printer is connected to your computer and turned on.
-
Open the program folder and double-click on the "AdjProg.exe" file.
-
Select your printer model from the drop-down menu and click on "OK".
-
Click on "Particular adjustment mode" to access the main functions of the program.
-
-
-
-
Resetting the waste ink pad counter
-
To reset the waste ink pad counter using Epson PX710W Adjustment Program, you need to follow these steps:
-
-
In the "Particular adjustment mode" window, select "Waste ink pad counter" and click on "OK".
-
In the next window, check the boxes next to "Main pad counter" and "Platen pad counter" and click on "Check". The program will display the current values of the counters.
-
Click on "Initialization" to reset the counters to zero. The program will ask you to turn off your printer and then turn it on again.
-
Click on "OK" and then "Finish" to exit the program.
-
-
-
Setting the print head ID
-
To set the print head ID using Epson PX710W Adjustment Program, you need to follow these steps:
-
-
In the "Particular adjustment mode" window, select "Head ID input" and click on "OK".
-
In the next window, enter the print head ID that matches your printer's print head. You can find the print head ID on a label attached to the print head or on the printer's packaging.
-
Click on "Set" to save the print head ID. The program will ask you to turn off your printer and then turn it on again.
-
Click on "OK" and then "Finish" to exit the program.
-
-
-
-
Initializing the printer
-
To initialize the printer using Epson PX710W Adjustment Program, you need to follow these steps:
-
-
In the "Particular adjustment mode" window, select "Initialization" and click on "OK".
-
In the next window, click on "OK" to start the initialization process. The program will erase all the data and settings stored in your printer's firmware and restore it to its factory default state.
-
Wait for the initialization process to complete. The program will ask you to turn off your printer and then turn it on again.
-
Click on "OK" and then "Finish" to exit the program.
-
-
-
What are the benefits of Epson PX710W Adjustment Program?
-
-
-
Epson PX710W Adjustment Program can provide you with many benefits that can enhance your printing experience and save you money. Some of these benefits are:
-
-
You can reset the waste ink pad counter and avoid the need to replace the ink pads or take your printer to a service center. This can save you a lot of money and hassle.
-
You can set the print head ID and ensure that your printer recognizes your print head correctly. This can improve your print quality and prevent errors or malfunctions.
-
You can initialize the printer and restore it to its original state. This can fix any firmware issues or errors that may affect your printer's performance or functionality.
-
You can access other useful functions of the program, such as nozzle check, head cleaning, ink charge, EEPROM data copy, etc. These functions can help you maintain your printer and optimize its performance.
-
-
-
-
What are the limitations of Epson PX710W Adjustment Program?
-
-
-
Epson PX710W Adjustment Program is a powerful and useful tool, but it also has some limitations that you should be aware of. Some of these limitations are:
-
-
The program is compatible only with Epson Stylus Photo PX710W printers. You cannot use it for other printer models or brands.
-
The program requires a license key to run. You need to purchase the license key from a trusted source and enter it correctly when installing the program.
-
The program may be detected as a virus or malware by some antivirus software. You need to disable your antivirus software temporarily when downloading and installing the program.
-
The program may not work properly if you have other printer drivers or software installed on your computer. You need to uninstall or disable them before using the program.
-
-
-
How to troubleshoot Epson PX710W Adjustment Program?
-
-
-
Error messages and indicators
-
Sometimes, you may encounter some error messages or indicators when using Epson PX710W Adjustment Program. Here is a table of the possible errors and their solutions:
-
-
-
| Error message or indicator | Solution |
| --- | --- |
| "Communication error" | Check your printer connection and make sure it is properly connected to your computer. Try using a different USB cable or port. |
| "This program cannot be used" | Make sure you have entered the correct license key and selected the correct printer model. Also, make sure you have disabled your antivirus software and other printer drivers or software. |
| "The printer's ink pads are at the end of their service life" | Reset the waste ink pad counter using the program as described above. |
| "The printer's ink pad is nearly full" | Reset the waste ink pad counter using the program as described above. |
| "A printer error has occurred" | Turn off your printer and then turn it on again. If the error persists, initialize the printer using the program as described above. |
| "Cannot recognize the ink cartridge(s)" | Remove and reinsert the ink cartridge(s) that are not recognized. Make sure they are compatible with your printer model and not expired or damaged. |
| "Ink out" | Replace the ink cartridge(s) that are out of ink. Make sure they are compatible with your printer model and not expired or damaged. |
| "Paper jam" | Remove any paper that is jammed in your printer. Make sure you use the correct paper size and type for your printer. |
-
-
Printer problems and solutions
Besides the error messages or indicators, you may also experience some printer problems when using Epson PX710W Adjustment Program or after using it. Here is a table of the possible problems and their solutions:
| Printer problem | Solution |
| --- | --- |
| Print quality is poor or inconsistent | Perform a nozzle check and a head cleaning using the program. Also, make sure you use genuine Epson ink cartridges and high-quality paper for your printer. |
| Printer does not print or scan | Check your printer connection and make sure it is properly connected to your computer. Also, make sure you have selected the correct printer settings and preferences for your print or scan job. |
| Printer makes noise or vibrates | This is normal during the initialization process or when the print head moves. However, if the noise or vibration is excessive or continuous, contact Epson support for help. |
| Printer does not turn on or off | Check your power cord and make sure it is securely plugged into your printer and a working power outlet. Also, make sure you press and hold the power button for at least three seconds to turn on or off your printer. |
How to contact Epson support for help?
If you have any questions or issues that cannot be solved by using Epson PX710W Adjustment Program or by following the troubleshooting tips above, you can contact Epson support for help. Epson support can provide you with technical assistance, warranty information, product registration, and more. Here are some ways to contact Epson support:
Visit https://epson.com/Support/sl/s and enter your printer model number to access online resources, such as manuals, drivers, FAQs, videos, etc.
Call 1-800-463-7766 (U.S.) or 1-800-807-7766 (Canada) to speak with a customer service representative. The phone support hours are Monday to Friday from 6 a.m. to 8 p.m. PT, and Saturday from 7 a.m. to 4 p.m. PT.
Email support@epson.com with your name, phone number, email address, printer model number, serial number, and a brief description of your issue. You will receive a reply within 24 hours.
Chat with a live agent via https://epson.com/chat. The chat support hours are Monday to Friday from 6 a.m. to 8 p.m. PT, and Saturday from 7 a.m. to 4 p.m. PT.
Conclusion
Epson PX710W Adjustment Program is a handy tool that can help you fix many common printer problems and improve your printer's performance. By using this tool, you can reset the waste ink pad counter, set the print head ID, initialize the printer, and access other useful functions. However, you should also be aware of the limitations and precautions of using this tool, such as compatibility issues, license restrictions, antivirus interference, etc. If you encounter any issues that cannot be solved by using this tool or by following the troubleshooting tips above, you can contact Epson support for help.
-
We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy printing!
FAQs
Here are some frequently asked questions and their answers about Epson PX710W Adjustment Program:
-
-
Q: How often do I need to use Epson PX710W Adjustment Program? A: There is no fixed rule on how often you need to use this tool. It depends on how frequently you use your printer and how much ink it consumes. Generally, you may need to use this tool when you see an error message or indicator related to the waste ink pad counter, the print head ID, or the printer initialization.
-
Q: Is Epson PX710W Adjustment Program safe to use? A: Yes, this tool is safe to use as long as you download it from a reliable source and follow the instructions carefully. However, you should also take some precautions, such as disabling your antivirus software temporarily, uninstalling or disabling other printer drivers or software, and backing up your printer data before using this tool.
-
Q: Can I use Epson PX710W Adjustment Program for other printer models? A: No, this tool is compatible only with Epson Stylus Photo PX710W printers. You cannot use it for other printer models or brands. If you have other Epson printers, you may need to find the corresponding adjustment program for them.
-
Q: Where can I find more information about Epson PX710W Adjustment Program? A: You can find more information about this tool on the website where you downloaded it from or on the official Epson website. You can also watch some video tutorials on YouTube that show how to use this tool.
-
Q: What if Epson PX710W Adjustment Program does not work for me? A: If this tool does not work for you or causes any problems, you can contact Epson support for help. They can provide you with technical assistance, warranty information, product registration, and more.
-
b2dd77e56b
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Invision Power Board Download Nulled.md b/spaces/tioseFevbu/cartoon-converter/scripts/Invision Power Board Download Nulled.md
deleted file mode 100644
index 3d4c55cccf4ec853e3abb9de547f7b392140a78c..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Invision Power Board Download Nulled.md
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
Invision Power Board Download Nulled: What You Need to Know
-
Invision Power Board, also known as Invision Community, is a popular forum software that allows you to create and manage online communities. It has many features and benefits that make it a great choice for building and growing your community. However, some people may be tempted to use Invision Power Board download nulled, which is a cracked or pirated version of the software that is available for free on some websites. This may seem like a good deal, but it is actually a bad idea that can cause you many problems. In this article, we will explain what Invision Power Board is, what nulled software is, why you should not use Invision Power Board download nulled, and what alternatives and solutions you can use instead.
-
What is Invision Power Board?
-
Invision Power Board, first released in 2002 and now marketed as Invision Community, is forum software written in PHP that uses MySQL for database storage. Invision Power Services (IPS) is the company that develops and sells Invision Power Board and other related products.
Invision Power Board has many features and benefits that make it a powerful and flexible platform for creating and managing online communities. Some of these features and benefits are:
-
-
Forums: You can create unlimited forums and subforums, customize the layout and appearance, moderate the content and users, enable various options such as polls, attachments, ratings, reactions, etc.
-
Ecommerce: You can sell products and services, accept payments, manage orders and invoices, create coupons and discounts, etc.
-
CMS: You can create and publish articles, pages, blogs, galleries, calendars, etc., with an easy-to-use editor and drag-and-drop interface.
-
File Sharing: You can upload and share files, images, videos, etc., with your community members, set permissions and quotas, create categories and tags, etc.
-
Clubs: You can create and join private or public groups within your community, with their own forums, blogs, galleries, calendars, etc.
-
Blogs and News: You can write and share your thoughts, opinions, news, etc., with your community members, enable comments and feedback, etc.
-
Resource Community: You can share information, files and articles relevant to your niche, with ratings, reviews, downloads, etc.
-
Subscriptions: You can create exclusive subscription-based communities for your VIPs, with recurring payments, access control, etc.
-
Micro Communities: You can segment your community and let your members niche down into specific topics or interests.
-
Engagement Tools: You can increase user engagement and loyalty with tools such as gamification (badges, points, ranks), notifications (web push or email), social media integration (Facebook, Twitter), etc.
-
Analytics Tools: You can monitor and measure your community's performance with tools such as statistics (views, registrations, posts, etc.), reports (activity, moderation, reputation, etc.), and insights (trends, patterns, recommendations, etc.).
-
Security Tools: You can protect your community and data with tools such as encryption (SSL), backups (cloud or local), spam prevention (CAPTCHA, email validation, etc.), moderation tools (warnings, bans, etc.), and permissions (roles, groups, etc.).
-
Customization Tools: You can customize your community and make it unique with tools such as themes (colors, fonts, logos, etc.), plugins (add-ons, extensions, etc.), languages (translations, localization, etc.), and code (HTML, CSS, PHP, etc.).
-
Support Tools: You can get help and support from the IPS team and the Invision Community with tools such as documentation (guides, tutorials, FAQs, etc.), forums (questions, answers, feedback, etc.), tickets (technical support, bug reports, etc.), and services (installation, migration, upgrade, etc.).
-
-
As you can see, Invision Power Board has a lot to offer for anyone who wants to create and manage a successful online community. However, it is not free software. You need to pay for a license and a renewal fee to use it legally and get access to all the features and benefits.
-
Pricing and licensing of Invision Power Board
-
Invision Power Board has different pricing and licensing options depending on your needs and preferences. You can choose between a cloud plan or a self-hosted plan.
-
-
| Cloud Plan | Self-Hosted Plan |
| --- | --- |
| You pay a monthly or yearly fee based on the number of page views and online users you have. You get access to all the features of Invision Power Board plus hosting, backups, updates, support, and security. You don't need to worry about installation or maintenance. | You pay a one-time fee for the core software plus any additional applications you want. You also need to pay a renewal fee every six months to get access to updates and support. You need to host the software on your own server and take care of installation and maintenance. |
| The cloud plan starts from $45 per month for up to 25k page views and 100 online users. The price increases as you need more resources. You can also get a custom plan for larger communities. | The self-hosted plan starts from $175 for the core software plus the forums application. The price increases as you add more applications. The renewal fee is $25 every six months for the core software plus $15 for each application. |
-
-
Both plans have their pros and cons. The cloud plan is more convenient and hassle-free but more expensive in the long run. The self-hosted plan is more flexible and cost-effective but more complicated and time-consuming. You need to weigh your options carefully and decide which one suits you best.
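
To see how the quoted prices compare over time, here is a rough back-of-the-envelope sketch in Python. The figures come from the plans described above, but the assumptions are mine rather than from IPS: the cloud community stays on the $45/month entry tier, only the forums application is licensed, and the $175 self-hosted purchase covers the first six-month period before optional renewals begin.

```python
# Back-of-the-envelope cost comparison using the figures quoted above.
# Assumptions (mine, not from IPS): cloud stays at the $45/month entry tier,
# only the forums application is licensed ($25 core + $15 app per renewal),
# and the $175 purchase covers the first six-month period.

def cloud_cost(months: int, monthly_fee: float = 45.0) -> float:
    return monthly_fee * months

def self_hosted_cost(months: int, purchase: float = 175.0,
                     renewal_per_period: float = 25.0 + 15.0) -> float:
    periods = months // 6               # six-month renewal periods in the horizon
    renewals = max(periods - 1, 0)      # first period assumed covered by the purchase
    return purchase + renewal_per_period * renewals

for years in (1, 3, 5):
    months = years * 12
    print(f"{years} year(s): cloud ${cloud_cost(months):,.0f} "
          f"vs self-hosted ${self_hosted_cost(months):,.0f}")
```

Under these illustrative assumptions the cloud plan already costs more than the self-hosted total within the first year, which is the "more expensive in the long run" trade-off noted above.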
-
What is nulled software?
-
Nulled software is software that has been modified or cracked to remove or bypass the license verification or activation process. This means that you can use the software without paying for it or following the terms and conditions of the original developer. Nulled software is usually distributed for free on some websites that offer downloads of pirated or illegal software.
-
Definition and examples of nulled software
-
Nulled software is a type of software piracy that involves modifying or cracking the original software code to remove or bypass the license verification or activation process. This allows the user to use the software without paying for it or following the terms and conditions of the original developer. Nulled software is also known as cracked software or warez.
-
Nulled software is different from freeware or open-source software. Freeware is software that is offered for free by the original developer without any restrictions or limitations. Open-source software is software that has its source code available for anyone to view, modify, or distribute under certain licenses. Nulled software is software that has been illegally tampered with by someone other than the original developer without their permission or consent.
-
-
Nulled software can be found for almost any type of software that requires a license or activation to use. Some examples of nulled software are:
-
-
Nulled WordPress themes and plugins: These are themes and plugins for WordPress that have been modified or cracked to remove or bypass the license verification or activation process. They are usually downloaded from websites that offer pirated or illegal WordPress products.
-
Nulled antivirus programs: These are antivirus programs that have been modified or cracked to remove or bypass the license verification or activation process. They are usually downloaded from websites that offer pirated or illegal antivirus products.
-
N
-
Nulled games: These are games that have been modified or cracked to remove or bypass the license verification or activation process. They are usually downloaded from websites that offer pirated or illegal games.
-
Nulled Invision Power Board: This is Invision Power Board that has been modified or cracked to remove or bypass the license verification or activation process. It is usually downloaded from websites that offer pirated or illegal Invision Power Board products.
-
-
Nulled software may seem like a good deal, but it is actually a bad idea that can cause you many problems. In the next section, we will explain why you should not use nulled software, especially Invision Power Board download nulled.
-
Risks and disadvantages of using nulled software
-
Using nulled software is not only illegal and unethical, but also risky and disadvantageous. Some of the risks and disadvantages of using nulled software are:
-
-
Legal issues and ethical concerns: Using nulled software is a violation of the intellectual property rights of the original developer. You are stealing their work and depriving them of their rightful income. You are also breaking the law and exposing yourself to potential lawsuits and penalties. Moreover, you are being dishonest and unfair to yourself and others who pay for the software legally.
-
Security threats and performance issues: Using nulled software is a security risk and a performance issue. Nulled software may contain malware, viruses, spyware, adware, etc., that can harm your computer, data, and privacy. Nulled software may also have bugs, errors, glitches, etc., that can affect the functionality and reliability of the software. Nulled software may also be incompatible with other software or updates, causing conflicts and crashes.
-
Lack of features and benefits: Using nulled software is a loss of features and benefits. Nulled software may not have all the features and benefits of the original software, or they may not work properly or at all. Nulled software may also not have access to updates, support, security, customization, etc., that are available for the original software. Nulled software may also not have any warranty or guarantee from the original developer.
-
-
As you can see, using nulled software is a bad idea that can cause you many problems. This is especially true for Invision Power Board download nulled, which is a complex and sophisticated forum software that requires a lot of resources and maintenance. In the next section, we will explain why you should not use Invision Power Board download nulled, and what alternatives and solutions you can use instead.
-
Why you should not use Invision Power Board download nulled
-
Invision Power Board download nulled is a cracked or pirated version of Invision Power Board that is available for free on some websites. It may seem like a good deal, but it is actually a bad idea that can cause you many problems. Here are some reasons why you should not use Invision Power Board download nulled:
-
Legal issues and ethical concerns of using Invision Power Board download nulled
-
Using Invision Power Board download nulled is a violation of the intellectual property rights of IPS, the company that develops and sells Invision Power Board. You are stealing their work and depriving them of their rightful income. You are also breaking the law and exposing yourself to potential lawsuits and penalties from IPS or other authorities.
-
Moreover, you are being dishonest and unfair to yourself and others who pay for Invision Power Board legally. You are taking advantage of their hard work and investment without contributing anything in return. You are also undermining the quality and reputation of Invision Power Board by using a substandard and illegal version of it.
-
Security threats and performance issues of using Invision Power Board download nulled
-
Using Invision Power Board download nulled is a security risk and a performance issue. Invision Power Board download nulled may contain malware, viruses, spyware, adware, etc., that can harm your computer, data, and privacy. For example, it may steal your personal information, infect your files, display unwanted ads, redirect your traffic, etc.
-
Invision Power Board download nulled may also have bugs, errors, glitches, etc., that can affect the functionality and reliability of Invision Power Board. For example, it may crash frequently, display incorrect or incomplete information, fail to perform certain tasks, etc.
-
Invision Power Board download nulled may also be incompatible with other software or updates, causing conflicts and crashes. For example, it may not work with the latest version of PHP, MySQL, or other software that Invision Power Board depends on. It may also not work with the latest features or updates that IPS releases for Invision Power Board.
-
Alternatives and solutions to using Invision Power Board download nulled
-
Using Invision Power Board download nulled is not worth the risk and hassle. There are better alternatives and solutions that you can use instead. Some of these alternatives and solutions are:
-
-
Buy a legal license of Invision Power Board: The best and most obvious solution is to buy a legal license of Invision Power Board from IPS. This way, you can enjoy all the features and benefits of Invision Power Board without any legal or ethical issues. You can also get access to updates, support, security, customization, etc., that are available for Invision Power Board. You can choose between a cloud plan or a self-hosted plan depending on your needs and preferences.
-
Use a free or open-source forum software: If you don't want to pay for Invision Power Board, you can use a free or open-source forum software instead. There are many free or open-source forum software that you can use to create and manage your online community. Some examples are phpBB, MyBB, SMF, Flarum, Discourse, etc. These forum software may not have all the features and benefits of Invision Power Board, but they are still good enough for most purposes. They are also legal and ethical to use.
-
Use a hosted forum service: If you don't want to install or maintain any forum software, you can use a hosted forum service instead. There are many hosted forum services that you can use to create and manage your online community. Some examples are ProBoards, Forumotion, ZetaBoards, etc. These hosted forum services may not have all the features and benefits of Invision Power Board, but they are still convenient and easy to use. They are also legal and ethical to use.
-
-
As you can see, there are many alternatives and solutions that you can use instead of Invision Power Board download nulled. You should avoid using Invision Power Board download nulled at all costs and choose one of these alternatives and solutions instead.
-
Conclusion
-
In this article, we have explained what Invision Power Board is, what nulled software is, why you should not use Invision Power Board download nulled, and what alternatives and solutions you can use instead. We hope that this article has been helpful and informative for you.
-
In summary, here are the main points that we have covered:
-
-
Invision Power Board is a popular forum software that allows you to create and manage online communities. It has many features and benefits that make it a great choice for building and growing your community.
-
Nulled software is software that has been modified or cracked to remove or bypass the license verification or activation process. This means that you can use the software without paying for it or following the terms and conditions of the original developer.
-
Using Invision Power Board download nulled is a bad idea that can cause you many problems: it is illegal and unethical, and it is also risky and disadvantageous.
-
There are better alternatives and solutions that you can use instead of Invision Power Board download nulled: you can buy a legal license of Invision Power Board from IPS, use free or open-source forum software, or use a hosted forum service.
-
-
We hope that this article has answered your questions about Invision Power Board download nulled. If you have any more questions or comments, please feel free to leave them below.
-
FAQs
-
Here are some frequently asked questions about Invision Power Board download nulled:
-
Q: Where can I find Invision Power Board download nulled?
-
A: You can find Invision Power Board download nulled on some websites that offer downloads of pirated or illegal software. However, we strongly advise you not to use these websites or download these files as they are illegal and unsafe.
-
Q: What are the advantages of using Invision Power Board download nulled?
-
A: The only advantage of using Invision Power Board download nulled is that it is free. However, this advantage is outweighed by the many disadvantages and risks of using Invision Power Board download nulled.
-
Q: What are the disadvantages of using Invision Power Board download nulled?
-
A: The disadvantages of using Invision Power Board download nulled are:

- Legal issues and ethical concerns: You are violating the intellectual property rights of IPS and breaking the law by using Invision Power Board download nulled. You are also being dishonest and unfair to yourself and others who pay for Invision Power Board legally.
- Security threats and performance issues: You are exposing your computer, data, and privacy to malware, viruses, spyware, adware, etc., that may be hidden in Invision Power Board download nulled. You are also compromising the functionality and reliability of Invision Power Board by using a substandard and illegal version of it.
- Lack of features and benefits: You are missing out on the features and benefits of Invision Power Board that are available for the legal version of it. You are also losing access to updates, support, security, customization, etc., that are provided by IPS for Invision Power Board.
-
Q: How can I get a legal license of Invision Power Board?
-
A: You can get a legal license of Invision Power Board by buying it from IPS, the company that develops and sells Invision Power Board. You can visit their website at https://invisioncommunity.com/ and choose between a cloud plan or a self-hosted plan depending on your needs and preferences.
-
Q: What are some free or open-source forum software that I can use instead of Invision Power Board?
-
A: Some free or open-source forum software that you can use instead of Invision Power Board are:

- phpBB: A free and open-source forum software that is widely used and supported by a large community. It has many features and extensions that you can use to customize your forum. You can visit their website at https://www.phpbb.com/.
- MyBB: A free and open-source forum software that is easy to use and manage. It has many features and plugins that you can use to enhance your forum. You can visit their website at https://mybb.com/.
- SMF: A free and open-source forum software that is fast and secure. It has many features and mods that you can use to improve your forum. You can visit their website at https://www.simplemachines.org/.
- Flarum: A free and open-source forum software that is modern and elegant. It has many features and extensions that you can use to create a beautiful and engaging forum. You can visit their website at https://flarum.org/.
- Discourse: A free and open-source forum software that is innovative and interactive. It has many features and integrations that you can use to create a dynamic and collaborative forum. You can visit their website at https://www.discourse.org/.
-
Q: What are some hosted forum services that I can use instead of Invision Power Board?
-
A: Some hosted forum services that you can use instead of Invision Power Board are:

- ProBoards: A hosted forum service that is free and easy to use. It has many features and themes that you can use to create your own forum. You can visit their website at https://www.proboards.com/.
- Forumotion: A hosted forum service that is free and flexible. It has many features and options that you can use to customize your forum. You can visit their website at https://www.forumotion.com/.
- ZetaBoards: A hosted forum service that is free and powerful. It has many features and tools that you can use to optimize your forum. You can visit their website at https://www.zetaboards.com/.
b2dd77e56b
-
-
\ No newline at end of file
diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_compat.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_compat.py
deleted file mode 100644
index e0879807ab91beabd4158c91c4b7a74277b86463..0000000000000000000000000000000000000000
--- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/metadata/importlib/_compat.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import importlib.metadata
-from typing import Any, Optional, Protocol, cast
-
-
-class BasePath(Protocol):
-    """A protocol that various path objects conform to.
-
- This exists because importlib.metadata uses both ``pathlib.Path`` and
- ``zipfile.Path``, and we need a common base for type hints (Union does not
- work well since ``zipfile.Path`` is too new for our linter setup).
-
-    This is not meant to be exhaustive; it only contains things that are present
-    in both classes *that we need*.
- """
-
- @property
- def name(self) -> str:
- raise NotImplementedError()
-
- @property
- def parent(self) -> "BasePath":
- raise NotImplementedError()
-
-
-def get_info_location(d: importlib.metadata.Distribution) -> Optional[BasePath]:
- """Find the path to the distribution's metadata directory.
-
- HACK: This relies on importlib.metadata's private ``_path`` attribute. Not
- all distributions exist on disk, so importlib.metadata is correct to not
- expose the attribute as public. But pip's code base is old and not as clean,
- so we do this to avoid having to rewrite too many things. Hopefully we can
- eliminate this some day.
- """
- return getattr(d, "_path", None)
-
-
-def get_dist_name(dist: importlib.metadata.Distribution) -> str:
- """Get the distribution's project name.
-
- The ``name`` attribute is only available in Python 3.10 or later. We are
- targeting exactly that, but Mypy does not know this.
- """
- return cast(Any, dist).name
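For context, here is a minimal, hypothetical usage sketch of the two helpers above. It assumes a Python 3.10+ interpreter (matching the comment in `get_dist_name`) and simply inlines what the helpers do rather than importing pip's private `_compat` module:

```python
import importlib.metadata

# Inline equivalents of get_info_location() and get_dist_name() above,
# shown for illustration only; pip's internals are private and may change.
dist = importlib.metadata.distribution("pip")
info_location = getattr(dist, "_path", None)  # metadata directory, may be None
project_name = dist.name                      # requires Python 3.10+

print(project_name, info_location)
```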
diff --git a/spaces/tomofi/MMOCR/tests/test_dataset/test_ocr_seg_dataset.py b/spaces/tomofi/MMOCR/tests/test_dataset/test_ocr_seg_dataset.py
deleted file mode 100644
index f7678123ea5340826c6562c5fba3502068a8ddd4..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/tests/test_dataset/test_ocr_seg_dataset.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import json
-import math
-import os.path as osp
-import tempfile
-
-import pytest
-
-from mmocr.datasets.ocr_seg_dataset import OCRSegDataset
-
-
-def _create_dummy_ann_file(ann_file):
- ann_info1 = {
- 'file_name':
- 'sample1.png',
- 'annotations': [{
- 'char_text':
- 'F',
- 'char_box': [11.0, 0.0, 22.0, 0.0, 12.0, 12.0, 0.0, 12.0]
- }, {
- 'char_text':
- 'r',
- 'char_box': [23.0, 2.0, 31.0, 1.0, 24.0, 11.0, 16.0, 11.0]
- }, {
- 'char_text':
- 'o',
- 'char_box': [33.0, 2.0, 43.0, 2.0, 36.0, 12.0, 25.0, 12.0]
- }, {
- 'char_text':
- 'm',
- 'char_box': [46.0, 2.0, 61.0, 2.0, 53.0, 12.0, 39.0, 12.0]
- }, {
- 'char_text':
- ':',
- 'char_box': [61.0, 2.0, 69.0, 2.0, 63.0, 12.0, 55.0, 12.0]
- }],
- 'text':
- 'From:'
- }
- ann_info2 = {
- 'file_name':
- 'sample2.png',
- 'annotations': [{
- 'char_text': 'o',
- 'char_box': [0.0, 5.0, 7.0, 5.0, 9.0, 15.0, 2.0, 15.0]
- }, {
- 'char_text':
- 'u',
- 'char_box': [7.0, 4.0, 14.0, 4.0, 18.0, 18.0, 11.0, 18.0]
- }, {
- 'char_text':
- 't',
- 'char_box': [13.0, 1.0, 19.0, 2.0, 24.0, 18.0, 17.0, 18.0]
- }],
- 'text':
- 'out'
- }
-
- with open(ann_file, 'w') as fw:
- for ann_info in [ann_info1, ann_info2]:
- fw.write(json.dumps(ann_info) + '\n')
-
- return ann_info1, ann_info2
-
-
-def _create_dummy_loader():
- loader = dict(
- type='HardDiskLoader',
- repeat=1,
- parser=dict(
- type='LineJsonParser', keys=['file_name', 'text', 'annotations']))
- return loader
-
-
-def test_ocr_seg_dataset():
- tmp_dir = tempfile.TemporaryDirectory()
- # create dummy data
- ann_file = osp.join(tmp_dir.name, 'fake_data.txt')
- ann_info1, ann_info2 = _create_dummy_ann_file(ann_file)
-
- # test initialization
- loader = _create_dummy_loader()
- dataset = OCRSegDataset(ann_file, loader, pipeline=[])
-
- tmp_dir.cleanup()
-
- # test pre_pipeline
- img_info = dataset.data_infos[0]
- results = dict(img_info=img_info)
- dataset.pre_pipeline(results)
- assert results['img_prefix'] == dataset.img_prefix
-
- # test _parse_anno_info
- annos = ann_info1['annotations']
- with pytest.raises(AssertionError):
- dataset._parse_anno_info(annos[0])
- annos2 = ann_info2['annotations']
- with pytest.raises(AssertionError):
- dataset._parse_anno_info([{'char_text': 'i'}])
- with pytest.raises(AssertionError):
- dataset._parse_anno_info([{'char_box': [1, 2, 3, 4, 5, 6, 7, 8]}])
- annos2[0]['char_box'] = [1, 2, 3]
- with pytest.raises(AssertionError):
- dataset._parse_anno_info(annos2)
-
- return_anno = dataset._parse_anno_info(annos)
- assert return_anno['chars'] == ['F', 'r', 'o', 'm', ':']
- assert len(return_anno['char_rects']) == 5
-
- # test prepare_train_img
- expect_results = {
- 'img_info': {
- 'filename': 'sample1.png'
- },
- 'img_prefix': '',
- 'ann_info': return_anno
- }
- data = dataset.prepare_train_img(0)
- assert data == expect_results
-
-    # test evaluation
- metric = 'acc'
- results = [{'text': 'From:'}, {'text': 'ou'}]
- eval_res = dataset.evaluate(results, metric)
-
- assert math.isclose(eval_res['word_acc'], 0.5, abs_tol=1e-4)
- assert math.isclose(eval_res['char_precision'], 1.0, abs_tol=1e-4)
- assert math.isclose(eval_res['char_recall'], 0.857, abs_tol=1e-4)
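For reference, a small standalone sketch (independent of MMOCR; the file name and boxes are illustrative) of the line-delimited JSON annotation format that `_create_dummy_ann_file` above writes and that the `LineJsonParser` loader is configured to read, one JSON object per line:

```python
import json

# Each line of the annotation file is one image's record, with the keys the
# parser expects: 'file_name', 'text', and per-character 'annotations'.
record = {
    'file_name': 'sample1.png',
    'text': 'Hi',
    'annotations': [
        {'char_text': 'H', 'char_box': [0.0, 0.0, 8.0, 0.0, 8.0, 12.0, 0.0, 12.0]},
        {'char_text': 'i', 'char_box': [9.0, 0.0, 12.0, 0.0, 12.0, 12.0, 9.0, 12.0]},
    ],
}

with open('fake_data.txt', 'w') as fw:
    fw.write(json.dumps(record) + '\n')      # one object per line

with open('fake_data.txt') as fr:
    print(json.loads(fr.readline())['text'])  # -> 'Hi'
```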
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py
deleted file mode 100644
index 030ae3721ef62cf990b9e75a7d229616a263952e..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py
+++ /dev/null
@@ -1,300 +0,0 @@
-# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
-
-import torch
-import torch.nn as nn
-from mmcv.cnn import ConvModule
-from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
-from mmcv.runner import BaseModule
-
-from mmdet.models.builder import HEADS, build_loss
-
-
-@HEADS.register_module()
-class MaskPointHead(BaseModule):
-    """A mask point head used in PointRend.
-
-    ``MaskPointHead`` uses a shared multi-layer perceptron (equivalent to
-    nn.Conv1d) to predict the logits of input points. The fine-grained feature
-    and coarse feature will be concatenated together for prediction.
-
- Args:
- num_fcs (int): Number of fc layers in the head. Default: 3.
- in_channels (int): Number of input channels. Default: 256.
- fc_channels (int): Number of fc channels. Default: 256.
- num_classes (int): Number of classes for logits. Default: 80.
- class_agnostic (bool): Whether use class agnostic classification.
- If so, the output channels of logits will be 1. Default: False.
- coarse_pred_each_layer (bool): Whether concatenate coarse feature with
- the output of each fc layer. Default: True.
- conv_cfg (dict | None): Dictionary to construct and config conv layer.
- Default: dict(type='Conv1d'))
- norm_cfg (dict | None): Dictionary to construct and config norm layer.
- Default: None.
- loss_point (dict): Dictionary to construct and config loss layer of
- point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
- loss_weight=1.0).
- init_cfg (dict or list[dict], optional): Initialization config dict.
- """
-
- def __init__(self,
- num_classes,
- num_fcs=3,
- in_channels=256,
- fc_channels=256,
- class_agnostic=False,
- coarse_pred_each_layer=True,
- conv_cfg=dict(type='Conv1d'),
- norm_cfg=None,
- act_cfg=dict(type='ReLU'),
- loss_point=dict(
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
- init_cfg=dict(
- type='Normal', std=0.001,
- override=dict(name='fc_logits'))):
- super().__init__(init_cfg)
- self.num_fcs = num_fcs
- self.in_channels = in_channels
- self.fc_channels = fc_channels
- self.num_classes = num_classes
- self.class_agnostic = class_agnostic
- self.coarse_pred_each_layer = coarse_pred_each_layer
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.loss_point = build_loss(loss_point)
-
- fc_in_channels = in_channels + num_classes
- self.fcs = nn.ModuleList()
- for _ in range(num_fcs):
- fc = ConvModule(
- fc_in_channels,
- fc_channels,
- kernel_size=1,
- stride=1,
- padding=0,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg)
- self.fcs.append(fc)
- fc_in_channels = fc_channels
- fc_in_channels += num_classes if self.coarse_pred_each_layer else 0
-
- out_channels = 1 if self.class_agnostic else self.num_classes
- self.fc_logits = nn.Conv1d(
- fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-
- def forward(self, fine_grained_feats, coarse_feats):
-        """Classify each point based on fine-grained and coarse feats.
-
- Args:
- fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
- shape (num_rois, in_channels, num_points).
- coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
- shape (num_rois, num_classes, num_points).
-
- Returns:
- Tensor: Point classification results,
- shape (num_rois, num_class, num_points).
- """
-
- x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
- for fc in self.fcs:
- x = fc(x)
- if self.coarse_pred_each_layer:
- x = torch.cat((x, coarse_feats), dim=1)
- return self.fc_logits(x)
-
- def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
- cfg):
- """Get training targets of MaskPointHead for all images.
-
- Args:
- rois (Tensor): Region of Interest, shape (num_rois, 5).
- rel_roi_points: Points coordinates relative to RoI, shape
- (num_rois, num_points, 2).
- sampling_results (:obj:`SamplingResult`): Sampling result after
- sampling and assignment.
- gt_masks (Tensor) : Ground truth segmentation masks of
- corresponding boxes, shape (num_rois, height, width).
- cfg (dict): Training cfg.
-
- Returns:
- Tensor: Point target, shape (num_rois, num_points).
- """
-
- num_imgs = len(sampling_results)
- rois_list = []
- rel_roi_points_list = []
- for batch_ind in range(num_imgs):
- inds = (rois[:, 0] == batch_ind)
- rois_list.append(rois[inds])
- rel_roi_points_list.append(rel_roi_points[inds])
- pos_assigned_gt_inds_list = [
- res.pos_assigned_gt_inds for res in sampling_results
- ]
- cfg_list = [cfg for _ in range(num_imgs)]
-
- point_targets = map(self._get_target_single, rois_list,
- rel_roi_points_list, pos_assigned_gt_inds_list,
- gt_masks, cfg_list)
- point_targets = list(point_targets)
-
- if len(point_targets) > 0:
- point_targets = torch.cat(point_targets)
-
- return point_targets
-
- def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
- gt_masks, cfg):
- """Get training target of MaskPointHead for each image."""
- num_pos = rois.size(0)
- num_points = cfg.num_points
- if num_pos > 0:
- gt_masks_th = (
- gt_masks.to_tensor(rois.dtype, rois.device).index_select(
- 0, pos_assigned_gt_inds))
- gt_masks_th = gt_masks_th.unsqueeze(1)
- rel_img_points = rel_roi_point_to_rel_img_point(
- rois, rel_roi_points, gt_masks_th.shape[2:])
- point_targets = point_sample(gt_masks_th,
- rel_img_points).squeeze(1)
- else:
- point_targets = rois.new_zeros((0, num_points))
- return point_targets
-
- def loss(self, point_pred, point_targets, labels):
- """Calculate loss for MaskPointHead.
-
- Args:
-            point_pred (Tensor): Point prediction result, shape
- (num_rois, num_classes, num_points).
- point_targets (Tensor): Point targets, shape (num_roi, num_points).
- labels (Tensor): Class label of corresponding boxes,
- shape (num_rois, )
-
- Returns:
- dict[str, Tensor]: a dictionary of point loss components
- """
-
- loss = dict()
- if self.class_agnostic:
- loss_point = self.loss_point(point_pred, point_targets,
- torch.zeros_like(labels))
- else:
- loss_point = self.loss_point(point_pred, point_targets, labels)
- loss['loss_point'] = loss_point
- return loss
-
- def _get_uncertainty(self, mask_pred, labels):
- """Estimate uncertainty based on pred logits.
-
- We estimate uncertainty as L1 distance between 0.0 and the logits
- prediction in 'mask_pred' for the foreground class in `classes`.
-
- Args:
-            mask_pred (Tensor): mask prediction logits, shape (num_rois,
- num_classes, mask_height, mask_width).
-
- labels (list[Tensor]): Either predicted or ground truth label for
- each predicted mask, of length num_rois.
-
- Returns:
- scores (Tensor): Uncertainty scores with the most uncertain
- locations having the highest uncertainty score,
- shape (num_rois, 1, mask_height, mask_width)
- """
- if mask_pred.shape[1] == 1:
- gt_class_logits = mask_pred.clone()
- else:
- inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
- gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
- return -torch.abs(gt_class_logits)
-
- def get_roi_rel_points_train(self, mask_pred, labels, cfg):
- """Get ``num_points`` most uncertain points with random points during
- train.
-
- Sample points in [0, 1] x [0, 1] coordinate space based on their
- uncertainty. The uncertainties are calculated for each point using
- '_get_uncertainty()' function that takes point's logit prediction as
- input.
-
- Args:
- mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
- mask_height, mask_width) for class-specific or class-agnostic
- prediction.
- labels (list): The ground truth class for each instance.
- cfg (dict): Training config of point head.
-
- Returns:
- point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
-                that contains the coordinates of the sampled points.
- """
- num_points = cfg.num_points
- oversample_ratio = cfg.oversample_ratio
- importance_sample_ratio = cfg.importance_sample_ratio
- assert oversample_ratio >= 1
- assert 0 <= importance_sample_ratio <= 1
- batch_size = mask_pred.shape[0]
- num_sampled = int(num_points * oversample_ratio)
- point_coords = torch.rand(
- batch_size, num_sampled, 2, device=mask_pred.device)
- point_logits = point_sample(mask_pred, point_coords)
- # It is crucial to calculate uncertainty based on the sampled
- # prediction value for the points. Calculating uncertainties of the
- # coarse predictions first and sampling them for points leads to
- # incorrect results. To illustrate this: assume uncertainty func(
- # logits)=-abs(logits), a sampled point between two coarse
- # predictions with -1 and 1 logits has 0 logits, and therefore 0
- # uncertainty value. However, if we calculate uncertainties for the
- # coarse predictions first, both will have -1 uncertainty,
- # and sampled point will get -1 uncertainty.
- point_uncertainties = self._get_uncertainty(point_logits, labels)
- num_uncertain_points = int(importance_sample_ratio * num_points)
- num_random_points = num_points - num_uncertain_points
- idx = torch.topk(
- point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
- shift = num_sampled * torch.arange(
- batch_size, dtype=torch.long, device=mask_pred.device)
- idx += shift[:, None]
- point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
- batch_size, num_uncertain_points, 2)
- if num_random_points > 0:
- rand_roi_coords = torch.rand(
- batch_size, num_random_points, 2, device=mask_pred.device)
- point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
- return point_coords
-
- def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
- """Get ``num_points`` most uncertain points during test.
-
- Args:
- mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
- mask_height, mask_width) for class-specific or class-agnostic
- prediction.
-            pred_label (list): The predicted class for each instance.
- cfg (dict): Testing config of point head.
-
- Returns:
- point_indices (Tensor): A tensor of shape (num_rois, num_points)
- that contains indices from [0, mask_height x mask_width) of the
- most uncertain points.
- point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
- that contains [0, 1] x [0, 1] normalized coordinates of the
- most uncertain points from the [mask_height, mask_width] grid .
- """
- num_points = cfg.subdivision_num_points
- uncertainty_map = self._get_uncertainty(mask_pred, pred_label)
- num_rois, _, mask_height, mask_width = uncertainty_map.shape
- h_step = 1.0 / mask_height
- w_step = 1.0 / mask_width
-
- uncertainty_map = uncertainty_map.view(num_rois,
- mask_height * mask_width)
- num_points = min(mask_height * mask_width, num_points)
- point_indices = uncertainty_map.topk(num_points, dim=1)[1]
- point_coords = uncertainty_map.new_zeros(num_rois, num_points, 2)
- point_coords[:, :, 0] = w_step / 2.0 + (point_indices %
- mask_width).float() * w_step
- point_coords[:, :, 1] = h_step / 2.0 + (point_indices //
- mask_width).float() * h_step
- return point_indices, point_coords
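As a side note, the comment inside `get_roi_rel_points_train` above argues that uncertainty must be computed from point-sampled logits rather than sampled from a precomputed uncertainty map. A small standalone sketch of that argument (plain PyTorch, independent of mmdet; the numbers are illustrative):

```python
import torch

# Two neighbouring coarse predictions with logits -1 and +1, and a point
# sampled exactly halfway between them.
left_logit, right_logit = torch.tensor(-1.0), torch.tensor(1.0)

# Correct order: interpolate the logits first, then take uncertainty = -|logit|.
sampled_logit = 0.5 * left_logit + 0.5 * right_logit        # 0.0
print(-sampled_logit.abs())                                  # tensor(0.) -> most uncertain

# Wrong order: compute uncertainty per coarse cell first, then interpolate.
left_unc, right_unc = -left_logit.abs(), -right_logit.abs()  # both -1
print(0.5 * left_unc + 0.5 * right_unc)                      # tensor(-1.) -> looks confident
```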
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_models/test_dense_heads/test_autoassign_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_models/test_dense_heads/test_autoassign_head.py
deleted file mode 100644
index ebcf6fed1d6520b29d4482611436129002c026c0..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_models/test_dense_heads/test_autoassign_head.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import mmcv
-import torch
-
-from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
-from mmdet.models.dense_heads.paa_head import levels_to_images
-
-
-def test_autoassign_head_loss():
- """Tests autoassign head loss when truth is empty and non-empty."""
-
- s = 256
- img_metas = [{
- 'img_shape': (s, s, 3),
- 'scale_factor': 1,
- 'pad_shape': (s, s, 3)
- }]
- train_cfg = mmcv.Config(
- dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
- self = AutoAssignHead(
- num_classes=4,
- in_channels=1,
- train_cfg=train_cfg,
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
- feat = [
- torch.rand(1, 1, s // feat_size, s // feat_size)
- for feat_size in [4, 8, 16, 32, 64]
- ]
- self.init_weights()
- cls_scores, bbox_preds, objectnesses = self(feat)
- # Test that empty ground truth encourages the network to predict background
- gt_bboxes = [torch.empty((0, 4))]
- gt_labels = [torch.LongTensor([])]
- gt_bboxes_ignore = None
- empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
- gt_bboxes, gt_labels, img_metas,
- gt_bboxes_ignore)
- # When there is no truth, the cls loss should be nonzero but there should
- # be no box loss.
- empty_pos_loss = empty_gt_losses['loss_pos']
- empty_neg_loss = empty_gt_losses['loss_neg']
- empty_center_loss = empty_gt_losses['loss_center']
- assert empty_neg_loss.item() > 0, 'cls loss should be non-zero'
-    assert empty_pos_loss.item() == 0, (
-        'there should be no pos loss when there are no true boxes')
-    assert empty_center_loss.item() == 0, (
-        'there should be no center loss when there are no true boxes')
-
- # When truth is non-empty then both cls and box loss should be nonzero for
- # random inputs
- gt_bboxes = [
- torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
- ]
- gt_labels = [torch.LongTensor([2])]
- one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
- gt_labels, img_metas, gt_bboxes_ignore)
- onegt_pos_loss = one_gt_losses['loss_pos']
- onegt_neg_loss = one_gt_losses['loss_neg']
- onegt_center_loss = one_gt_losses['loss_center']
- assert onegt_pos_loss.item() > 0, 'cls loss should be non-zero'
-    assert onegt_neg_loss.item() > 0, 'neg (cls) loss should be non-zero'
-    assert onegt_center_loss.item() > 0, 'center loss should be non-zero'
- n, c, h, w = 10, 4, 20, 20
- mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
- results = levels_to_images(mlvl_tensor)
- assert len(results) == n
- assert results[0].size() == (h * w * 5, c)
- cls_scores = [torch.ones(2, 4, 5, 5)]
- bbox_preds = [torch.ones(2, 4, 5, 5)]
- iou_preds = [torch.ones(2, 1, 5, 5)]
- mlvl_anchors = [torch.ones(5 * 5, 2)]
- img_shape = None
- scale_factor = [0.5, 0.5]
- cfg = mmcv.Config(
- dict(
- nms_pre=1000,
- min_bbox_size=0,
- score_thr=0.05,
- nms=dict(type='nms', iou_threshold=0.6),
- max_per_img=100))
- rescale = False
- self._get_bboxes(
- cls_scores,
- bbox_preds,
- iou_preds,
- mlvl_anchors,
- img_shape,
- scale_factor,
- cfg,
- rescale=rescale)
diff --git a/spaces/trttung1610/musicgen/audiocraft/models/encodec.py b/spaces/trttung1610/musicgen/audiocraft/models/encodec.py
deleted file mode 100644
index 1cf6b54b582975a01bdb7a06280c766d3d2cc72c..0000000000000000000000000000000000000000
--- a/spaces/trttung1610/musicgen/audiocraft/models/encodec.py
+++ /dev/null
@@ -1,392 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Compression models or wrapper around existing models.
-Also defines the main interface that a model must follow to be usable as an audio tokenizer.
-"""
-
-from abc import ABC, abstractmethod
-import logging
-import math
-from pathlib import Path
-import typing as tp
-
-import numpy as np
-import torch
-from torch import nn
-from transformers import EncodecModel as HFEncodecModel
-
-from .. import quantization as qt
-
-
-logger = logging.getLogger()
-
-
-class CompressionModel(ABC, nn.Module):
-    """Base API for all compression models that aim at being used as audio tokenizers
- with a language model.
- """
-
- @abstractmethod
- def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
- ...
-
- @abstractmethod
- def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- """See `EncodecModel.encode`."""
- ...
-
- @abstractmethod
- def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
- """See `EncodecModel.decode`."""
- ...
-
- @abstractmethod
- def decode_latent(self, codes: torch.Tensor):
- """Decode from the discrete codes to continuous latent space."""
- ...
-
- @property
- @abstractmethod
- def channels(self) -> int:
- ...
-
- @property
- @abstractmethod
- def frame_rate(self) -> float:
- ...
-
- @property
- @abstractmethod
- def sample_rate(self) -> int:
- ...
-
- @property
- @abstractmethod
- def cardinality(self) -> int:
- ...
-
- @property
- @abstractmethod
- def num_codebooks(self) -> int:
- ...
-
- @property
- @abstractmethod
- def total_codebooks(self) -> int:
- ...
-
- @abstractmethod
- def set_num_codebooks(self, n: int):
- """Set the active number of codebooks used by the quantizer."""
- ...
-
- @staticmethod
- def get_pretrained(
- name: str, device: tp.Union[torch.device, str] = 'cpu'
- ) -> 'CompressionModel':
- """Instantiate a CompressionModel from a given pretrained model.
-
- Args:
-            name (Path or str): name of the pretrained model. See below.
- device (torch.device or str): Device on which the model is loaded.
-
- Pretrained models:
- - dac_44khz (https://github.com/descriptinc/descript-audio-codec)
- - dac_24khz (same)
- - facebook/encodec_24khz (https://huggingface.co/facebook/encodec_24khz)
- - facebook/encodec_32khz (https://huggingface.co/facebook/encodec_32khz)
-            - your own model on Hugging Face. Export instructions to come...
- """
-
- from . import builders, loaders
- model: CompressionModel
- if name in ['dac_44khz', 'dac_24khz']:
- model_type = name.split('_')[1]
- logger.info("Getting pretrained compression model from DAC %s", model_type)
- model = DAC(model_type)
- elif name in ['debug_compression_model']:
- logger.info("Getting pretrained compression model for debug")
- model = builders.get_debug_compression_model()
- elif Path(name).exists():
-            # We assume here that if the path exists, it is in fact an AC checkpoint
- # that was exported using `audiocraft.utils.export` functions.
- model = loaders.load_compression_model(name, device=device)
- else:
- logger.info("Getting pretrained compression model from HF %s", name)
- hf_model = HFEncodecModel.from_pretrained(name)
- model = HFEncodecCompressionModel(hf_model).to(device)
- return model.to(device).eval()
-
-
-class EncodecModel(CompressionModel):
- """Encodec model operating on the raw waveform.
-
- Args:
- encoder (nn.Module): Encoder network.
- decoder (nn.Module): Decoder network.
- quantizer (qt.BaseQuantizer): Quantizer network.
- frame_rate (int): Frame rate for the latent representation.
- sample_rate (int): Audio sample rate.
- channels (int): Number of audio channels.
- causal (bool): Whether to use a causal version of the model.
- renormalize (bool): Whether to renormalize the audio before running the model.
- """
- # we need assignment to override the property in the abstract class,
- # I couldn't find a better way...
- frame_rate: float = 0
- sample_rate: int = 0
- channels: int = 0
-
- def __init__(self,
- encoder: nn.Module,
- decoder: nn.Module,
- quantizer: qt.BaseQuantizer,
- frame_rate: int,
- sample_rate: int,
- channels: int,
- causal: bool = False,
- renormalize: bool = False):
- super().__init__()
- self.encoder = encoder
- self.decoder = decoder
- self.quantizer = quantizer
- self.frame_rate = frame_rate
- self.sample_rate = sample_rate
- self.channels = channels
- self.renormalize = renormalize
- self.causal = causal
- if self.causal:
- # we force disabling here to avoid handling linear overlap of segments
- # as supported in original EnCodec codebase.
- assert not self.renormalize, 'Causal model does not support renormalize'
-
- @property
- def total_codebooks(self):
- """Total number of quantizer codebooks available."""
- return self.quantizer.total_codebooks
-
- @property
- def num_codebooks(self):
- """Active number of codebooks used by the quantizer."""
- return self.quantizer.num_codebooks
-
- def set_num_codebooks(self, n: int):
- """Set the active number of codebooks used by the quantizer."""
- self.quantizer.set_num_codebooks(n)
-
- @property
- def cardinality(self):
- """Cardinality of each codebook."""
- return self.quantizer.bins
-
- def preprocess(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- scale: tp.Optional[torch.Tensor]
- if self.renormalize:
- mono = x.mean(dim=1, keepdim=True)
- volume = mono.pow(2).mean(dim=2, keepdim=True).sqrt()
- scale = 1e-8 + volume
- x = x / scale
- scale = scale.view(-1, 1)
- else:
- scale = None
- return x, scale
-
- def postprocess(self,
- x: torch.Tensor,
- scale: tp.Optional[torch.Tensor] = None) -> torch.Tensor:
- if scale is not None:
- assert self.renormalize
- x = x * scale.view(-1, 1, 1)
- return x
-
- def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
- assert x.dim() == 3
- length = x.shape[-1]
- x, scale = self.preprocess(x)
-
- emb = self.encoder(x)
- q_res = self.quantizer(emb, self.frame_rate)
- out = self.decoder(q_res.x)
-
- # remove extra padding added by the encoder and decoder
- assert out.shape[-1] >= length, (out.shape[-1], length)
- out = out[..., :length]
-
- q_res.x = self.postprocess(out, scale)
-
- return q_res
-
- def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- """Encode the given input tensor to quantized representation along with scale parameter.
-
- Args:
- x (torch.Tensor): Float tensor of shape [B, C, T]
-
- Returns:
- codes, scale (tuple of torch.Tensor, torch.Tensor): Tuple composed of:
-                codes, an int tensor of shape [B, K, T] with K the number of codebooks used and T the timestep.
-                scale, a float tensor containing the scale for audio renormalization.
- """
- assert x.dim() == 3
- x, scale = self.preprocess(x)
- emb = self.encoder(x)
- codes = self.quantizer.encode(emb)
- return codes, scale
-
- def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
- """Decode the given codes to a reconstructed representation, using the scale to perform
- audio denormalization if needed.
-
- Args:
- codes (torch.Tensor): Int tensor of shape [B, K, T]
- scale (torch.Tensor, optional): Float tensor containing the scale value.
-
- Returns:
- out (torch.Tensor): Float tensor of shape [B, C, T], the reconstructed audio.
- """
- emb = self.decode_latent(codes)
- out = self.decoder(emb)
- out = self.postprocess(out, scale)
- # out contains extra padding added by the encoder and decoder
- return out
-
- def decode_latent(self, codes: torch.Tensor):
- """Decode from the discrete codes to continuous latent space."""
- return self.quantizer.decode(codes)
-
-
-class DAC(CompressionModel):
- def __init__(self, model_type: str = "44khz"):
- super().__init__()
- try:
- import dac.utils
- except ImportError:
- raise RuntimeError("Could not import dac, make sure it is installed, "
- "please run `pip install descript-audio-codec`")
- self.model = dac.utils.load_model(model_type=model_type)
- self.n_quantizers = self.total_codebooks
-
- def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
- # We don't support training with this.
- raise NotImplementedError("Forward and training with DAC not supported.")
-
- def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- codes = self.model.encode(x, self.n_quantizers)[1]
- return codes, None
-
- def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
- assert scale is None
- z_q = self.decode_latent(codes)
- return self.model.decode(z_q)
-
- def decode_latent(self, codes: torch.Tensor):
- """Decode from the discrete codes to continuous latent space."""
- return self.model.quantizer.from_codes(codes)[0]
-
- @property
- def channels(self) -> int:
- return 1
-
- @property
- def frame_rate(self) -> float:
- return self.model.sample_rate / self.model.hop_length
-
- @property
- def sample_rate(self) -> int:
- return self.model.sample_rate
-
- @property
- def cardinality(self) -> int:
- return self.model.codebook_size
-
- @property
- def num_codebooks(self) -> int:
- return self.n_quantizers
-
- @property
- def total_codebooks(self) -> int:
- return self.model.n_codebooks
-
- def set_num_codebooks(self, n: int):
- """Set the active number of codebooks used by the quantizer.
- """
- assert n >= 1
- assert n <= self.total_codebooks
- self.n_quantizers = n
-
-
-class HFEncodecCompressionModel(CompressionModel):
- """Wrapper around HuggingFace Encodec.
- """
- def __init__(self, model: HFEncodecModel):
- super().__init__()
- self.model = model
- bws = self.model.config.target_bandwidths
- num_codebooks = [
- bw * 1000 / (self.frame_rate * math.log2(self.cardinality))
- for bw in bws
- ]
- deltas = [nc - int(nc) for nc in num_codebooks]
- # Checking we didn't do some bad maths and we indeed have integers!
-        assert all(delta <= 1e-3 for delta in deltas), deltas
- self.possible_num_codebooks = [int(nc) for nc in num_codebooks]
- self.set_num_codebooks(max(self.possible_num_codebooks))
-
- def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
- # We don't support training with this.
- raise NotImplementedError("Forward and training with HF EncodecModel not supported.")
-
- def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- bandwidth_index = self.possible_num_codebooks.index(self.num_codebooks)
- bandwidth = self.model.config.target_bandwidths[bandwidth_index]
- res = self.model.encode(x, None, bandwidth)
- assert len(res[0]) == 1
- assert len(res[1]) == 1
- return res[0][0], res[1][0]
-
- def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
- if scale is None:
- scales = [None] # type: ignore
- else:
- scales = scale # type: ignore
- res = self.model.decode(codes[None], scales)
- return res[0]
-
- def decode_latent(self, codes: torch.Tensor):
- """Decode from the discrete codes to continuous latent space."""
- return self.model.quantizer.decode(codes.transpose(0, 1))
-
- @property
- def channels(self) -> int:
- return self.model.config.audio_channels
-
- @property
- def frame_rate(self) -> float:
- hop_length = int(np.prod(self.model.config.upsampling_ratios))
- return self.sample_rate / hop_length
-
- @property
- def sample_rate(self) -> int:
- return self.model.config.sampling_rate
-
- @property
- def cardinality(self) -> int:
- return self.model.config.codebook_size
-
- @property
- def num_codebooks(self) -> int:
- return self._num_codebooks
-
- @property
- def total_codebooks(self) -> int:
- return max(self.possible_num_codebooks)
-
- def set_num_codebooks(self, n: int):
- """Set the active number of codebooks used by the quantizer.
- """
- if n not in self.possible_num_codebooks:
- raise ValueError(f"Allowed values for num codebooks: {self.possible_num_codebooks}")
- self._num_codebooks = n
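As a rough, hypothetical usage sketch of the `CompressionModel` API defined above (the checkpoint name, device, and audio shape are assumptions; which pretrained models are actually available depends on the environment):

```python
import torch
from audiocraft.models.encodec import CompressionModel  # the module defined above

model = CompressionModel.get_pretrained('facebook/encodec_32khz', device='cpu')

# One second of dummy audio shaped [B, C, T] at the model's sample rate.
wav = torch.randn(1, model.channels, model.sample_rate)

with torch.no_grad():
    codes, scale = model.encode(wav)    # codes: int tokens of shape [B, K, T']
    recon = model.decode(codes, scale)  # reconstructed waveform, roughly [B, C, T]

print(codes.shape, recon.shape, model.frame_rate, model.cardinality)
```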
diff --git a/spaces/ttt246/brain/Brain/src/rising_plugin/guardrails-config/actions/__init__.py b/spaces/ttt246/brain/Brain/src/rising_plugin/guardrails-config/actions/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/uSerNameDDHL/bingo/src/components/ui/badge.tsx b/spaces/uSerNameDDHL/bingo/src/components/ui/badge.tsx
deleted file mode 100644
index d9a84b394090e5b4b3bd34f6135b9a2f2ead0aa2..0000000000000000000000000000000000000000
--- a/spaces/uSerNameDDHL/bingo/src/components/ui/badge.tsx
+++ /dev/null
@@ -1,36 +0,0 @@
-import * as React from 'react'
-import { cva, type VariantProps } from 'class-variance-authority'
-
-import { cn } from '@/lib/utils'
-
-const badgeVariants = cva(
- 'inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2',
- {
- variants: {
- variant: {
- default:
- 'border-transparent bg-primary text-primary-foreground hover:bg-primary/80',
- secondary:
- 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80',
- destructive:
- 'border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80',
- outline: 'text-foreground'
- }
- },
- defaultVariants: {
- variant: 'default'
- }
- }
-)
-
-export interface BadgeProps
-  extends React.HTMLAttributes<HTMLDivElement>,
-    VariantProps<typeof badgeVariants> {}
-
-function Badge({ className, variant, ...props }: BadgeProps) {
- return (
-    <div className={cn(badgeVariants({ variant }), className)} {...props} />
- )
-}
-
-export { Badge, badgeVariants }
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Active File Recovery Professional V10.0.5 With Key [TorDigger] Utorrent.md b/spaces/usbethFlerru/sovits-modelsV2/example/Active File Recovery Professional V10.0.5 With Key [TorDigger] Utorrent.md
deleted file mode 100644
index d4e1580decc33dfc98e1ac8ddc15ea14a629abd3..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/Active File Recovery Professional V10.0.5 With Key [TorDigger] Utorrent.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Active File Recovery Professional V10.0.5 With Key [TorDigger] Utorrent
-
- d5da3c52bf
-
-
-
diff --git a/spaces/vishnu0001/text2mesh/shap_e/models/transmitter/__init__.py b/spaces/vishnu0001/text2mesh/shap_e/models/transmitter/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/viveknarayan/Image_Colorization/README.md b/spaces/viveknarayan/Image_Colorization/README.md
deleted file mode 100644
index 7d9b4785840552dd47340a0e2091b5d8cd5eb3a0..0000000000000000000000000000000000000000
--- a/spaces/viveknarayan/Image_Colorization/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Image Colorization
-emoji: 💻
-colorFrom: purple
-colorTo: blue
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py
deleted file mode 100644
index 988d9adf2f289ef223bd1c680a5ae1d3387f0269..0000000000000000000000000000000000000000
--- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py
+++ /dev/null
@@ -1,412 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import math
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..utils import kaiming_init
-from .registry import PLUGIN_LAYERS
-
-
-@PLUGIN_LAYERS.register_module()
-class GeneralizedAttention(nn.Module):
- """GeneralizedAttention module.
-
-    See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
-    (https://arxiv.org/abs/1904.05873) for details.
-
- Args:
- in_channels (int): Channels of the input feature map.
- spatial_range (int): The spatial range. -1 indicates no spatial range
- constraint. Default: -1.
- num_heads (int): The head number of empirical_attention module.
- Default: 9.
- position_embedding_dim (int): The position embedding dimension.
- Default: -1.
- position_magnitude (int): A multiplier acting on coord difference.
- Default: 1.
- kv_stride (int): The feature stride acting on key/value feature map.
- Default: 2.
- q_stride (int): The feature stride acting on query feature map.
- Default: 1.
- attention_type (str): A binary indicator string for indicating which
- items in generalized empirical_attention module are used.
- Default: '1111'.
-
- - '1000' indicates 'query and key content' (appr - appr) item,
- - '0100' indicates 'query content and relative position'
- (appr - position) item,
- - '0010' indicates 'key content only' (bias - appr) item,
- - '0001' indicates 'relative position only' (bias - position) item.
- """
-
- _abbr_ = 'gen_attention_block'
-
- def __init__(self,
- in_channels,
- spatial_range=-1,
- num_heads=9,
- position_embedding_dim=-1,
- position_magnitude=1,
- kv_stride=2,
- q_stride=1,
- attention_type='1111'):
-
- super(GeneralizedAttention, self).__init__()
-
- # hard range means local range for non-local operation
- self.position_embedding_dim = (
- position_embedding_dim
- if position_embedding_dim > 0 else in_channels)
-
- self.position_magnitude = position_magnitude
- self.num_heads = num_heads
- self.in_channels = in_channels
- self.spatial_range = spatial_range
- self.kv_stride = kv_stride
- self.q_stride = q_stride
- self.attention_type = [bool(int(_)) for _ in attention_type]
- self.qk_embed_dim = in_channels // num_heads
- out_c = self.qk_embed_dim * num_heads
-
- if self.attention_type[0] or self.attention_type[1]:
- self.query_conv = nn.Conv2d(
- in_channels=in_channels,
- out_channels=out_c,
- kernel_size=1,
- bias=False)
- self.query_conv.kaiming_init = True
-
- if self.attention_type[0] or self.attention_type[2]:
- self.key_conv = nn.Conv2d(
- in_channels=in_channels,
- out_channels=out_c,
- kernel_size=1,
- bias=False)
- self.key_conv.kaiming_init = True
-
- self.v_dim = in_channels // num_heads
- self.value_conv = nn.Conv2d(
- in_channels=in_channels,
- out_channels=self.v_dim * num_heads,
- kernel_size=1,
- bias=False)
- self.value_conv.kaiming_init = True
-
- if self.attention_type[1] or self.attention_type[3]:
- self.appr_geom_fc_x = nn.Linear(
- self.position_embedding_dim // 2, out_c, bias=False)
- self.appr_geom_fc_x.kaiming_init = True
-
- self.appr_geom_fc_y = nn.Linear(
- self.position_embedding_dim // 2, out_c, bias=False)
- self.appr_geom_fc_y.kaiming_init = True
-
- if self.attention_type[2]:
- stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
- appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
- self.appr_bias = nn.Parameter(appr_bias_value)
-
- if self.attention_type[3]:
- stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
- geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
- self.geom_bias = nn.Parameter(geom_bias_value)
-
- self.proj_conv = nn.Conv2d(
- in_channels=self.v_dim * num_heads,
- out_channels=in_channels,
- kernel_size=1,
- bias=True)
- self.proj_conv.kaiming_init = True
- self.gamma = nn.Parameter(torch.zeros(1))
-
- if self.spatial_range >= 0:
- # only works when non local is after 3*3 conv
- if in_channels == 256:
- max_len = 84
- elif in_channels == 512:
- max_len = 42
-
- max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
- local_constraint_map = np.ones(
-                (max_len, max_len, max_len_kv, max_len_kv), dtype=int)
- for iy in range(max_len):
- for ix in range(max_len):
- local_constraint_map[
- iy, ix,
- max((iy - self.spatial_range) //
- self.kv_stride, 0):min((iy + self.spatial_range +
- 1) // self.kv_stride +
- 1, max_len),
- max((ix - self.spatial_range) //
- self.kv_stride, 0):min((ix + self.spatial_range +
- 1) // self.kv_stride +
- 1, max_len)] = 0
-
- self.local_constraint_map = nn.Parameter(
- torch.from_numpy(local_constraint_map).byte(),
- requires_grad=False)
-
- if self.q_stride > 1:
- self.q_downsample = nn.AvgPool2d(
- kernel_size=1, stride=self.q_stride)
- else:
- self.q_downsample = None
-
- if self.kv_stride > 1:
- self.kv_downsample = nn.AvgPool2d(
- kernel_size=1, stride=self.kv_stride)
- else:
- self.kv_downsample = None
-
- self.init_weights()
-
- def get_position_embedding(self,
- h,
- w,
- h_kv,
- w_kv,
- q_stride,
- kv_stride,
- device,
- dtype,
- feat_dim,
- wave_length=1000):
- # the default type of Tensor is float32, leading to type mismatch
- # in fp16 mode. Cast it to support fp16 mode.
- h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype)
- h_idxs = h_idxs.view((h, 1)) * q_stride
-
- w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype)
- w_idxs = w_idxs.view((w, 1)) * q_stride
-
- h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to(
- device=device, dtype=dtype)
- h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride
-
- w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to(
- device=device, dtype=dtype)
- w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride
-
- # (h, h_kv, 1)
- h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
- h_diff *= self.position_magnitude
-
- # (w, w_kv, 1)
- w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
- w_diff *= self.position_magnitude
-
- feat_range = torch.arange(0, feat_dim / 4).to(
- device=device, dtype=dtype)
-
- dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype)
- dim_mat = dim_mat**((4. / feat_dim) * feat_range)
- dim_mat = dim_mat.view((1, 1, -1))
-
- embedding_x = torch.cat(
- ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)
-
- embedding_y = torch.cat(
- ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)
-
- return embedding_x, embedding_y
-
- def forward(self, x_input):
- num_heads = self.num_heads
-
- # use empirical_attention
- if self.q_downsample is not None:
- x_q = self.q_downsample(x_input)
- else:
- x_q = x_input
- n, _, h, w = x_q.shape
-
- if self.kv_downsample is not None:
- x_kv = self.kv_downsample(x_input)
- else:
- x_kv = x_input
- _, _, h_kv, w_kv = x_kv.shape
-
- if self.attention_type[0] or self.attention_type[1]:
- proj_query = self.query_conv(x_q).view(
- (n, num_heads, self.qk_embed_dim, h * w))
- proj_query = proj_query.permute(0, 1, 3, 2)
-
- if self.attention_type[0] or self.attention_type[2]:
- proj_key = self.key_conv(x_kv).view(
- (n, num_heads, self.qk_embed_dim, h_kv * w_kv))
-
- if self.attention_type[1] or self.attention_type[3]:
- position_embed_x, position_embed_y = self.get_position_embedding(
- h, w, h_kv, w_kv, self.q_stride, self.kv_stride,
- x_input.device, x_input.dtype, self.position_embedding_dim)
- # (n, num_heads, w, w_kv, dim)
- position_feat_x = self.appr_geom_fc_x(position_embed_x).\
- view(1, w, w_kv, num_heads, self.qk_embed_dim).\
- permute(0, 3, 1, 2, 4).\
- repeat(n, 1, 1, 1, 1)
-
- # (n, num_heads, h, h_kv, dim)
- position_feat_y = self.appr_geom_fc_y(position_embed_y).\
- view(1, h, h_kv, num_heads, self.qk_embed_dim).\
- permute(0, 3, 1, 2, 4).\
- repeat(n, 1, 1, 1, 1)
-
- position_feat_x /= math.sqrt(2)
- position_feat_y /= math.sqrt(2)
-
- # accelerate for saliency only
- if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
- appr_bias = self.appr_bias.\
- view(1, num_heads, 1, self.qk_embed_dim).\
- repeat(n, 1, 1, 1)
-
- energy = torch.matmul(appr_bias, proj_key).\
- view(n, num_heads, 1, h_kv * w_kv)
-
- h = 1
- w = 1
- else:
- # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
- if not self.attention_type[0]:
- energy = torch.zeros(
- n,
- num_heads,
- h,
- w,
- h_kv,
- w_kv,
- dtype=x_input.dtype,
- device=x_input.device)
-
- # attention_type[0]: appr - appr
- # attention_type[1]: appr - position
- # attention_type[2]: bias - appr
- # attention_type[3]: bias - position
- if self.attention_type[0] or self.attention_type[2]:
- if self.attention_type[0] and self.attention_type[2]:
- appr_bias = self.appr_bias.\
- view(1, num_heads, 1, self.qk_embed_dim)
- energy = torch.matmul(proj_query + appr_bias, proj_key).\
- view(n, num_heads, h, w, h_kv, w_kv)
-
- elif self.attention_type[0]:
- energy = torch.matmul(proj_query, proj_key).\
- view(n, num_heads, h, w, h_kv, w_kv)
-
- elif self.attention_type[2]:
- appr_bias = self.appr_bias.\
- view(1, num_heads, 1, self.qk_embed_dim).\
- repeat(n, 1, 1, 1)
-
- energy += torch.matmul(appr_bias, proj_key).\
- view(n, num_heads, 1, 1, h_kv, w_kv)
-
- if self.attention_type[1] or self.attention_type[3]:
- if self.attention_type[1] and self.attention_type[3]:
- geom_bias = self.geom_bias.\
- view(1, num_heads, 1, self.qk_embed_dim)
-
- proj_query_reshape = (proj_query + geom_bias).\
- view(n, num_heads, h, w, self.qk_embed_dim)
-
- energy_x = torch.matmul(
- proj_query_reshape.permute(0, 1, 3, 2, 4),
- position_feat_x.permute(0, 1, 2, 4, 3))
- energy_x = energy_x.\
- permute(0, 1, 3, 2, 4).unsqueeze(4)
-
- energy_y = torch.matmul(
- proj_query_reshape,
- position_feat_y.permute(0, 1, 2, 4, 3))
- energy_y = energy_y.unsqueeze(5)
-
- energy += energy_x + energy_y
-
- elif self.attention_type[1]:
- proj_query_reshape = proj_query.\
- view(n, num_heads, h, w, self.qk_embed_dim)
- proj_query_reshape = proj_query_reshape.\
- permute(0, 1, 3, 2, 4)
- position_feat_x_reshape = position_feat_x.\
- permute(0, 1, 2, 4, 3)
- position_feat_y_reshape = position_feat_y.\
- permute(0, 1, 2, 4, 3)
-
- energy_x = torch.matmul(proj_query_reshape,
- position_feat_x_reshape)
- energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)
-
- energy_y = torch.matmul(proj_query_reshape,
- position_feat_y_reshape)
- energy_y = energy_y.unsqueeze(5)
-
- energy += energy_x + energy_y
-
- elif self.attention_type[3]:
- geom_bias = self.geom_bias.\
- view(1, num_heads, self.qk_embed_dim, 1).\
- repeat(n, 1, 1, 1)
-
- position_feat_x_reshape = position_feat_x.\
- view(n, num_heads, w*w_kv, self.qk_embed_dim)
-
- position_feat_y_reshape = position_feat_y.\
- view(n, num_heads, h * h_kv, self.qk_embed_dim)
-
- energy_x = torch.matmul(position_feat_x_reshape, geom_bias)
- energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)
-
- energy_y = torch.matmul(position_feat_y_reshape, geom_bias)
- energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)
-
- energy += energy_x + energy_y
-
- energy = energy.view(n, num_heads, h * w, h_kv * w_kv)
-
- if self.spatial_range >= 0:
- cur_local_constraint_map = \
- self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
- contiguous().\
- view(1, 1, h*w, h_kv*w_kv)
-
- energy = energy.masked_fill_(cur_local_constraint_map,
- float('-inf'))
-
- attention = F.softmax(energy, 3)
-
- proj_value = self.value_conv(x_kv)
- proj_value_reshape = proj_value.\
- view((n, num_heads, self.v_dim, h_kv * w_kv)).\
- permute(0, 1, 3, 2)
-
- out = torch.matmul(attention, proj_value_reshape).\
- permute(0, 1, 3, 2).\
- contiguous().\
- view(n, self.v_dim * self.num_heads, h, w)
-
- out = self.proj_conv(out)
-
- # output is downsampled, upsample back to input size
- if self.q_downsample is not None:
- out = F.interpolate(
- out,
- size=x_input.shape[2:],
- mode='bilinear',
- align_corners=False)
-
- out = self.gamma * out + x_input
- return out
-
- def init_weights(self):
- for m in self.modules():
- if hasattr(m, 'kaiming_init') and m.kaiming_init:
- kaiming_init(
- m,
- mode='fan_in',
- nonlinearity='leaky_relu',
- bias=0,
- distribution='uniform',
- a=1)
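For orientation, a small hypothetical smoke test for the plugin defined above (it assumes an mmcv installation that exposes this class from `mmcv.cnn`; the channel and spatial sizes are arbitrary):

```python
import torch
from mmcv.cnn import GeneralizedAttention  # same implementation as the module above

# spatial_range=-1 (default) avoids the hard-coded max_len table for 256/512 channels.
attn = GeneralizedAttention(in_channels=16, num_heads=4, attention_type='1111')
x = torch.randn(2, 16, 20, 20)
out = attn(x)
print(out.shape)  # torch.Size([2, 16, 20, 20]); the residual output keeps the input shape
```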
diff --git a/spaces/vumichien/canvas_controlnet/ldm/data/util.py b/spaces/vumichien/canvas_controlnet/ldm/data/util.py
deleted file mode 100644
index 5b60ceb2349e3bd7900ff325740e2022d2903b1c..0000000000000000000000000000000000000000
--- a/spaces/vumichien/canvas_controlnet/ldm/data/util.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import torch
-
-from ldm.modules.midas.api import load_midas_transform
-
-
-class AddMiDaS(object):
- def __init__(self, model_type):
- super().__init__()
- self.transform = load_midas_transform(model_type)
-
- def pt2np(self, x):
- x = ((x + 1.0) * .5).detach().cpu().numpy()
- return x
-
- def np2pt(self, x):
- x = torch.from_numpy(x) * 2 - 1.
- return x
-
- def __call__(self, sample):
- # sample['jpg'] is tensor hwc in [-1, 1] at this point
- x = self.pt2np(sample['jpg'])
- x = self.transform({"image": x})["image"]
- sample['midas_in'] = x
- return sample
\ No newline at end of file
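As a quick standalone check of the value-range convention used by `pt2np`/`np2pt` above (no MiDaS weights are needed; the conversion functions are re-stated inline for illustration):

```python
import numpy as np
import torch

def pt2np(x: torch.Tensor) -> np.ndarray:
    return ((x + 1.0) * 0.5).detach().cpu().numpy()  # [-1, 1] -> [0, 1]

def np2pt(x: np.ndarray) -> torch.Tensor:
    return torch.from_numpy(x) * 2 - 1.0             # [0, 1] -> [-1, 1]

img = torch.rand(4, 4, 3) * 2 - 1                    # dummy hwc image in [-1, 1]
assert torch.allclose(np2pt(pt2np(img)), img, atol=1e-6)
```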
diff --git a/spaces/widged/bloom_demo/spaces_info.py b/spaces/widged/bloom_demo/spaces_info.py
deleted file mode 100644
index 649c3185305b49bc3fe3fad8bc6be26719f599fb..0000000000000000000000000000000000000000
--- a/spaces/widged/bloom_demo/spaces_info.py
+++ /dev/null
@@ -1,76 +0,0 @@
-description = """Gradio Demo for BLOOM. To use it, simply add your text, or click one of the examples to load them.
-Tips:
-- Do NOT talk to BLOOM as an entity, it's not a chatbot but a webpage/blog/article completion model.
-- For the best results: MIMIC a few sentences of a webpage similar to the content you want to generate.
-Start a paragraph as if YOU were writing a blog, webpage, math post, coding article and BLOOM will generate a coherent follow-up. Longer prompts usually give more interesting results.
-- Content: Please see our [content disclaimer](https://hf.co/spaces/bigscience/bloom-book) before using the model, as it may sometimes behave in unexpected ways.
-
-Options:
-- sampling: imaginative completions (may be not super accurate e.g. math/history)
-- greedy: accurate completions (may be more boring or have repetitions)
-"""
-
-wip_description = """JAX / Flax Gradio Demo for BLOOM. The 176B BLOOM model running on a TPU v3-256 pod, with 2D model parallelism and custom mesh axes.
-Note:
-1. For this WIP demo, only **sampling** is supported.
-2. Rendering of the screenshot is currently not optimised. To experience the true speed of JAX / Flax, tick 'just output raw text'.
-"""
-
-examples = [
- [
- 'A "whatpu" is a small, furry animal native to Tanzania. An example of a sentence that uses the word whatpu is: We were traveling in Africa and we saw these very cute whatpus. To do a "farduddle" means to jump up and down really fast. An example of a sentence that uses the word farduddle is:',
- 32,
- "Sample",
- "Sample 1",
- ],
- [
- "A poem about the beauty of science by Alfred Edgar Brittle\nTitle: The Magic Craft\nIn the old times",
- 50,
- "Sample",
- "Sample 1",
- ],
- ["استخراج العدد العاملي في لغة بايثون:", 30, "Greedy", "Sample 1"],
- ["Pour déguster un ortolan, il faut tout d'abord", 32, "Sample", "Sample 1"],
- [
- "Traduce español de España a español de Argentina\nEl coche es rojo - el auto es rojo\nEl ordenador es nuevo - la computadora es nueva\nel boligrafo es negro -",
- 16,
- "Sample",
- "Sample 1",
- ],
- [
- "Estos ejemplos quitan vocales de las palabras\nEjemplos:\nhola - hl\nmanzana - mnzn\npapas - pps\nalacran - lcrn\npapa -",
- 16,
- "Sample",
- "Sample 1",
- ],
- [
- "Question: If I put cheese into the fridge, will it melt?\nAnswer:",
- 32,
- "Sample",
- "Sample 1",
- ],
- ["Math exercise - answers:\n34+10=44\n54+20=", 16, "Greedy", "Sample 1"],
- [
- "Question: Where does the Greek Goddess Persephone spend half of the year when she is not with her mother?\nAnswer:",
- 24,
- "Greedy",
- "Sample 1",
- ],
- [
- "spelling test answers.\nWhat are the letters in « language »?\nAnswer: l-a-n-g-u-a-g-e\nWhat are the letters in « Romanian »?\nAnswer:",
- 24,
- "Greedy",
- "Sample 1",
- ],
-]
-
-initial_prompt_value = """استخراج العدد العاملي في لغة بايثون :
-def factorial(n):
- if n == 0:
- return 1
- else:
- result = 1
- for i in range(1, n + 1) :
- result *= i
- return result
-print(factorial(5))"""
\ No newline at end of file
diff --git a/spaces/xiang2811/ChatGPT/assets/Kelpy-Codos.js b/spaces/xiang2811/ChatGPT/assets/Kelpy-Codos.js
deleted file mode 100644
index cfbaeedb4f371dfb5fe157db545b364046fca3e1..0000000000000000000000000000000000000000
--- a/spaces/xiang2811/ChatGPT/assets/Kelpy-Codos.js
+++ /dev/null
@@ -1,76 +0,0 @@
-// ==UserScript==
-// @name Kelpy Codos
-// @namespace https://github.com/Keldos-Li/Kelpy-Codos
-// @version 1.0.5
-// @author Keldos; https://keldos.me/
-// @description  Add a copy button to PRE tags that contain a CODE tag, especially for Chuanhu ChatGPT.
-// Based on Chuanhu ChatGPT version: ac04408 (2023-3-22)
-// @license GPL-3.0
-// @grant none
-// ==/UserScript==
-
-(function () {
- 'use strict';
-
- function addCopyButton(pre) {
- var code = pre.querySelector('code');
- if (!code) {
-            return; // if no <code> element is found, do not add the button
- }
- var firstChild = code.firstChild;
- if (!firstChild) {
-            return; // if the <code> element has no child nodes, do not add the button
- }
- var button = document.createElement('button');
-        button.textContent = '\uD83D\uDCCE'; // use the 📎 (paperclip) symbol as the "copy" button label
- button.style.position = 'relative';
- button.style.float = 'right';
-        button.style.fontSize = '1em'; // optional: adjust the button size
-        button.style.background = 'none'; // optional: remove the background color
-        button.style.border = 'none'; // optional: remove the border
-        button.style.cursor = 'pointer'; // optional: show a pointer cursor
- button.addEventListener('click', function () {
- var range = document.createRange();
- range.selectNodeContents(code);
-            range.setStartBefore(firstChild); // start the range before the first child node
- var selection = window.getSelection();
- selection.removeAllRanges();
- selection.addRange(range);
-
- try {
- var success = document.execCommand('copy');
- if (success) {
- button.textContent = '\u2714';
- setTimeout(function () {
-                        button.textContent = '\uD83D\uDCCE'; // restore the "copy" button
- }, 2000);
- } else {
- button.textContent = '\u2716';
- }
- } catch (e) {
- console.error(e);
- button.textContent = '\u2716';
- }
-
- selection.removeAllRanges();
- });
-        code.insertBefore(button, firstChild); // insert the button before the first child element
- }
-
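-    // Watch for newly added PRE nodes (e.g. streamed chat messages) and give them a copy button too.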
- function handleNewElements(mutationsList, observer) {
- for (var mutation of mutationsList) {
- if (mutation.type === 'childList') {
- for (var node of mutation.addedNodes) {
- if (node.nodeName === 'PRE') {
- addCopyButton(node);
- }
- }
- }
- }
- }
-
- var observer = new MutationObserver(handleNewElements);
- observer.observe(document.documentElement, { childList: true, subtree: true });
-
- document.querySelectorAll('pre').forEach(addCopyButton);
-})();
diff --git a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/Synchronized-BatchNorm-PyTorch/README.md b/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/Synchronized-BatchNorm-PyTorch/README.md
deleted file mode 100644
index 779983436c9727dd0d6301a1c857f2360245b51d..0000000000000000000000000000000000000000
--- a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/models/networks/Synchronized-BatchNorm-PyTorch/README.md
+++ /dev/null
@@ -1,118 +0,0 @@
-# Synchronized-BatchNorm-PyTorch
-
-**IMPORTANT: Please read the "Implementation details and highlights" section before use.**
-
-Synchronized Batch Normalization implementation in PyTorch.
-
-This module differs from the built-in PyTorch BatchNorm as the mean and
-standard-deviation are reduced across all devices during training.
-
-For example, when one uses `nn.DataParallel` to wrap the network during
-training, PyTorch's implementation normalizes the tensor on each device using
-only the statistics on that device. This accelerates the computation and
-is easy to implement, but the statistics might be inaccurate.
-Instead, in this synchronized version, the statistics will be computed
-over all training samples distributed on multiple devices.
-
-Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
-as the built-in PyTorch implementation.
-
-This module is currently only a prototype version for research use. As mentioned below,
-it has its limitations and may even suffer from some design problems. If you have any
-questions or suggestions, please feel free to
-[open an issue](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues) or
-[submit a pull request](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues).
-
-## Why Synchronized BatchNorm?
-
-Although the typical multi-device (GPU) implementation of BatchNorm is fast
-(with no communication overhead), it inevitably reduces the batch size seen by each device,
-which can degrade the performance. This is not a significant issue in some
-standard vision tasks such as ImageNet classification (as the batch size per device
-is usually large enough to obtain good statistics). However, it will hurt the performance
-in tasks where the batch size is usually very small (e.g., 1 per GPU).
-
-For example, the importance of synchronized batch normalization in object detection has recently been
-demonstrated with an extensive analysis in the paper [MegDet: A Large Mini-Batch Object Detector](https://arxiv.org/abs/1711.07240).
-
-## Usage
-
-To use the Synchronized Batch Normalization, we add a data parallel replication callback. This introduces a slight
-difference from the typical usage of `nn.DataParallel`.
-
-Use it with a provided, customized data parallel wrapper:
-
-```python
-from sync_batchnorm import SynchronizedBatchNorm1d, DataParallelWithCallback
-
-sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
-sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-```
-
-Or, if you are using a customized data parallel module, you can apply this library via monkey patching.
-
-```python
-from torch.nn import DataParallel # or your customized DataParallel module
-from sync_batchnorm import SynchronizedBatchNorm1d, patch_replication_callback
-
-sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
-sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
-patch_replication_callback(sync_bn) # monkey-patching
-```
-
-You can use `convert_model` to convert your model to use Synchronized BatchNorm easily.
-
-```python
-import torch.nn as nn
-from torchvision import models
-from sync_batchnorm import convert_model
-# m is a standard pytorch model
-m = models.resnet18(True)
-m = nn.DataParallel(m)
-# after convert, m is using SyncBN
-m = convert_model(m)
-```
-
-See also `tests/test_sync_batchnorm.py` for numeric result comparison.
-
-## Implementation details and highlights
-
-If you are interested in how batch statistics are reduced and broadcasted among multiple devices, please take a look
-at the code with detailed comments. Here we only emphasize some highlights of the implementation:
-
-- This implementation is in pure-python. No C++ extra extension libs.
-- Easy to use as demonstrated above.
-- It uses unbiased variance to update the moving average, and uses `sqrt(max(var, eps))` instead of `sqrt(var + eps)`.
-- The implementation requires that each module on different devices invoke the `batchnorm` exactly the SAME
-number of times in each forward pass. For example, you cannot call `batchnorm` on GPU0 but skip it on GPU1. The `#i
-(i = 1, 2, 3, ...)` calls of the `batchnorm` on each device are grouped together and their statistics are reduced.
-This is tricky, but it is a good way to handle PyTorch's dynamic computation graph. Although it sounds complicated, this
-will usually not be an issue for most models (see the sketch below).
-
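-For illustration, here is a minimal sketch of a pattern that violates this requirement: whether the
-`batchnorm` runs depends on the data seen by each replica, so different devices may invoke it a
-different number of times within the same forward pass.
-
-```python
-import torch.nn as nn
-from sync_batchnorm import SynchronizedBatchNorm1d
-
-class DataDependentBranch(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.bn = SynchronizedBatchNorm1d(16)
-
-    def forward(self, x):
-        # Anti-pattern: this data-dependent branch may call `bn` on one replica
-        # but not on another, so the cross-device reduction of the batch
-        # statistics no longer lines up.
-        if x.mean() > 0:
-            x = self.bn(x)
-        return x
-```
-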
-## Known issues
-
-#### Runtime error on backward pass.
-
-Due to a [PyTorch bug](https://github.com/pytorch/pytorch/issues/3883), using old PyTorch libraries will trigger a `RuntimeError` with messages like:
-
-```
-Assertion `pos >= 0 && pos < buffer.size()` failed.
-```
-
-This has already been solved in the newest PyTorch repo, which, unfortunately, has not yet been included in the official or Anaconda binary releases. Thus, you need to build the PyTorch package from source according to the
-instructions [here](https://github.com/pytorch/pytorch#from-source).
-
-#### Numeric error.
-
-Because this library does not fuse the normalization and statistics operations in C++ (nor CUDA), it is less
-numerically stable compared to the original PyTorch implementation. Detailed analysis can be found in
-`tests/test_sync_batchnorm.py`.
-
-## Authors and License:
-
-Copyright (c) 2018-, [Jiayuan Mao](https://vccy.xyz).
-
-**Contributors**: [Tete Xiao](https://tetexiao.com), [DTennant](https://github.com/DTennant).
-
-Distributed under **MIT License** (See LICENSE)
-
diff --git a/spaces/xxccc/gpt-academic/docs/waifu_plugin/autoload.js b/spaces/xxccc/gpt-academic/docs/waifu_plugin/autoload.js
deleted file mode 100644
index 3464a5cd44b0d4e1b0f2528bd01fc1793275b964..0000000000000000000000000000000000000000
--- a/spaces/xxccc/gpt-academic/docs/waifu_plugin/autoload.js
+++ /dev/null
@@ -1,30 +0,0 @@
-try {
- $("").attr({href: "file=docs/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css"}).appendTo('head');
- $('body').append('
');
- $.ajax({url: "file=docs/waifu_plugin/waifu-tips.js", dataType:"script", cache: true, success: function() {
- $.ajax({url: "file=docs/waifu_plugin/live2d.js", dataType:"script", cache: true, success: function() {
-            /* some of these settings can be modified directly */
-            live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // Hitokoto (quote) API
-            live2d_settings['modelId'] = 5; // default model ID
-            live2d_settings['modelTexturesId'] = 1; // default texture ID
-            live2d_settings['modelStorage'] = false; // do not persist the model ID
- live2d_settings['waifuSize'] = '210x187';
- live2d_settings['waifuTipsSize'] = '187x52';
- live2d_settings['canSwitchModel'] = true;
- live2d_settings['canSwitchTextures'] = true;
- live2d_settings['canSwitchHitokoto'] = false;
- live2d_settings['canTakeScreenshot'] = false;
- live2d_settings['canTurnToHomePage'] = false;
- live2d_settings['canTurnToAboutPage'] = false;
-            live2d_settings['showHitokoto'] = false; // show Hitokoto quotes
-            live2d_settings['showF12Status'] = false; // show loading status in the console
-            live2d_settings['showF12Message'] = false; // show waifu messages in the console
-            live2d_settings['showF12OpenMsg'] = false; // show a tip when the devtools console is opened
-            live2d_settings['showCopyMessage'] = false; // show a tip when content is copied
-            live2d_settings['showWelcomeMessage'] = true; // show a welcome message on page load
-
-            /* add any overrides before initModel */
- initModel("file=docs/waifu_plugin/waifu-tips.json");
- }});
- }});
-} catch(err) { console.log("[Error] jQuery is not defined.") }
diff --git a/spaces/yfyangd/PictureBookUnderstanding/BLIP/data/nlvr_dataset.py b/spaces/yfyangd/PictureBookUnderstanding/BLIP/data/nlvr_dataset.py
deleted file mode 100644
index a8d6b2d7cd8d3260bd279c7dca80de53bacc691a..0000000000000000000000000000000000000000
--- a/spaces/yfyangd/PictureBookUnderstanding/BLIP/data/nlvr_dataset.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import os
-import json
-import random
-
-from torch.utils.data import Dataset
-from torchvision.datasets.utils import download_url
-
-from PIL import Image
-
-from data.utils import pre_caption
-
-class nlvr_dataset(Dataset):
- def __init__(self, transform, image_root, ann_root, split):
- '''
- image_root (string): Root directory of images
- ann_root (string): directory to store the annotation file
- split (string): train, val or test
- '''
- urls = {'train':'https://storage.googleapis.com/sfr-vision-language-research/datasets/nlvr_train.json',
- 'val':'https://storage.googleapis.com/sfr-vision-language-research/datasets/nlvr_dev.json',
- 'test':'https://storage.googleapis.com/sfr-vision-language-research/datasets/nlvr_test.json'}
- filenames = {'train':'nlvr_train.json','val':'nlvr_dev.json','test':'nlvr_test.json'}
-
- download_url(urls[split],ann_root)
- self.annotation = json.load(open(os.path.join(ann_root,filenames[split]),'r'))
-
- self.transform = transform
- self.image_root = image_root
-
-
- def __len__(self):
- return len(self.annotation)
-
-
- def __getitem__(self, index):
-
- ann = self.annotation[index]
-
- image0_path = os.path.join(self.image_root,ann['images'][0])
- image0 = Image.open(image0_path).convert('RGB')
- image0 = self.transform(image0)
-
- image1_path = os.path.join(self.image_root,ann['images'][1])
- image1 = Image.open(image1_path).convert('RGB')
- image1 = self.transform(image1)
-
- sentence = pre_caption(ann['sentence'], 40)
-
- if ann['label']=='True':
- label = 1
- else:
- label = 0
-
- words = sentence.split(' ')
-
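-        # Augmentation: randomly swap the order of the two images; if the sentence
-        # mentions "left"/"right", also swap those words so the label stays consistent.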
- if 'left' not in words and 'right' not in words:
- if random.random()<0.5:
- return image0, image1, sentence, label
- else:
- return image1, image0, sentence, label
- else:
- if random.random()<0.5:
- return image0, image1, sentence, label
- else:
- new_words = []
- for word in words:
- if word=='left':
- new_words.append('right')
- elif word=='right':
- new_words.append('left')
- else:
- new_words.append(word)
-
- sentence = ' '.join(new_words)
- return image1, image0, sentence, label
-
-
-
\ No newline at end of file
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/funnel/tokenization_funnel.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/funnel/tokenization_funnel.py
deleted file mode 100644
index 9b0d3c1b6c5221f24118d8ac5518cdea2085ab44..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/funnel/tokenization_funnel.py
+++ /dev/null
@@ -1,562 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Tokenization class for Funnel Transformer."""
-
-import collections
-import os
-import unicodedata
-from typing import List, Optional, Tuple
-
-from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
-from ...utils import logging
-
-
-logger = logging.get_logger(__name__)
-
-VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
-
-_model_names = [
- "small",
- "small-base",
- "medium",
- "medium-base",
- "intermediate",
- "intermediate-base",
- "large",
- "large-base",
- "xlarge",
- "xlarge-base",
-]
-
-PRETRAINED_VOCAB_FILES_MAP = {
- "vocab_file": {
- "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
- "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
- "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
- "funnel-transformer/medium-base": (
- "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
- ),
- "funnel-transformer/intermediate": (
- "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
- ),
- "funnel-transformer/intermediate-base": (
- "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
- ),
- "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
- "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
- "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
- "funnel-transformer/xlarge-base": (
- "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
- ),
- }
-}
-PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
-PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
-
-
-# Copied from transformers.models.bert.tokenization_bert.load_vocab
-def load_vocab(vocab_file):
- """Loads a vocabulary file into a dictionary."""
- vocab = collections.OrderedDict()
- with open(vocab_file, "r", encoding="utf-8") as reader:
- tokens = reader.readlines()
- for index, token in enumerate(tokens):
- token = token.rstrip("\n")
- vocab[token] = index
- return vocab
-
-
-# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
-def whitespace_tokenize(text):
- """Runs basic whitespace cleaning and splitting on a piece of text."""
- text = text.strip()
- if not text:
- return []
- tokens = text.split()
- return tokens
-
-
-class FunnelTokenizer(PreTrainedTokenizer):
- r"""
- Construct a Funnel Transformer tokenizer. Based on WordPiece.
-
- This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
- this superclass for more information regarding those methods.
-
- Args:
- vocab_file (`str`):
- File containing the vocabulary.
- do_lower_case (`bool`, *optional*, defaults to `True`):
- Whether or not to lowercase the input when tokenizing.
- do_basic_tokenize (`bool`, *optional*, defaults to `True`):
- Whether or not to do basic tokenization before WordPiece.
- never_split (`Iterable`, *optional*):
- Collection of tokens which will never be split during tokenization. Only has an effect when
- `do_basic_tokenize=True`
-        unk_token (`str`, *optional*, defaults to `"<unk>"`):
- The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
- token instead.
-        sep_token (`str`, *optional*, defaults to `"<sep>"`):
- The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
- sequence classification or for a text and a question for question answering. It is also used as the last
- token of a sequence built with special tokens.
-        pad_token (`str`, *optional*, defaults to `"<pad>"`):
- The token used for padding, for example when batching sequences of different lengths.
-        cls_token (`str`, *optional*, defaults to `"<cls>"`):
- The classifier token which is used when doing sequence classification (classification of the whole sequence
- instead of per-token classification). It is the first token of the sequence when built with special tokens.
-        mask_token (`str`, *optional*, defaults to `"<mask>"`):
- The token used for masking values. This is the token used when training this model with masked language
- modeling. This is the token which the model will try to predict.
-        bos_token (`str`, *optional*, defaults to `"<s>"`):
- The beginning of sentence token.
-        eos_token (`str`, *optional*, defaults to `"</s>"`):
- The end of sentence token.
- tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
- Whether or not to tokenize Chinese characters.
-
- This should likely be deactivated for Japanese (see this
- [issue](https://github.com/huggingface/transformers/issues/328)).
- strip_accents (`bool`, *optional*):
- Whether or not to strip all accents. If this option is not specified, then it will be determined by the
- value for `lowercase` (as in the original BERT).
- """
-
- vocab_files_names = VOCAB_FILES_NAMES
- pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
- pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
- max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
- cls_token_type_id: int = 2
-
- def __init__(
- self,
- vocab_file,
- do_lower_case=True,
- do_basic_tokenize=True,
- never_split=None,
- unk_token="",
- sep_token="",
- pad_token="",
- cls_token="",
- mask_token="",
- bos_token="",
- eos_token="",
- tokenize_chinese_chars=True,
- strip_accents=None,
- **kwargs,
- ):
- if not os.path.isfile(vocab_file):
- raise ValueError(
- f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
- " model use `tokenizer = FunnelTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
- )
- self.vocab = load_vocab(vocab_file)
- self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
- self.do_basic_tokenize = do_basic_tokenize
- if do_basic_tokenize:
- self.basic_tokenizer = BasicTokenizer(
- do_lower_case=do_lower_case,
- never_split=never_split,
- tokenize_chinese_chars=tokenize_chinese_chars,
- strip_accents=strip_accents,
- )
- self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
-
- super().__init__(
- do_lower_case=do_lower_case,
- do_basic_tokenize=do_basic_tokenize,
- never_split=never_split,
- unk_token=unk_token,
- sep_token=sep_token,
- pad_token=pad_token,
- cls_token=cls_token,
- mask_token=mask_token,
- bos_token=bos_token,
- eos_token=eos_token,
- tokenize_chinese_chars=tokenize_chinese_chars,
- strip_accents=strip_accents,
- **kwargs,
- )
-
- @property
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
- def do_lower_case(self):
- return self.basic_tokenizer.do_lower_case
-
- @property
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
- def vocab_size(self):
- return len(self.vocab)
-
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
- def get_vocab(self):
- return dict(self.vocab, **self.added_tokens_encoder)
-
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
- def _tokenize(self, text, split_special_tokens=False):
- split_tokens = []
- if self.do_basic_tokenize:
- for token in self.basic_tokenizer.tokenize(
- text, never_split=self.all_special_tokens if not split_special_tokens else None
- ):
- # If the token is part of the never_split set
- if token in self.basic_tokenizer.never_split:
- split_tokens.append(token)
- else:
- split_tokens += self.wordpiece_tokenizer.tokenize(token)
- else:
- split_tokens = self.wordpiece_tokenizer.tokenize(text)
- return split_tokens
-
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
- def _convert_token_to_id(self, token):
- """Converts a token (str) in an id using the vocab."""
- return self.vocab.get(token, self.vocab.get(self.unk_token))
-
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
- def _convert_id_to_token(self, index):
- """Converts an index (integer) in a token (str) using the vocab."""
- return self.ids_to_tokens.get(index, self.unk_token)
-
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
- def convert_tokens_to_string(self, tokens):
- """Converts a sequence of tokens (string) in a single string."""
- out_string = " ".join(tokens).replace(" ##", "").strip()
- return out_string
-
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
- def build_inputs_with_special_tokens(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- """
-        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
- adding special tokens. A BERT sequence has the following format:
-
- - single sequence: `[CLS] X [SEP]`
- - pair of sequences: `[CLS] A [SEP] B [SEP]`
-
- Args:
- token_ids_0 (`List[int]`):
- List of IDs to which the special tokens will be added.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
-
- Returns:
- `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
- """
- if token_ids_1 is None:
- return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
- cls = [self.cls_token_id]
- sep = [self.sep_token_id]
- return cls + token_ids_0 + sep + token_ids_1 + sep
-
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
- def get_special_tokens_mask(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
- ) -> List[int]:
- """
- Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
- special tokens using the tokenizer `prepare_for_model` method.
-
- Args:
- token_ids_0 (`List[int]`):
- List of IDs.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
- already_has_special_tokens (`bool`, *optional*, defaults to `False`):
- Whether or not the token list is already formatted with special tokens for the model.
-
- Returns:
- `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
- """
-
- if already_has_special_tokens:
- return super().get_special_tokens_mask(
- token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
- )
-
- if token_ids_1 is not None:
- return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
- return [1] + ([0] * len(token_ids_0)) + [1]
-
- def create_token_type_ids_from_sequences(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- """
- Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel
- Transformer sequence pair mask has the following format:
-
- ```
- 2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
- | first sequence | second sequence |
- ```
-
- If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
-
- Args:
- token_ids_0 (`List[int]`):
- List of IDs.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
-
- Returns:
- `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
- """
- sep = [self.sep_token_id]
- cls = [self.cls_token_id]
- if token_ids_1 is None:
- return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
- return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
-
- # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
- def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
- index = 0
- if os.path.isdir(save_directory):
- vocab_file = os.path.join(
- save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
- )
- else:
- vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
- with open(vocab_file, "w", encoding="utf-8") as writer:
- for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
- if index != token_index:
- logger.warning(
- f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
- " Please check that the vocabulary is not corrupted!"
- )
- index = token_index
- writer.write(token + "\n")
- index += 1
- return (vocab_file,)
-
-
-# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
- """
- Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
-
- Args:
- do_lower_case (`bool`, *optional*, defaults to `True`):
- Whether or not to lowercase the input when tokenizing.
- never_split (`Iterable`, *optional*):
- Collection of tokens which will never be split during tokenization. Only has an effect when
- `do_basic_tokenize=True`
- tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
- Whether or not to tokenize Chinese characters.
-
- This should likely be deactivated for Japanese (see this
- [issue](https://github.com/huggingface/transformers/issues/328)).
- strip_accents (`bool`, *optional*):
- Whether or not to strip all accents. If this option is not specified, then it will be determined by the
- value for `lowercase` (as in the original BERT).
- do_split_on_punc (`bool`, *optional*, defaults to `True`):
- In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
- the full context of the words, such as contractions.
- """
-
- def __init__(
- self,
- do_lower_case=True,
- never_split=None,
- tokenize_chinese_chars=True,
- strip_accents=None,
- do_split_on_punc=True,
- ):
- if never_split is None:
- never_split = []
- self.do_lower_case = do_lower_case
- self.never_split = set(never_split)
- self.tokenize_chinese_chars = tokenize_chinese_chars
- self.strip_accents = strip_accents
- self.do_split_on_punc = do_split_on_punc
-
- def tokenize(self, text, never_split=None):
- """
- Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
-
- Args:
-            never_split (`List[str]`, *optional*):
-                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
-                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
- """
- # union() returns a new set by concatenating the two sets.
- never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
- text = self._clean_text(text)
-
- # This was added on November 1st, 2018 for the multilingual and Chinese
- # models. This is also applied to the English models now, but it doesn't
- # matter since the English models were not trained on any Chinese data
- # and generally don't have any Chinese data in them (there are Chinese
- # characters in the vocabulary because Wikipedia does have some Chinese
- # words in the English Wikipedia.).
- if self.tokenize_chinese_chars:
- text = self._tokenize_chinese_chars(text)
- # prevents treating the same character with different unicode codepoints as different characters
- unicode_normalized_text = unicodedata.normalize("NFC", text)
- orig_tokens = whitespace_tokenize(unicode_normalized_text)
- split_tokens = []
- for token in orig_tokens:
- if token not in never_split:
- if self.do_lower_case:
- token = token.lower()
- if self.strip_accents is not False:
- token = self._run_strip_accents(token)
- elif self.strip_accents:
- token = self._run_strip_accents(token)
- split_tokens.extend(self._run_split_on_punc(token, never_split))
-
- output_tokens = whitespace_tokenize(" ".join(split_tokens))
- return output_tokens
-
- def _run_strip_accents(self, text):
- """Strips accents from a piece of text."""
- text = unicodedata.normalize("NFD", text)
- output = []
- for char in text:
- cat = unicodedata.category(char)
- if cat == "Mn":
- continue
- output.append(char)
- return "".join(output)
-
- def _run_split_on_punc(self, text, never_split=None):
- """Splits punctuation on a piece of text."""
- if not self.do_split_on_punc or (never_split is not None and text in never_split):
- return [text]
- chars = list(text)
- i = 0
- start_new_word = True
- output = []
- while i < len(chars):
- char = chars[i]
- if _is_punctuation(char):
- output.append([char])
- start_new_word = True
- else:
- if start_new_word:
- output.append([])
- start_new_word = False
- output[-1].append(char)
- i += 1
-
- return ["".join(x) for x in output]
-
- def _tokenize_chinese_chars(self, text):
- """Adds whitespace around any CJK character."""
- output = []
- for char in text:
- cp = ord(char)
- if self._is_chinese_char(cp):
- output.append(" ")
- output.append(char)
- output.append(" ")
- else:
- output.append(char)
- return "".join(output)
-
- def _is_chinese_char(self, cp):
- """Checks whether CP is the codepoint of a CJK character."""
- # This defines a "chinese character" as anything in the CJK Unicode block:
- # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
- #
- # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
- # despite its name. The modern Korean Hangul alphabet is a different block,
- # as is Japanese Hiragana and Katakana. Those alphabets are used to write
- # space-separated words, so they are not treated specially and handled
-        # like all of the other languages.
- if (
- (cp >= 0x4E00 and cp <= 0x9FFF)
- or (cp >= 0x3400 and cp <= 0x4DBF) #
- or (cp >= 0x20000 and cp <= 0x2A6DF) #
- or (cp >= 0x2A700 and cp <= 0x2B73F) #
- or (cp >= 0x2B740 and cp <= 0x2B81F) #
- or (cp >= 0x2B820 and cp <= 0x2CEAF) #
- or (cp >= 0xF900 and cp <= 0xFAFF)
- or (cp >= 0x2F800 and cp <= 0x2FA1F) #
- ): #
- return True
-
- return False
-
- def _clean_text(self, text):
- """Performs invalid character removal and whitespace cleanup on text."""
- output = []
- for char in text:
- cp = ord(char)
- if cp == 0 or cp == 0xFFFD or _is_control(char):
- continue
- if _is_whitespace(char):
- output.append(" ")
- else:
- output.append(char)
- return "".join(output)
-
-
-# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
- """Runs WordPiece tokenization."""
-
- def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
- self.vocab = vocab
- self.unk_token = unk_token
- self.max_input_chars_per_word = max_input_chars_per_word
-
- def tokenize(self, text):
- """
- Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
- tokenization using the given vocabulary.
-
-        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
-
- Args:
- text: A single token or whitespace separated tokens. This should have
- already been passed through *BasicTokenizer*.
-
- Returns:
- A list of wordpiece tokens.
- """
-
- output_tokens = []
- for token in whitespace_tokenize(text):
- chars = list(token)
- if len(chars) > self.max_input_chars_per_word:
- output_tokens.append(self.unk_token)
- continue
-
- is_bad = False
- start = 0
- sub_tokens = []
- while start < len(chars):
- end = len(chars)
- cur_substr = None
- while start < end:
- substr = "".join(chars[start:end])
- if start > 0:
- substr = "##" + substr
- if substr in self.vocab:
- cur_substr = substr
- break
- end -= 1
- if cur_substr is None:
- is_bad = True
- break
- sub_tokens.append(cur_substr)
- start = end
-
- if is_bad:
- output_tokens.append(self.unk_token)
- else:
- output_tokens.extend(sub_tokens)
- return output_tokens
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mbart50/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mbart50/__init__.py
deleted file mode 100644
index b889e374bb6d1e3afbf0b5f40cd34cbdc2ed468a..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mbart50/__init__.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import TYPE_CHECKING
-
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
-
-
-_import_structure = {}
-
-try:
- if not is_sentencepiece_available():
- raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
- pass
-else:
- _import_structure["tokenization_mbart50"] = ["MBart50Tokenizer"]
-
-try:
- if not is_tokenizers_available():
- raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
- pass
-else:
- _import_structure["tokenization_mbart50_fast"] = ["MBart50TokenizerFast"]
-
-
-if TYPE_CHECKING:
- try:
- if not is_sentencepiece_available():
- raise OptionalDependencyNotAvailable()
- except OptionalDependencyNotAvailable:
- pass
- else:
- from .tokenization_mbart50 import MBart50Tokenizer
-
- try:
- if not is_tokenizers_available():
- raise OptionalDependencyNotAvailable()
- except OptionalDependencyNotAvailable:
- pass
- else:
- from .tokenization_mbart50_fast import MBart50TokenizerFast
-
-else:
- import sys
-
- sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/resample.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/resample.py
deleted file mode 100644
index 301e2924ec588ec61a55a2a6f2b2f68726dfda5b..0000000000000000000000000000000000000000
--- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/resample.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import os
-import argparse
-import librosa
-import numpy as np
-from multiprocessing import Pool, cpu_count
-from scipy.io import wavfile
-from tqdm import tqdm
-
-
-def process(item):
- spkdir, wav_name, args = item
-    # speakers 's5', 'p280', 'p315' are excluded
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- if os.path.exists(wav_path) and '.wav' in wav_path:
- os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True)
- wav, sr = librosa.load(wav_path, sr=None)
- wav, _ = librosa.effects.trim(wav, top_db=40)
- peak = np.abs(wav).max()
- if peak > 1.0:
- wav = 0.98 * wav / peak
- wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2)
- if not args.skip_loudnorm:
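-            # simple peak normalization to [-1, 1], used here as a rough loudness match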
- wav2 /= max(wav2.max(), -wav2.min())
- save_name = wav_name
- save_path2 = os.path.join(args.out_dir2, speaker, save_name)
- wavfile.write(
- save_path2,
- args.sr2,
- (wav2 * np.iinfo(np.int16).max).astype(np.int16)
- )
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--sr2", type=int, default=44100, help="sampling rate")
- parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir")
- parser.add_argument("--out_dir2", type=str, default="./dataset/44k", help="path to target dir")
- parser.add_argument("--skip_loudnorm", action="store_true", help="Skip loudness matching if you have done it")
- args = parser.parse_args()
-    num_processes = 30 if cpu_count() > 60 else (cpu_count() - 2 if cpu_count() > 4 else 1)
-    pool = Pool(processes=num_processes)
-
- for speaker in os.listdir(args.in_dir):
- spk_dir = os.path.join(args.in_dir, speaker)
- if os.path.isdir(spk_dir):
- print(spk_dir)
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
- pass
diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py
deleted file mode 100644
index cbb32e19ea518eee84941b20f58d1054e84d1937..0000000000000000000000000000000000000000
--- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import dataclasses
-import logging
-from collections import abc
-from typing import Any
-
-from detectron2.utils.registry import _convert_target_to_string, locate
-
-__all__ = ["dump_dataclass", "instantiate"]
-
-
-def dump_dataclass(obj: Any):
- """
- Dump a dataclass recursively into a dict that can be later instantiated.
-
- Args:
- obj: a dataclass object
-
- Returns:
- dict
- """
- assert dataclasses.is_dataclass(obj) and not isinstance(
- obj, type
- ), "dump_dataclass() requires an instance of a dataclass."
- ret = {"_target_": _convert_target_to_string(type(obj))}
- for f in dataclasses.fields(obj):
- v = getattr(obj, f.name)
- if dataclasses.is_dataclass(v):
- v = dump_dataclass(v)
- if isinstance(v, (list, tuple)):
- v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
- ret[f.name] = v
- return ret
-
-
-def instantiate(cfg):
- """
- Recursively instantiate objects defined in dictionaries by
- "_target_" and arguments.
-
- Args:
- cfg: a dict-like object with "_target_" that defines the caller, and
- other keys that define the arguments
-
- Returns:
- object instantiated by cfg
- """
- from omegaconf import ListConfig
-
- if isinstance(cfg, ListConfig):
- lst = [instantiate(x) for x in cfg]
- return ListConfig(lst, flags={"allow_objects": True})
- if isinstance(cfg, list):
- # Specialize for list, because many classes take
- # list[objects] as arguments, such as ResNet, DatasetMapper
- return [instantiate(x) for x in cfg]
-
- if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
- # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
- # but faster: https://github.com/facebookresearch/hydra/issues/1200
- cfg = {k: instantiate(v) for k, v in cfg.items()}
- cls = cfg.pop("_target_")
- cls = instantiate(cls)
-
- if isinstance(cls, str):
- cls_name = cls
- cls = locate(cls_name)
- assert cls is not None, cls_name
- else:
- try:
- cls_name = cls.__module__ + "." + cls.__qualname__
- except Exception:
- # target could be anything, so the above could fail
- cls_name = str(cls)
- assert callable(cls), f"_target_ {cls} does not define a callable object"
- try:
- return cls(**cfg)
- except TypeError:
- logger = logging.getLogger(__name__)
- logger.error(f"Error when instantiating {cls_name}!")
- raise
-    return cfg  # return as-is if we don't know what to do
diff --git a/spaces/ysharma/LLaVA_v1/llava/eval/eval_gpt_review_bench.py b/spaces/ysharma/LLaVA_v1/llava/eval/eval_gpt_review_bench.py
deleted file mode 100644
index 06160f2422b5368f30fb967f7cae635208a1dc69..0000000000000000000000000000000000000000
--- a/spaces/ysharma/LLaVA_v1/llava/eval/eval_gpt_review_bench.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import argparse
-import json
-import os
-
-import openai
-import time
-
-NUM_SECONDS_TO_SLEEP = 0.5
-
-
-def get_eval(content: str, max_tokens: int):
- while True:
- try:
- response = openai.ChatCompletion.create(
- model='gpt-4-0314',
- messages=[{
- 'role': 'system',
- 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
- }, {
- 'role': 'user',
- 'content': content,
- }],
- temperature=0.2, # TODO: figure out which temperature is best for evaluation
- max_tokens=max_tokens,
- )
- break
- except openai.error.RateLimitError:
- pass
- except Exception as e:
- print(e)
- time.sleep(NUM_SECONDS_TO_SLEEP)
-
- return response['choices'][0]['message']['content']
-
-
-def parse_score(review):
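-    # The review is expected to begin with a line holding two scores (e.g. "7 8" or "7, 8");
-    # anything else is reported as a parse error and scored [-1, -1].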
- try:
- score_pair = review.split('\n')[0]
- score_pair = score_pair.replace(',', ' ')
- sp = score_pair.split(' ')
- if len(sp) == 2:
- return [float(sp[0]), float(sp[1])]
- else:
- print('error', review)
- return [-1, -1]
- except Exception as e:
- print(e)
- print('error', review)
- return [-1, -1]
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
- parser.add_argument('-q', '--question')
- parser.add_argument('-c', '--context')
- parser.add_argument('-a', '--answer-list', nargs='+', default=[])
- parser.add_argument('-r', '--rule')
- parser.add_argument('-o', '--output')
- parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
- args = parser.parse_args()
-
- f_q = open(os.path.expanduser(args.question))
- f_ans1 = open(os.path.expanduser(args.answer_list[0]))
- f_ans2 = open(os.path.expanduser(args.answer_list[1]))
- rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
-
- if os.path.isfile(os.path.expanduser(args.output)):
- cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
- else:
- cur_reviews = []
-
- review_file = open(f'{args.output}', 'a')
-
- context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
- image_to_context = {context['image']: context for context in context_list}
-
- handles = []
- idx = 0
- for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
- ques = json.loads(ques_js)
- ans1 = json.loads(ans1_js)
- ans2 = json.loads(ans2_js)
-
- inst = image_to_context[ques['image']]
-
- if isinstance(inst['caption'], list):
- cap_str = '\n'.join(inst['caption'])
- else:
- cap_str = inst['caption']
-
- category = 'llava_bench_' + json.loads(ques_js)['category']
- if category in rule_dict:
- rule = rule_dict[category]
- else:
- assert False, f"Visual QA category not found in rule file: {category}."
- prompt = rule['prompt']
- role = rule['role']
- content = (f'[Context]\n{cap_str}\n\n'
- f'[Question]\n{ques["text"]}\n\n'
- f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
- f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
- f'[System]\n{prompt}\n\n')
- cur_js = {
- 'id': idx+1,
- 'question_id': ques['question_id'],
- 'answer1_id': ans1.get('answer_id', ans1['question_id']),
-            'answer2_id': ans2.get('answer_id', ans2['question_id']),
- 'category': category
- }
- if idx >= len(cur_reviews):
- review = get_eval(content, args.max_tokens)
- scores = parse_score(review)
- cur_js['content'] = review
- cur_js['tuple'] = scores
- review_file.write(json.dumps(cur_js) + '\n')
- review_file.flush()
- else:
- print(f'Skipping {idx} as we already have it.')
- idx += 1
- print(idx)
- review_file.close()
diff --git a/spaces/yueranseo/mygpt/modules/overwrites.py b/spaces/yueranseo/mygpt/modules/overwrites.py
deleted file mode 100644
index e029f4a50285c64dcb286a34cb1c3b2680880e05..0000000000000000000000000000000000000000
--- a/spaces/yueranseo/mygpt/modules/overwrites.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from __future__ import annotations
-import logging
-
-from typing import List, Tuple
-from gradio_client import utils as client_utils
-from gradio import utils
-import inspect
-
-from modules.presets import *
-from modules.index_func import *
-
-
-def postprocess(
- self,
- y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple],
- ) -> List[List[str | Dict | None]]:
- """
- Parameters:
- y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
- Returns:
- List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.
- """
- if y is None:
- return []
- processed_messages = []
- for message_pair in y:
- assert isinstance(
- message_pair, (tuple, list)
- ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
- assert (
- len(message_pair) == 2
- ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
-
- processed_messages.append(
- [
- self._postprocess_chat_messages(message_pair[0], "user"),
- self._postprocess_chat_messages(message_pair[1], "bot"),
- ]
- )
- return processed_messages
-
-def postprocess_chat_messages(
- self, chat_message: str | tuple | list | None, role: str
- ) -> str | dict | None:
- if chat_message is None:
- return None
- elif isinstance(chat_message, (tuple, list)):
- file_uri = chat_message[0]
- if utils.validate_url(file_uri):
- filepath = file_uri
- else:
- filepath = self.make_temp_copy_if_needed(file_uri)
-
- mime_type = client_utils.get_mimetype(filepath)
- return {
- "name": filepath,
- "mime_type": mime_type,
- "alt_text": chat_message[1] if len(chat_message) > 1 else None,
- "data": None, # These last two fields are filled in by the frontend
- "is_file": True,
- }
- elif isinstance(chat_message, str):
- # chat_message = inspect.cleandoc(chat_message)
- # escape html spaces
- # chat_message = chat_message.replace(" ", " ")
- if role == "bot":
- chat_message = convert_bot_before_marked(chat_message)
- elif role == "user":
- chat_message = convert_user_before_marked(chat_message)
- return chat_message
- else:
- raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
-
-with open("./assets/custom.js", "r", encoding="utf-8") as f, \
- open("./assets/external-scripts.js", "r", encoding="utf-8") as f1:
- customJS = f.read()
- externalScripts = f1.read()
-
-
-def reload_javascript():
- print("Reloading javascript...")
-    js = f'<script>{customJS}</script><script async>{externalScripts}</script>'
- # if render_latex:
- # js += """\"""
- def template_response(*args, **kwargs):
- res = GradioTemplateResponseOriginal(*args, **kwargs)
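-        # inject the custom scripts just before the closing </html> tag of Gradio's page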
-        res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
- res.init_headers()
- return res
-
- gr.routes.templates.TemplateResponse = template_response
-
-GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
\ No newline at end of file
diff --git a/spaces/yunfei0710/gpt-academic/request_llm/test_llms.py b/spaces/yunfei0710/gpt-academic/request_llm/test_llms.py
deleted file mode 100644
index ae6967be7b0c48d4c2af7a51335bd9becbc24d88..0000000000000000000000000000000000000000
--- a/spaces/yunfei0710/gpt-academic/request_llm/test_llms.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# """
-# Unit tests for each of the LLM models
-# """
-def validate_path():
- import os, sys
- dir_name = os.path.dirname(__file__)
- root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
- os.chdir(root_dir_assume)
- sys.path.append(root_dir_assume)
-
-validate_path() # validate path so you can run from base directory
-if __name__ == "__main__":
- from request_llm.bridge_newbingfree import predict_no_ui_long_connection
- # from request_llm.bridge_moss import predict_no_ui_long_connection
- # from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
- # from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection
-
- llm_kwargs = {
- 'max_length': 512,
- 'top_p': 1,
- 'temperature': 1,
- }
-
- result = predict_no_ui_long_connection(inputs="你好",
- llm_kwargs=llm_kwargs,
- history=[],
- sys_prompt="")
- print('final result:', result)
-
-
- result = predict_no_ui_long_connection(inputs="what is a hero?",
- llm_kwargs=llm_kwargs,
- history=["hello world"],
- sys_prompt="")
- print('final result:', result)
-
- result = predict_no_ui_long_connection(inputs="如何理解传奇?",
- llm_kwargs=llm_kwargs,
- history=[],
- sys_prompt="")
- print('final result:', result)
-
- # # print(result)
- # from multiprocessing import Process, Pipe
- # class GetGLMHandle(Process):
- # def __init__(self):
- # super().__init__(daemon=True)
- # pass
- # def run(self):
-    #         # executed in the child process
-    #         # on the first run, load the parameters
- # def validate_path():
- # import os, sys
- # dir_name = os.path.dirname(__file__)
- # root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
- # os.chdir(root_dir_assume + '/request_llm/jittorllms')
- # sys.path.append(root_dir_assume + '/request_llm/jittorllms')
- # validate_path() # validate path so you can run from base directory
-
- # jittorllms_model = None
- # import types
- # try:
- # if jittorllms_model is None:
- # from models import get_model
- # # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
- # args_dict = {'model': 'chatrwkv'}
- # print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
- # jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
- # print('done get model')
- # except:
-    #             # self.child.send('[Local Message] Call jittorllms fail: could not load the jittorllms parameters.')
-    #             raise RuntimeError("Could not load the jittorllms parameters!")
-
- # x = GetGLMHandle()
- # x.start()
-
-
- # input()
\ No newline at end of file
diff --git a/spaces/zhoupin30/zhoupin30/src/components/chat-image.tsx b/spaces/zhoupin30/zhoupin30/src/components/chat-image.tsx
deleted file mode 100644
index 6c4a8f5784aafa0321e8634295048dc4ba931bae..0000000000000000000000000000000000000000
--- a/spaces/zhoupin30/zhoupin30/src/components/chat-image.tsx
+++ /dev/null
@@ -1,172 +0,0 @@
-import {
- useEffect,
- useState,
- useCallback,
- ChangeEvent,
- ClipboardEvent,
- MouseEventHandler,
- useRef,
- KeyboardEvent
-} from "react"
-import Image from 'next/image'
-import { toast } from "react-hot-toast"
-import PasteIcon from '@/assets/images/paste.svg'
-import UploadIcon from '@/assets/images/upload.svg'
-import CameraIcon from '@/assets/images/camera.svg'
-import { useBing } from '@/lib/hooks/use-bing'
-import { cn } from '@/lib/utils'
-import { ImageUtils } from "@/lib/image"
-
-interface ChatImageProps extends Pick<ReturnType<typeof useBing>, 'uploadImage'> {}
-
-const preventDefault: MouseEventHandler = (event) => {
- event.nativeEvent.stopImmediatePropagation()
-}
-
-export function ChatImage({ children, uploadImage }: React.PropsWithChildren<ChatImageProps>) {
-  const videoRef = useRef<HTMLVideoElement>(null)
-  const canvasRef = useRef<HTMLCanvasElement>(null)
-  const mediaStream = useRef<MediaStream>()
- const [panel, setPanel] = useState('none')
- const [inputUrl, setInputUrl] = useState('')
-
- const upload = useCallback((url: string) => {
- if (url) {
- uploadImage(url)
- }
- setPanel('none')
- }, [panel])
-
-  const onUpload = useCallback(async (event: ChangeEvent<HTMLInputElement>) => {
- const file = event.target.files?.[0]
- if (file) {
- const fileDataUrl = await ImageUtils.getCompressedImageDataAsync(file)
- if (fileDataUrl) {
- upload(fileDataUrl)
- }
- }
- }, [])
-
- const onPaste = useCallback((event: ClipboardEvent) => {
- const pasteUrl = event.clipboardData.getData('text') ?? ''
- upload(pasteUrl)
- }, [])
-
- const onEnter = useCallback((event: KeyboardEvent) => {
- // @ts-ignore
- event.preventDefault()
- // @ts-ignore
- event.stopPropagation()
- if (/^https?:\/\/.+/.test(inputUrl)) {
- upload(inputUrl)
- } else {
-      toast.error('Please enter a valid image URL')
- }
- }, [inputUrl])
-
- const openVideo: MouseEventHandler = async (event) => {
- event.stopPropagation()
- setPanel('camera-mode')
- }
-
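-  // Draw the current camera frame onto the hidden canvas and upload it as a JPEG data URL.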
- const onCapture = () => {
- if (canvasRef.current && videoRef.current) {
- const canvas = canvasRef.current
- canvas.width = videoRef.current!.videoWidth
- canvas.height = videoRef.current!.videoHeight
- canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height)
- const cameraUrl = canvas.toDataURL('image/jpeg')
- upload(cameraUrl)
- }
- }
-
- useEffect(() => {
- const handleBlur = () => {
- if (panel !== 'none') {
- setPanel('none')
- }
- }
- document.addEventListener('click', handleBlur)
- return () => {
- document.removeEventListener('click', handleBlur)
- }
- }, [panel])
-
- useEffect(() => {
- if (panel === 'camera-mode') {
- navigator.mediaDevices.getUserMedia({ video: true, audio: false })
- .then(videoStream => {
- mediaStream.current = videoStream
- if (videoRef.current) {
- videoRef.current.srcObject = videoStream
- }
- })
- } else {
- if (mediaStream.current) {
- mediaStream.current.getTracks().forEach(function(track) {
- track.stop()
- })
- mediaStream.current = undefined
- }
- }
- }, [panel])
-
- return (
-