diff --git a/spaces/1368565466ki/Satdia/commons.py b/spaces/1368565466ki/Satdia/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/Satdia/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - 
in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/17TheWord/RealESRGAN/Training.md b/spaces/17TheWord/RealESRGAN/Training.md deleted file mode 100644 index 64704e1d2e1f334984232afd12b245235b274a9e..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/RealESRGAN/Training.md +++ /dev/null @@ -1,100 +0,0 @@ -# :computer: How to Train Real-ESRGAN - -The training codes have been released.
-Note that the code has been heavily refactored, so there may be some bugs or performance regressions. You are welcome to report issues, and I will also retrain the models. - -## Overview - -The training is divided into two stages. Both stages share the same data synthesis process and training pipeline; only the loss functions differ. Specifically, - -1. We first train Real-ESRNet with an L1 loss, initialized from the pre-trained ESRGAN model. -1. We then use the trained Real-ESRNet model to initialize the generator, and train Real-ESRGAN with a combination of L1 loss, perceptual loss, and GAN loss. - -## Dataset Preparation - -We use the DF2K (DIV2K and Flickr2K) + OST datasets for training. Only HR images are required.
-You can download from : - -1. DIV2K: http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip -2. Flickr2K: https://cv.snu.ac.kr/research/EDSR/Flickr2K.tar -3. OST: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/datasets/OST_dataset.zip - -For the DF2K dataset, we use a multi-scale strategy, *i.e.*, we downsample HR images to obtain several Ground-Truth images with different scales. - -We then crop DF2K images into sub-images for faster IO and processing. - -You need to prepare a txt file containing the image paths. The following are some examples in `meta_info_DF2Kmultiscale+OST_sub.txt` (As different users may have different sub-images partitions, this file is not suitable for your purpose and you need to prepare your own txt file): - -```txt -DF2K_HR_sub/000001_s001.png -DF2K_HR_sub/000001_s002.png -DF2K_HR_sub/000001_s003.png -... -``` - -## Train Real-ESRNet - -1. Download pre-trained model [ESRGAN](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth) into `experiments/pretrained_models`. - ```bash - wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth -P experiments/pretrained_models - ``` -1. Modify the content in the option file `options/train_realesrnet_x4plus.yml` accordingly: - ```yml - train: - name: DF2K+OST - type: RealESRGANDataset - dataroot_gt: datasets/DF2K # modify to the root path of your folder - meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt # modify to your own generate meta info txt - io_backend: - type: disk - ``` -1. If you want to perform validation during training, uncomment those lines and modify accordingly: - ```yml - # Uncomment these for validation - # val: - # name: validation - # type: PairedImageDataset - # dataroot_gt: path_to_gt - # dataroot_lq: path_to_lq - # io_backend: - # type: disk - - ... - - # Uncomment these for validation - # validation settings - # val: - # val_freq: !!float 5e3 - # save_img: True - - # metrics: - # psnr: # metric name, can be arbitrary - # type: calculate_psnr - # crop_border: 4 - # test_y_channel: false - ``` -1. Before the formal training, you may run in the `--debug` mode to see whether everything is OK. We use four GPUs for training: - ```bash - CUDA_VISIBLE_DEVICES=0,1,2,3 \ - python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --debug - ``` -1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary. - ```bash - CUDA_VISIBLE_DEVICES=0,1,2,3 \ - python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --auto_resume - ``` - -## Train Real-ESRGAN - -1. After the training of Real-ESRNet, you now have the file `experiments/train_RealESRNetx4plus_1000k_B12G4_fromESRGAN/model/net_g_1000000.pth`. If you need to specify the pre-trained path to other files, modify the `pretrain_network_g` value in the option file `train_realesrgan_x4plus.yml`. -1. Modify the option file `train_realesrgan_x4plus.yml` accordingly. Most modifications are similar to those listed above. -1. Before the formal training, you may run in the `--debug` mode to see whether everything is OK. 
We use four GPUs for training: - ```bash - CUDA_VISIBLE_DEVICES=0,1,2,3 \ - python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --debug - ``` -1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary. - ```bash - CUDA_VISIBLE_DEVICES=0,1,2,3 \ - python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --auto_resume - ``` diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe After Effects CS6 Free Download with Crack 64 Bit Kickass Learn the Secrets of Professional Video Editing and Motion Graphics.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe After Effects CS6 Free Download with Crack 64 Bit Kickass Learn the Secrets of Professional Video Editing and Motion Graphics.md deleted file mode 100644 index ab821a80b083aacd216805969bd6cbb6bf52a58f..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe After Effects CS6 Free Download with Crack 64 Bit Kickass Learn the Secrets of Professional Video Editing and Motion Graphics.md +++ /dev/null @@ -1,110 +0,0 @@ -
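A minimal sketch of how the meta info file described in the Real-ESRGAN training guide above could be generated is shown below. It simply lists every sub-image path relative to the dataset root, matching the example entries; the folder name `DF2K_HR_sub`, the dataset root, and the output filename are assumptions taken from the snippet above, and the Real-ESRGAN repository also ships its own helper script for this step.

```python
# Sketch: write a meta info txt that lists all sub-image paths
# (paths are relative to dataroot_gt, one path per line).
import os

dataset_root = "datasets/DF2K"                     # assumed dataset root
sub_folder = "DF2K_HR_sub"                         # assumed sub-image folder
out_path = "meta_info_DF2Kmultiscale+OST_sub.txt"  # assumed output filename

names = sorted(os.listdir(os.path.join(dataset_root, sub_folder)))
with open(out_path, "w") as f:
    for name in names:
        if name.lower().endswith(".png"):
            f.write(f"{sub_folder}/{name}\n")
```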
-

Adobe After Effects CS6 Free Download with Crack 64 Bit Kickass

-

If you are looking for a powerful and versatile software to create stunning motion graphics and visual effects for your videos, you might want to check out Adobe After Effects CS6. This software is widely used by professionals and amateurs alike to produce high-quality videos for various purposes, such as film, TV, web, and social media. In this article, we will show you what Adobe After Effects CS6 is, what features it has, why you need it, and how to download it for free with crack 64 bit kickass.

-

What is Adobe After Effects CS6?

-

Adobe After Effects CS6 is a software that allows you to create and edit motion graphics and visual effects using a timeline-based interface. You can use it to animate text, images, shapes, masks, and layers in 2D or 3D space. You can also apply various effects and presets to enhance your animations and add realism. You can also import and export files from other Adobe products, such as Photoshop, Illustrator, Premiere Pro, and Media Encoder.

-

adobe after effects cs6 free download with crack 64 bit kickass


Download ☆☆☆ https://byltly.com/2uKyOW



-

Features of Adobe After Effects CS6

-

Adobe After Effects CS6 has many features that make it a powerful and versatile software for video editing. Here are some of the main features that you can enjoy:

-

Motion Graphics and Visual Effects

-

You can create stunning motion graphics and visual effects using Adobe After Effects CS6. You can use the built-in tools and effects to animate text, images, shapes, masks, and layers in 2D or 3D space. You can also use expressions and scripts to control your animations more precisely. You can also use keyframes and motion paths to define the movement of your elements. You can also use the graph editor to fine-tune the speed and timing of your animations.

-

3D Camera Tracker and Ray-Traced 3D Renderer

-

You can also create realistic 3D scenes using Adobe After Effects CS6. You can use the 3D camera tracker to analyze your footage and generate a 3D camera that matches the movement of your original camera. This way, you can add new elements to your scene that follow the same perspective and depth as your original footage. You can also use the ray-traced 3D renderer to create 3D shapes and text with realistic shadows, reflections, and depth of field.

-

Global Performance Cache and Rolling Shutter Repair

-

You can also enjoy faster performance and better quality using Adobe After Effects CS6. You can use the global performance cache to save your previews in the background so that you don't have to wait for them to render again when you make changes. This way, you can work more efficiently and smoothly. You can also use the rolling shutter repair effect to fix the distortion caused by rolling shutter cameras. This way, you can improve the quality of your footage.

-


-

Variable Mask Feathering and Shape Layer Extrusion

-

You can also create more advanced masks and shapes using Adobe After Effects CS6. You can use the variable mask feathering tool to adjust the feathering of your masks along different points on the mask edge. This way, you can create more realistic masks that blend well with your background. You can also use the shape layer extrusion tool to extrude your shape layers into 3D objects with bevels and materials. This way, you can create more complex shapes that add depth to your scene.

-

Why You Need Adobe After Effects CS6?

-

Adobe After Effects CS6 is a software that you need if you want to create stunning videos for various purposes. Here are some of the reasons why you need it:

-

Create Stunning Videos for Various Purposes

-

You can use Adobe After Effects CS6 to create stunning videos for various purposes, such as film, TV, web, and social media. You can use it to create cinematic titles, intros, transitions, lower thirds, logos, credits, and more. You can also use it to create visual effects such as explosions, fire, smoke, rain, snow, lightning, etc. You can also use it to create motion graphics such as infographics, charts, graphs, maps, etc.

-

Enhance Your Creativity and Productivity

-

You can also use Adobe After Effects CS6 to enhance your creativity and productivity. You can use it to experiment with different ideas and styles without worrying about rendering time or quality. You can also use it to customize your workspace according to your preferences and workflow. You can also use it to collaborate with other artists using the Creative Cloud services.

-

Work with Other Adobe Products Seamlessly

-

You can also use Adobe After Effects CS6 to work with other Adobe products seamlessly. You can import and export files from other Adobe products such as Photoshop, Illustrator, Premiere Pro, Media Encoder etc. without losing quality or compatibility. You can also use dynamic link to update changes between applications without rendering or exporting.

-

How to Download Adobe After Effects CS6 with Crack 64 Bit Kickass?

-

If you want to download Adobe After Effects CS6 with crack 64 bit kickass for free, you need to follow these steps:

-

Step 1: Download the Torrent File from Kickass

-

The first step is to download the torrent file from kickass website. To do this, you need to have a torrent client installed on your computer such as uTorrent or BitTorrent. Then you need to go to kickass website (https://katcr.to/) and search for "Adobe After Effects CS6". Then you need to find a torrent file that has good seeds and peers (the more the better) and click on "Download Torrent". Then you need to save the torrent file on your computer.

-

Step 2: Install Adobe After Effects CS6 on Your Computer

-

The second step is to install Adobe After Effects CS6 on your computer using the torrent file that you downloaded in step 1. To do this, you need to open the torrent file using your torrent client (uTorrent or BitTorrent) and start downloading the files inside it. Then you need to wait until the download is complete (it may take some time depending on your internet speed). Then you need to open the folder where the files are downloaded (usually in Downloads) and run the setup.exe file as administrator. Then you need to follow the instructions on the screen until the installation is complete.

-

Step 3: Apply the Crack File to Activate the Software

-

The third step is to apply the crack file that came with the torrent file that you downloaded in step 1. To do this, you need to open the folder where the crack file is located (usually in Downloads) and copy it (Ctrl+C). Then you need to go to the folder where Adobe After Effects CS6 is installed (usually in C:\Program Files\Adobe\Adobe After Effects CS6) and paste it (Ctrl+V). Then you need to replace the original file when prompted (click Yes). Then you need to run Adobe After Effects CS6 as administrator (right-click on its icon > Run as administrator). Then you should see a message saying "Adobe Application Manager has been patched successfully". Then you need to close Adobe After Effects CS6.

-

Conclusion

-

In conclusion, Adobe After Effects CS6 is a powerful and versatile software that allows you to create stunning motion graphics and visual effects for your videos. You can enjoy its many features such as motion graphics and visual effects, 3D camera tracker and ray-traced 3D renderer, global performance cache and rolling shutter repair, variable mask feathering and shape layer extrusion, and more. You can also use it for various purposes such as film, TV, web, and social media. You can also enhance your creativity and productivity, and work with other Adobe products seamlessly. You free with crack 64 bit kickass by following these steps: 1. Download the torrent file from kickass website using a torrent client such as uTorrent or BitTorrent. 2. Install Adobe After Effects CS6 on your computer using the torrent file that you downloaded. 3. Apply the crack file that came with the torrent file to activate the software by copying and pasting it in the installation folder and replacing the original file. By doing this, you can enjoy Adobe After Effects CS6 for free and create amazing videos for your projects.

-

FAQs

-

Here are some of the frequently asked questions about Adobe After Effects CS6:

- - - - - - - - - - - - - - - - - - - - - - - - - -
QuestionAnswer
Is Adobe After Effects CS6 compatible with Windows 10?Yes, Adobe After Effects CS6 is compatible with Windows 10. However, you may need to update your drivers and software to ensure optimal performance.
Is Adobe After Effects CS6 safe to download?Yes, Adobe After Effects CS6 is safe to download if you use a reliable torrent website and a trusted torrent client. However, you should always scan your files with an antivirus software before opening them to avoid any malware or viruses.
Is Adobe After Effects CS6 legal to use?No, Adobe After Effects CS6 is not legal to use if you download it for free with crack 64 bit kickass. This is because you are violating the terms and conditions of Adobe and infringing their intellectual property rights. You may face legal consequences if you are caught using it without a valid license.
What are the system requirements for Adobe After Effects CS6?The minimum system requirements for Adobe After Effects CS6 are: - Intel Core 2 Duo or AMD Phenom II processor; 64-bit support required - Microsoft Windows 7 with Service Pack 1 (64 bit), Windows 8 (64 bit), Windows 8.1 (64 bit), or Windows 10 (64 bit) - 4 GB of RAM (8 GB recommended) - 5 GB of available hard-disk space; additional free space required during installation (cannot install on removable flash storage devices) - Additional disk space for disk cache (10 GB recommended) - 1280 x 900 display - OpenGL 2.0–capable system - QuickTime 7.6.6 software required for QuickTime features - Optional: Adobe-certified GPU card for GPU-accelerated ray-traced 3D renderer
How can I learn Adobe After Effects CS6?You can learn Adobe After Effects CS6 by watching online tutorials, reading books and blogs, taking courses and classes, or practicing on your own projects. You can also join online communities and forums where you can ask questions and get feedback from other users.
-

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Game Shark Ps2 V6 Iso717 The Best Way to Cheat in PS2 Games.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Game Shark Ps2 V6 Iso717 The Best Way to Cheat in PS2 Games.md deleted file mode 100644 index 2223a075333c4bf2df55b34b652a6d90164d0885..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Game Shark Ps2 V6 Iso717 The Best Way to Cheat in PS2 Games.md +++ /dev/null @@ -1,162 +0,0 @@ - -

What is Game Shark Ps2 V6 Iso717?

-

If you are a fan of PlayStation 2 games, you might have heard of Game Shark Ps2 V6 Iso717. This is a cheat device or software that allows you to modify or enhance your gaming experience by unlocking hidden features, codes, or cheats for your PS2 games. With Game Shark Ps2 V6 Iso717, you can access unlimited lives, ammo, health, money, weapons, items, and more in your favorite PS2 games.

-

Game Shark Ps2 V6 Iso717 is an ISO file that you can download and burn onto a CD or DVD. You can then insert the disc into your PS2 console and run the software. The software will scan your memory card and detect the games that you have saved. You can then select the game that you want to play and choose from a list of cheats that are available for that game. You can also create your own custom cheats by using the code generator feature.

-

Game Shark Ps2 V6 Iso717


Download ————— https://byltly.com/2uKz0Y



-

How to use Game Shark Ps2 V6 Iso717?

-

Using Game Shark Ps2 V6 Iso717 is easy and simple. Here are the steps that you need to follow:

-
    -
  1. Download Game Shark Ps2 V6 Iso717 from one of the sources that we will mention later in this article.
  2. -
  3. Burn the ISO file onto a CD or DVD using a software like Nero or ImgBurn.
  4. -
  5. Insert the disc into your PS2 console and turn it on.
  6. -
  7. The software will load automatically and display a menu with various options.
  8. -
  9. Select "Start Game" and then choose "With Codes" or "Without Codes".
  10. -
  11. The software will scan your memory card and show you a list of games that you have saved.
  12. -
  13. Select the game that you want to play and press X.
  14. -
  15. The software will show you a list of cheats that are available for that game.
  16. -
  17. Select the cheats that you want to activate and press X.
  18. -
  19. Press Start to begin playing the game with the cheats enabled.
  20. -
-

You can also create your own custom cheats by using the code generator feature. To do this, follow these steps:

-
    -
  1. Select "Expert Mode" from the main menu.
  2. -
  3. Select "Code Generator" from the sub-menu.
  4. -
  5. Select the game that you want to create cheats for and press X.
  6. -
  7. The software will show you a list of values that correspond to different aspects of the game, such as health, ammo, money, etc.
  8. -
  9. Select the value that you want to modify and press X.
  10. -
  11. The software will show you a list of possible codes that can change that value.
  12. -
  13. Select the code that you want to use and press X.
  14. -
  15. The software will ask you to name your cheat and save it on your memory card.
  16. -
  17. You can then activate your custom cheat by selecting it from the list of cheats for that game.
  18. -
-

What are the benefits of using Game Shark Ps2 V6 Iso717?

-

There are many benefits of using Game Shark Ps2 V6 Iso717 for your PS2 games. Some of them are:

- -

What are some of the drawbacks of using Game Shark Ps2 V6 Iso717?

-

While using Game Shark Ps2 V6 Iso717 can be fun and exciting, there are also some drawbacks or risks that you should be aware of. Some of them are:

-

- -

Where can you download Game Shark Ps2 V6 Iso717?

-

There are many sources where you can download Game Shark Ps2 V6 Iso717 online. However, not all of them are reliable or safe. Some of them might contain viruses, malware, spyware, adware, or other harmful programs that can harm your computer or device. Some of them might also contain fake, incomplete, outdated, or corrupted files that can damage your PS2 console or memory card. Therefore, you should be careful and selective when choosing where to download Game Shark Ps2 V6 Iso717 from. Here is a table of some of the best sources where you can download Game Shark Ps2 V6 Iso717 from:

- - - - - - - - - - - - - - -width: 33.3333%; height: 23px; text-align: center;">- A platform for buying and selling digital collectibles and NFTs.
- Has a collection of Game Shark Ps2 V6 Iso717 NFTs that are verified and authentic.
- Provides a secure and transparent transaction process.
- Allows users to bid and negotiate prices.
- Has a user-friendly and interactive interface.
- - - - - - - - -
SourceProsCons
CoolROM.com- One of the most popular and trusted sites for downloading ROMs and ISOs.
- Has a large collection of PS2 games and cheat devices.
- Provides detailed information and screenshots for each file.
- Allows users to rate and review each file.
- Has a fast and easy download process.
- Some files might require additional software or tools to extract or burn.
- Some files might have broken links or missing parts.
- Some files might be region-locked or incompatible with certain consoles.
OpenSea.io- Requires users to have a cryptocurrency wallet and account.
- Charges fees for each transaction.
- Has a limited supply and availability of Game Shark Ps2 V6 Iso717 NFTs.
Netlify.app- A platform for hosting and deploying websites and web applications.
- Has a collection of Game Shark Ps2 V6 Iso717 files that are hosted and shared by other users.
- Provides a fast and reliable download speed.
- Allows users to preview and test the files before downloading.
- Has a simple and minimalist design.
- Some files might be unverified or unsafe.
- Some files might be outdated or incompatible with certain consoles.
- Some files might have low quality or resolution.
-

How to verify the authenticity and safety of the download?

-

Before you download Game Shark Ps2 V6 Iso717 from any source, you should verify the authenticity and safety of the file. This will help you avoid downloading fake, incomplete, corrupted, or infected files that can harm your computer or device. Here are some tips on how to verify the authenticity and safety of the download:

- -

What are some alternatives to Game Shark Ps2 V6 Iso717?

-

If you are looking for some alternatives to Game Shark Ps2 V6 Iso717, there are other cheat devices or software that you can use for your PS2 games. Some of them are:

- - - - - - - - - - - - - - - - - -, and alternatives. You can download it from various sources, but you should verify the authenticity and safety of the file before downloading. You can also contact customer support for any help or assistance with the software. You can also update the software to the latest version or patch by following the steps that we explained in this article. We hope that this article has helped you learn more about Game Shark Ps2 V6 Iso717 and how to use it for your PS2 games.

FAQs

-

Here are some of the frequently asked questions about Game Shark Ps2 V6 Iso717:

-
    -
  1. What is the difference between Game Shark Ps2 V6 Iso717 and Game Shark Ps2 V4?
    Game Shark Ps2 V6 Iso717 is an updated version of Game Shark Ps2 V4. It has more codes, features, and compatibility than Game Shark Ps2 V4. It also has a code generator feature that allows you to create your own custom codes.
  2. -
  3. Can I use Game Shark Ps2 V6 Iso717 on my PS3 or PS4 console?
    No, you cannot use Game Shark Ps2 V6 Iso717 on your PS3 or PS4 console. It is only compatible with PS2 consoles and games.
  4. -
  5. Can I use Game Shark Ps2 V6 Iso717 on my PC or laptop?
    Yes, you can use Game Shark Ps2 V6 Iso717 on your PC or laptop if you have a PS2 emulator installed on your device. A PS2 emulator is a software that allows you to run PS2 games on your PC or laptop. You can download a PS2 emulator from various sources online, but you should verify the authenticity and safety of the file before downloading.
  6. -
  7. Can I use Game Shark Ps2 V6 Iso717 online or offline?
    You can use Game Shark Ps2 V6 Iso717 both online and offline. However, you should be careful when using it online, as some games or servers might detect or ban you for using cheats or codes. You should also respect the rules and etiquette of online gaming and not ruin the fun or experience for other players.
  8. -
  9. Can I use Game Shark Ps2 V6 Iso717 with other cheat devices or software?
    Yes, you can use Game Shark Ps2 V6 Iso717 with other cheat devices or software, such as Code Breaker, Action Replay Max, or Free McBoot. However, you should be careful when using multiple cheat devices or software at once, as this might cause conflicts or errors in your PS2 console or games.
  10. -
-

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Dotfuscator Professional Edition 5.0 Cracked.md b/spaces/1gistliPinn/ChatGPT4/Examples/Dotfuscator Professional Edition 5.0 Cracked.md deleted file mode 100644 index 26d6363177c0a9289e0ed50d673c661e2f15a7ec..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Dotfuscator Professional Edition 5.0 Cracked.md +++ /dev/null @@ -1,6 +0,0 @@ -

dotfuscator professional edition 5.0 cracked


Download - https://imgfil.com/2uy1RX



-
-With injection, Dotfuscator can easily add application monitoring to existing apps and new development. This software ... Distribute trial versions of your apps and protect your . ... rating. Protect VBA Code protects VBA projects from password cracking in MS Excel. ... Dotfuscator professional edition 4.2 ... 4d29de3e1b
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download UPD Sda Youth Song Book.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download UPD Sda Youth Song Book.md deleted file mode 100644 index bdea3aa5d2d3531e84cc34c0fb26fab920c57cb8..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Download UPD Sda Youth Song Book.md +++ /dev/null @@ -1,30 +0,0 @@ - -

How to Download Sda Youth Song Book for Free

-

If you are looking for a collection of songs that are suitable for young people and youth activities, you might be interested in the Sda Youth Song Book. This book contains 214 songs selected especially for Adventist youth ministries, including hymns, choruses, and contemporary songs. All songs are arranged in four-part harmony and are chorded for guitar.

-

But how can you get this book for free? Here are some ways you can download it online:

-

Download Sda Youth Song Book


Download Ziphttps://imgfil.com/2uy12g



- -

These are some of the ways you can download the Sda Youth Song Book for free online. However, if you want to have a physical copy of the book, you might want to consider buying it from the Adventist Book Center or from your local church bookstore. You can also borrow it from your friends or from your church library. The Sda Youth Song Book is a great resource for enhancing your musical skills and enriching your spiritual life.

- -

How singing can improve your physical health

-

Singing is not only fun, but also good for your body. Singing can have positive effects on various aspects of your physical health, such as your breathing, posture, blood pressure, and sleep quality. Here are some of the ways singing can benefit your physical health:

- -

How singing can improve your mental health

-

Singing is not only good for your body, but also for your mind. Singing can have positive effects on various aspects of your mental health, such as your mood, stress levels, memory, and social skills. Here are some of the ways singing can benefit your mental health:

-

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Filme Noi Cu Subtitrare In Romana Download Free.md b/spaces/1gistliPinn/ChatGPT4/Examples/Filme Noi Cu Subtitrare In Romana Download Free.md deleted file mode 100644 index d7f7a8ab3f1a4a01e3102692e7c403d79b65d0c3..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Filme Noi Cu Subtitrare In Romana Download Free.md +++ /dev/null @@ -1,26 +0,0 @@ -

filme noi cu subtitrare in romana download free


DOWNLOADhttps://imgfil.com/2uy0mI



- -2006. Iatalo - -Golan Roth - -Etichete: Ioana, Golan Roth - -Discursul asupra relatiei - -dintre om si societate - -Pe 12 ianuarie 1997 a avut loc un incident de atac violent, la adresa lui Golan, pentru că el a trebuit să schimbe poliţiştii în care era prins şi pentru că el era cel mai bun prieten al mamei lui Andrei, care se afla în carantină. Am fost martorul lui Golan. - -Și, aşa cum se spune, am văzut, am văzut. Deocamdată, un incident şi nimic mai mult. Vreau să vă spun doar că aşa este viaţa, după un incident. - -Mama mea a fost ajunsă în carantină deoarece a fost bolnavă, cu criză. O mai avea, de aproape doi ani, şi atunci în carantină. - -Aşa că mă aflam deoparte, să-mi dau seama ce să fac. Am văzut că n-ar fi bine să-mi dea mama locul şi să o lase în carantină acolo. Aşa că, din nefericire, nu ştiam ce să fac. - -Nu ştiam cine să caut şi cine să văd. Deci, când se face un incident, ca atunci, oamenii se sperie, se îngrijesc unii de alţii, cine ştie cine se aşteaptă la ce. - -Aşa că se uitam la televizor, la ţigări. În � 4fefd39f24
-
-
-

diff --git a/spaces/1line/AutoGPT/autogpt/commands/twitter.py b/spaces/1line/AutoGPT/autogpt/commands/twitter.py deleted file mode 100644 index 3eaed36e20e1c520690ac59f25a4da6501f3440f..0000000000000000000000000000000000000000 --- a/spaces/1line/AutoGPT/autogpt/commands/twitter.py +++ /dev/null @@ -1,26 +0,0 @@ -import os - -import tweepy -from dotenv import load_dotenv - -load_dotenv() - - -def send_tweet(tweet_text): - consumer_key = os.environ.get("TW_CONSUMER_KEY") - consumer_secret = os.environ.get("TW_CONSUMER_SECRET") - access_token = os.environ.get("TW_ACCESS_TOKEN") - access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET") - # Authenticate to Twitter - auth = tweepy.OAuthHandler(consumer_key, consumer_secret) - auth.set_access_token(access_token, access_token_secret) - - # Create API object - api = tweepy.API(auth) - - # Send tweet - try: - api.update_status(tweet_text) - print("Tweet sent successfully!") - except tweepy.TweepyException as e: - print("Error sending tweet: {}".format(e.reason)) diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Beast Quest MOD APK The Ultimate Adventure Game with Infinite Resources in 2023.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Beast Quest MOD APK The Ultimate Adventure Game with Infinite Resources in 2023.md deleted file mode 100644 index 6099760bbcef427fac3d92569f243e1c6d9a3715..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Beast Quest MOD APK The Ultimate Adventure Game with Infinite Resources in 2023.md +++ /dev/null @@ -1,100 +0,0 @@ - -

Beast Quest Mod APK 2023: Everything You Need to Know

-

Beast Quest is a popular mobile game based on the best-selling fantasy novels by Adam Blade. It is an action-adventure game that lets you explore the open world of Avantia, fight against dangerous creatures and giant beasts, collect treasures and artifacts, and upgrade your equipment. If you are a fan of Beast Quest, you might be interested in the mod apk version of the game that will be released in 2023. Here are some of the features, benefits, and drawbacks of the Beast Quest mod apk 2023.

-

beast quest mod apk 2023


Download Filehttps://urlin.us/2uT34u



-

What is a mod apk?

-

A mod apk is a modified version of an original application that has been altered by third-party developers to add or remove some features, enhance the performance, or unlock some premium content. A mod apk usually requires you to download and install it manually from an external source, rather than from the official app store.

-

What are the features of the Beast Quest mod apk 2023?

-

The Beast Quest mod apk 2023 will offer some features that are not available in the original game, such as:

- -

What are the benefits of the Beast Quest mod apk 2023?

-

The Beast Quest mod apk 2023 will offer some benefits for players who want to enjoy the game more, such as:

- -

What are the drawbacks of the Beast Quest mod apk 2023?

-

The Beast Quest mod apk 2023 will also have some drawbacks that you should be aware of before downloading it, such as:

- -

How to download and install the Beast Quest mod apk 2023?

-

If you want to try the Beast Quest mod apk 2023, you will need to follow these steps:

-


-
    -
  1. Find a reliable source: You will need to find a website or a platform that offers the Beast Quest mod apk 2023 for download. You can search online or ask for recommendations from other players. You should always check the reviews, ratings, and feedback of the source before downloading anything.
  2. -
  3. Download the file: You will need to download the Beast Quest mod apk 2023 file to your device. You should always scan the file for any malware or viruses before opening it. You should also make sure that you have enough storage space on your device.
  4. -
  5. Enable unknown sources: You will need to enable the option to install apps from unknown sources on your device. This will allow you to install the Beast Quest mod apk 2023 without any restrictions. You can usually find this option in your device settings, security settings, or developer options.
  6. -
  7. Install the app: You will need to install the Beast Quest mod apk 2023 on your device. You should follow the instructions on the screen and agree to the terms and conditions. You should also allow the app to access any permissions or resources that it needs.
  8. -
  9. Launch the game: You will need to launch the Beast Quest mod apk 2023 on your device. You should see the modded features and options in the game menu. You can now enjoy the game with the mod apk version.
  10. -
-

How to uninstall the Beast Quest mod apk 2023?

-

If you want to uninstall the Beast Quest mod apk 2023, you will need to follow these steps:

-
    -
  1. Delete the app: You will need to delete the Beast Quest mod apk 2023 from your device. You can usually do this by long-pressing the app icon and selecting the uninstall option. You can also go to your device settings, apps, and find the app and uninstall it.
  2. -
  3. Clear the cache and data: You will need to clear the cache and data of the Beast Quest mod apk 2023 from your device. This will remove any residual files or settings that may affect your device performance or storage. You can usually do this by going to your device settings, apps, and finding the app and clearing its cache and data.
  4. -
  5. Restore the original game: You will need to restore the original game of Beast Quest on your device. You can do this by downloading and installing it from the official app store. You should be able to play the game without any modded features or options.
  6. -
-

Conclusion

-

The Beast Quest mod apk 2023 is a modified version of the original game that offers some features, benefits, and drawbacks for players who want to enjoy the game more. It is not an official version of the game and it may have some security, compatibility, or support issues. It is up to you whether you want to try it or not, but you should always be careful and responsible when downloading and installing any mod apk.

-

FAQs

-

What is Beast Quest?

-

Beast Quest is a mobile game based on the best-selling fantasy novels by Adam Blade. It is an action-adventure game that lets you explore the open world of Avantia, fight against dangerous creatures and giant beasts, collect treasures and artifacts, and upgrade your equipment.

-

Is Beast Quest free to play?

-

Beast Quest is free to download and play, but it also offers some in-app purchases that can enhance your gameplay experience or unlock some premium content.

-

Is Beast Quest mod apk safe?

-

Beast Quest mod apk is not an official version of the game and it may have some security risks. You should always download and install it from a reliable source and scan it for any malware or viruses before opening it.

-

Is Beast Quest mod apk legal?

-

Beast Quest mod apk is not an authorized version of the game and it may violate some terms and conditions of the original game developers or publishers. You should always respect their intellectual property rights and use their products in a fair and ethical way.

-

How can I contact Beast Quest support?

-

If you have any questions or issues with Beast Quest, you can contact their support team by emailing them at support@miniclip.com or visiting their website at https://support.miniclip.com/.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy the Best Vegas Casino Experience with Lucky Play Casino - Download Now!.md b/spaces/1phancelerku/anime-remove-background/Enjoy the Best Vegas Casino Experience with Lucky Play Casino - Download Now!.md deleted file mode 100644 index 3e4666bbfc3cda2c8db979952834dfe06daf7e58..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Enjoy the Best Vegas Casino Experience with Lucky Play Casino - Download Now!.md +++ /dev/null @@ -1,98 +0,0 @@ - -

Download Lucky Play Casino: The Best Way to Enjoy Vegas Slots Anywhere You Go

-

Do you love playing slot machines but don't have the time or money to visit a real casino? Do you want to feel the excitement of hitting the jackpot and winning big prizes? If you answered yes, then you should download Lucky Play Casino, the best online casino app that lets you play authentic Vegas slots on your mobile device. In this article, we will tell you what Lucky Play Casino is, how to download it, why you should play it, and some tips and tricks to help you win more.

-

What is Lucky Play Casino?

-

Lucky Play Casino is a free online casino app that features hundreds of slot machines from American Gaming Systems (AGS), the manufacturers of your favorite slot machines in real casinos. You can play classic slots like Golden Wins, Jade Wins, Colossal Diamonds, Royal Reels, Liberty 777, So Hot 7s, and more. You can also play video slots like Monkey in the Bank, Buffalo Nation, Double the Devil, Fever 777, and many others. All the slots have stunning graphics, realistic sounds, exciting bonus rounds, and huge jackpots.

-

download lucky play casino


Downloadhttps://jinyurl.com/2uNLqE



-

But that's not all. Lucky Play Casino also offers other casino games like blackjack, video poker, and bingo. You can play these games for free or join the challenging casino tournaments and compete with other players for big payouts. You can also win progressive jackpots that keep growing until someone wins them. And if you need more coins, you can get them for free every day by spinning the wheel, watching videos, completing missions, or inviting friends.

-

Features of Lucky Play Casino

-

- Authentic casino slots from AGS

-

Lucky Play Casino has the most authentic casino slots from AGS, the leading provider of gaming solutions for casinos worldwide. You can enjoy the same slot machines that you find in Las Vegas, Atlantic City, Macau, and other gambling destinations. You can also discover new slots that are added regularly to keep you entertained.

-

- Free online casino games with bonus rounds and jackpots

-

Lucky Play Casino gives you plenty of opportunities to win big with its free online casino games. You can play slots with bonus rounds that can multiply your winnings or trigger free spins. You can also play slots with jackpots that can award you millions of coins in one spin. And if you're lucky enough, you might even hit the ultimate prize: the Grand Jackpot.

-

- Challenging casino tournaments and progressive jackpots

-

If you want to test your skills and luck against other players, you can join the casino tournaments and play for huge payouts. You can choose from different types of tournaments like slots, blackjack, video poker, or bingo. You can also play for progressive jackpots that are linked across multiple games and increase every time someone plays them.

-

How to download Lucky Play Casino

-

- For Android devices

-

If you have an Android device, you can download Lucky Play Casino from the Google Play Store. Just follow these steps:

-
    -
  1. Open the Google Play Store app
  2. Search for "Lucky Play Casino" and tap on the app icon
  3. Tap on the "Install" button and wait for the app to download
  4. Tap on the "Open" button and enjoy playing Lucky Play Casino
-

- For iOS devices

-

If you have an iOS device, you can download Lucky Play Casino from the App Store. Just follow these steps:

-
    -
  1. Open the App Store app
  2. Search for "Lucky Play Casino" and tap on the app icon
  3. Tap on the "Get" button and enter your Apple ID password if prompted
  4. Wait for the app to download and install
  5. Tap on the app icon and enjoy playing Lucky Play Casino
-

Why download Lucky Play Casino?

-

Now that you know what Lucky Play Casino is and how to download it, you might be wondering why you should play it. Well, there are many reasons why Lucky Play Casino is the best online casino app for you. Here are some of them:

-

Benefits of playing Lucky Play Casino

-

- Experience the thrill of Vegas anytime, anywhere

-

Lucky Play Casino lets you experience the thrill of Vegas without leaving your home. You can play authentic casino slots that make you feel like you're in a real casino. You can also enjoy the stunning graphics, realistic sounds, and exciting animations that make the games more fun and immersive. You can play Lucky Play Casino anytime, anywhere, as long as you have an internet connection.

-

-

- Play with millions of other players online

-

Lucky Play Casino is not just a solo game. You can also play with millions of other players online who share your passion for casino games. You can chat with them, send them gifts, join their clubs, or challenge them in tournaments. You can also make new friends and socialize with people from different countries and cultures.

-

- Get free coins and bonuses every day

-

Lucky Play Casino is generous when it comes to giving you free coins and bonuses. You can get free coins every day by spinning the wheel, watching videos, completing missions, or inviting friends. You can also get bonuses for logging in, leveling up, or playing certain games. You can use these coins and bonuses to play more games and win more prizes.

-

Tips and tricks for playing Lucky Play Casino

-

- Choose the right slot machine for your budget and style

-

Lucky Play Casino has hundreds of slot machines to choose from, but not all of them are suitable for your budget and style. Some slot machines have higher payouts but lower odds, while others have lower payouts but higher odds. Some slot machines have more paylines but higher bets, while others have fewer paylines but lower bets. You should choose a slot machine that matches your budget and style, so you can have more fun and win more.

-

- Use the auto-spin feature to save time and fit in more spins

-

Lucky Play Casino has an auto-spin feature that lets you spin the reels automatically without pressing the spin button every time. This feature saves you time and lets you fit in more spins per session, since it runs faster and more consistently than manual spinning. You can also adjust the number of auto-spins, the bet amount, and the stop conditions to suit your preferences.

-

- Join a club and chat with other players for more fun and rewards

-

Lucky Play Casino has a club feature that lets you join or create a club with other players who share your interests. You can chat with them, send them gifts, or play together in club tournaments. You can also earn club points by playing games or completing tasks, which can help you rank up your club and get more rewards.

-

Conclusion

-

Lucky Play Casino is a standout online casino app that lets you play authentic Vegas slots on your mobile device. You can download it for free from the Google Play Store or the App Store and enjoy hundreds of slot machines from AGS, the leading provider of gaming solutions for casinos worldwide. Beyond slots, it offers other casino games like blackjack, video poker, and bingo, plus challenging casino tournaments and progressive jackpots. You can experience the thrill of Vegas anytime, anywhere, play with millions of other players online, and collect free coins and bonuses every day. The tips above can also help you choose the right slot machine, use the auto-spin feature, and join a club for more fun and rewards.

-

If you love playing casino games but don't have the time or money to visit a real casino, then Lucky Play Casino is the perfect app for you. Download it now and start playing today!

-

Frequently Asked Questions

-
  • Q: How can I get more coins and bonuses in Lucky Play Casino? A: There are many ways to get more coins and bonuses. You can get free coins every day by spinning the wheel, watching videos, completing missions, or inviting friends. You can also earn bonuses for logging in, leveling up, or playing certain games, and you can buy coins and bonuses with real money if you want to.

    I hope this article has helped you learn more about Lucky Play Casino and how to download it. If you have any feedback or suggestions, please let me know in the comments below. Thank you for reading and happy gaming!

    -
    -
    \ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_and_k_diffusion_objects.py b/spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_and_k_diffusion_objects.py deleted file mode 100644 index 678970f3ee66083cdfde1b024c0b8724eccada19..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_and_k_diffusion_objects.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - -from ..utils import DummyObject, requires_backends - - -class StableDiffusionKDiffusionPipeline(metaclass=DummyObject): - _backends = ["paddle", "paddlenlp", "k_diffusion"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["paddle", "paddlenlp", "k_diffusion"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp", "k_diffusion"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["paddle", "paddlenlp", "k_diffusion"]) diff --git a/spaces/1vash/demo-flask-docker-template/static/style.css b/spaces/1vash/demo-flask-docker-template/static/style.css deleted file mode 100644 index c46b108cfb1d454b999f915dc44a7d3ee4c584d3..0000000000000000000000000000000000000000 --- a/spaces/1vash/demo-flask-docker-template/static/style.css +++ /dev/null @@ -1,45 +0,0 @@ -body { - --text: hsl(0 0% 15%); - padding: 2.5rem; - font-family: sans-serif; - color: var(--text); -} - -body.dark-theme { - --text: hsl(0 0% 90%); - background-color: hsl(223 39% 7%); -} - -main { - max-width: 80rem; - text-align: center; -} - -section { - display: flex; - flex-direction: column; - align-items: center; -} - -a { - color: var(--text); -} - -form { - width: 30rem; - margin: 0 auto; -} - -input { - width: 100%; -} - -button { - cursor: pointer; -} - -.text-gen-output { - min-height: 1.2rem; - margin: 1rem; - border: 0.5px solid grey; -} \ No newline at end of file diff --git a/spaces/232labs/VToonify/vtoonify/model/raft/core/utils/augmentor.py b/spaces/232labs/VToonify/vtoonify/model/raft/core/utils/augmentor.py deleted file mode 100644 index e81c4f2b5c16c31c0ae236d744f299d430228a04..0000000000000000000000000000000000000000 --- a/spaces/232labs/VToonify/vtoonify/model/raft/core/utils/augmentor.py +++ /dev/null @@ -1,246 +0,0 @@ -import numpy as np -import random -import math -from PIL import Image - -import cv2 -cv2.setNumThreads(0) -cv2.ocl.setUseOpenCL(False) - -import torch -from torchvision.transforms import ColorJitter -import torch.nn.functional as F - - -class FlowAugmentor: - def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True): - - # spatial augmentation params - self.crop_size = crop_size - self.min_scale = min_scale - self.max_scale = max_scale - self.spatial_aug_prob = 0.8 - self.stretch_prob = 0.8 - 
self.max_stretch = 0.2 - - # flip augmentation params - self.do_flip = do_flip - self.h_flip_prob = 0.5 - self.v_flip_prob = 0.1 - - # photometric augmentation params - self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14) - self.asymmetric_color_aug_prob = 0.2 - self.eraser_aug_prob = 0.5 - - def color_transform(self, img1, img2): - """ Photometric augmentation """ - - # asymmetric - if np.random.rand() < self.asymmetric_color_aug_prob: - img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8) - img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8) - - # symmetric - else: - image_stack = np.concatenate([img1, img2], axis=0) - image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8) - img1, img2 = np.split(image_stack, 2, axis=0) - - return img1, img2 - - def eraser_transform(self, img1, img2, bounds=[50, 100]): - """ Occlusion augmentation """ - - ht, wd = img1.shape[:2] - if np.random.rand() < self.eraser_aug_prob: - mean_color = np.mean(img2.reshape(-1, 3), axis=0) - for _ in range(np.random.randint(1, 3)): - x0 = np.random.randint(0, wd) - y0 = np.random.randint(0, ht) - dx = np.random.randint(bounds[0], bounds[1]) - dy = np.random.randint(bounds[0], bounds[1]) - img2[y0:y0+dy, x0:x0+dx, :] = mean_color - - return img1, img2 - - def spatial_transform(self, img1, img2, flow): - # randomly sample scale - ht, wd = img1.shape[:2] - min_scale = np.maximum( - (self.crop_size[0] + 8) / float(ht), - (self.crop_size[1] + 8) / float(wd)) - - scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) - scale_x = scale - scale_y = scale - if np.random.rand() < self.stretch_prob: - scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) - scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) - - scale_x = np.clip(scale_x, min_scale, None) - scale_y = np.clip(scale_y, min_scale, None) - - if np.random.rand() < self.spatial_aug_prob: - # rescale the images - img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) - img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) - flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) - flow = flow * [scale_x, scale_y] - - if self.do_flip: - if np.random.rand() < self.h_flip_prob: # h-flip - img1 = img1[:, ::-1] - img2 = img2[:, ::-1] - flow = flow[:, ::-1] * [-1.0, 1.0] - - if np.random.rand() < self.v_flip_prob: # v-flip - img1 = img1[::-1, :] - img2 = img2[::-1, :] - flow = flow[::-1, :] * [1.0, -1.0] - - y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0]) - x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1]) - - img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] - img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] - flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] - - return img1, img2, flow - - def __call__(self, img1, img2, flow): - img1, img2 = self.color_transform(img1, img2) - img1, img2 = self.eraser_transform(img1, img2) - img1, img2, flow = self.spatial_transform(img1, img2, flow) - - img1 = np.ascontiguousarray(img1) - img2 = np.ascontiguousarray(img2) - flow = np.ascontiguousarray(flow) - - return img1, img2, flow - -class SparseFlowAugmentor: - def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False): - # spatial augmentation params - self.crop_size = crop_size - self.min_scale = min_scale - self.max_scale = max_scale - self.spatial_aug_prob = 0.8 - 
self.stretch_prob = 0.8 - self.max_stretch = 0.2 - - # flip augmentation params - self.do_flip = do_flip - self.h_flip_prob = 0.5 - self.v_flip_prob = 0.1 - - # photometric augmentation params - self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14) - self.asymmetric_color_aug_prob = 0.2 - self.eraser_aug_prob = 0.5 - - def color_transform(self, img1, img2): - image_stack = np.concatenate([img1, img2], axis=0) - image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8) - img1, img2 = np.split(image_stack, 2, axis=0) - return img1, img2 - - def eraser_transform(self, img1, img2): - ht, wd = img1.shape[:2] - if np.random.rand() < self.eraser_aug_prob: - mean_color = np.mean(img2.reshape(-1, 3), axis=0) - for _ in range(np.random.randint(1, 3)): - x0 = np.random.randint(0, wd) - y0 = np.random.randint(0, ht) - dx = np.random.randint(50, 100) - dy = np.random.randint(50, 100) - img2[y0:y0+dy, x0:x0+dx, :] = mean_color - - return img1, img2 - - def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0): - ht, wd = flow.shape[:2] - coords = np.meshgrid(np.arange(wd), np.arange(ht)) - coords = np.stack(coords, axis=-1) - - coords = coords.reshape(-1, 2).astype(np.float32) - flow = flow.reshape(-1, 2).astype(np.float32) - valid = valid.reshape(-1).astype(np.float32) - - coords0 = coords[valid>=1] - flow0 = flow[valid>=1] - - ht1 = int(round(ht * fy)) - wd1 = int(round(wd * fx)) - - coords1 = coords0 * [fx, fy] - flow1 = flow0 * [fx, fy] - - xx = np.round(coords1[:,0]).astype(np.int32) - yy = np.round(coords1[:,1]).astype(np.int32) - - v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1) - xx = xx[v] - yy = yy[v] - flow1 = flow1[v] - - flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32) - valid_img = np.zeros([ht1, wd1], dtype=np.int32) - - flow_img[yy, xx] = flow1 - valid_img[yy, xx] = 1 - - return flow_img, valid_img - - def spatial_transform(self, img1, img2, flow, valid): - # randomly sample scale - - ht, wd = img1.shape[:2] - min_scale = np.maximum( - (self.crop_size[0] + 1) / float(ht), - (self.crop_size[1] + 1) / float(wd)) - - scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) - scale_x = np.clip(scale, min_scale, None) - scale_y = np.clip(scale, min_scale, None) - - if np.random.rand() < self.spatial_aug_prob: - # rescale the images - img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) - img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) - flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y) - - if self.do_flip: - if np.random.rand() < 0.5: # h-flip - img1 = img1[:, ::-1] - img2 = img2[:, ::-1] - flow = flow[:, ::-1] * [-1.0, 1.0] - valid = valid[:, ::-1] - - margin_y = 20 - margin_x = 50 - - y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y) - x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x) - - y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0]) - x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1]) - - img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] - img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] - flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] - valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] - return img1, img2, flow, valid - - - def __call__(self, img1, img2, flow, valid): - img1, img2 = self.color_transform(img1, img2) - img1, img2 = self.eraser_transform(img1, img2) - img1, img2, flow, valid = 
self.spatial_transform(img1, img2, flow, valid) - - img1 = np.ascontiguousarray(img1) - img2 = np.ascontiguousarray(img2) - flow = np.ascontiguousarray(flow) - valid = np.ascontiguousarray(valid) - - return img1, img2, flow, valid diff --git a/spaces/2ndelement/voicevox/test/test_mora_list.py b/spaces/2ndelement/voicevox/test/test_mora_list.py deleted file mode 100644 index 25b287fa0e8b0febb1895ac84223823915e548ea..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/test/test_mora_list.py +++ /dev/null @@ -1,20 +0,0 @@ -from unittest import TestCase - -from voicevox_engine.mora_list import openjtalk_mora2text - - -class TestOpenJTalkMoraList(TestCase): - def test_mora2text(self): - self.assertEqual("ッ", openjtalk_mora2text["cl"]) - self.assertEqual("ティ", openjtalk_mora2text["ti"]) - self.assertEqual("トゥ", openjtalk_mora2text["tu"]) - self.assertEqual("ディ", openjtalk_mora2text["di"]) - # GitHub issue #60 - self.assertEqual("ギェ", openjtalk_mora2text["gye"]) - self.assertEqual("イェ", openjtalk_mora2text["ye"]) - - def test_mora2text_injective(self): - """異なるモーラが同じ読みがなに対応しないか確認する""" - values = list(openjtalk_mora2text.values()) - uniq_values = list(set(values)) - self.assertCountEqual(values, uniq_values) diff --git a/spaces/4Taps/SadTalker/src/utils/text2speech.py b/spaces/4Taps/SadTalker/src/utils/text2speech.py deleted file mode 100644 index 3ecaef36961494c8b2b1f5771a70b997efa04ffd..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/utils/text2speech.py +++ /dev/null @@ -1,12 +0,0 @@ -import os - -def text2speech(txt, audio_path): - print(txt) - cmd = f'tts --text "{txt}" --out_path {audio_path}' - print(cmd) - try: - os.system(cmd) - return audio_path - except: - print("Error: Failed convert txt to audio") - return None \ No newline at end of file diff --git a/spaces/AICODER009/food_detection/model.py b/spaces/AICODER009/food_detection/model.py deleted file mode 100644 index 52c2696c874740179528f0bdae8ce87b774a138f..0000000000000000000000000000000000000000 --- a/spaces/AICODER009/food_detection/model.py +++ /dev/null @@ -1,36 +0,0 @@ -import torch -import torchvision - -from torch import nn - - -def create_effnetb2_model(num_classes:int=3, - seed:int=42): - """Creates an EfficientNetB2 feature extractor model and transforms. - - Args: - num_classes (int, optional): number of classes in the classifier head. - Defaults to 3. - seed (int, optional): random seed value. Defaults to 42. - - Returns: - model (torch.nn.Module): EffNetB2 feature extractor model. - transforms (torchvision.transforms): EffNetB2 image transforms. 
- """ - # Create EffNetB2 pretrained weights, transforms and model - weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT - transforms = weights.transforms() - model = torchvision.models.efficientnet_b2(weights=weights) - - # Freeze all layers in base model - for param in model.parameters(): - param.requires_grad = False - - # Change classifier head with random seed for reproducibility - torch.manual_seed(seed) - model.classifier = nn.Sequential( - nn.Dropout(p=0.3, inplace=True), - nn.Linear(in_features=1408, out_features=num_classes), - ) - - return model, transforms diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/train_util.py b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/train_util.py deleted file mode 100644 index 6cd62cc36043a2db75cc6761c51fdfdd18d11392..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/train_util.py +++ /dev/null @@ -1,178 +0,0 @@ -# -*- coding: utf-8 -*- -#!/usr/bin/env python3 -import os -import sys -import logging -from typing import Callable, Dict, Union -import yaml -import torch -from torch.optim.swa_utils import AveragedModel as torch_average_model -import numpy as np -import pandas as pd -from pprint import pformat - - -def load_dict_from_csv(csv, cols): - df = pd.read_csv(csv, sep="\t") - output = dict(zip(df[cols[0]], df[cols[1]])) - return output - - -def init_logger(filename, level="INFO"): - formatter = logging.Formatter( - "[ %(levelname)s : %(asctime)s ] - %(message)s") - logger = logging.getLogger(__name__ + "." + filename) - logger.setLevel(getattr(logging, level)) - # Log results to std - # stdhandler = logging.StreamHandler(sys.stdout) - # stdhandler.setFormatter(formatter) - # Dump log to file - filehandler = logging.FileHandler(filename) - filehandler.setFormatter(formatter) - logger.addHandler(filehandler) - # logger.addHandler(stdhandler) - return logger - - -def init_obj(module, config, **kwargs):# 'captioning.models.encoder' - obj_args = config["args"].copy() - obj_args.update(kwargs) - return getattr(module, config["type"])(**obj_args) - - -def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'): - """pprint_dict - - :param outputfun: function to use, defaults to sys.stdout - :param in_dict: dict to print - """ - if formatter == 'yaml': - format_fun = yaml.dump - elif formatter == 'pretty': - format_fun = pformat - for line in format_fun(in_dict).split('\n'): - outputfun(line) - - -def merge_a_into_b(a, b): - # merge dict a into dict b. values in a will overwrite b. 
- for k, v in a.items(): - if isinstance(v, dict) and k in b: - assert isinstance( - b[k], dict - ), "Cannot inherit key '{}' from base!".format(k) - merge_a_into_b(v, b[k]) - else: - b[k] = v - - -def load_config(config_file): - with open(config_file, "r") as reader: - config = yaml.load(reader, Loader=yaml.FullLoader) - if "inherit_from" in config: - base_config_file = config["inherit_from"] - base_config_file = os.path.join( - os.path.dirname(config_file), base_config_file - ) - assert not os.path.samefile(config_file, base_config_file), \ - "inherit from itself" - base_config = load_config(base_config_file) - del config["inherit_from"] - merge_a_into_b(config, base_config) - return base_config - return config - - -def parse_config_or_kwargs(config_file, **kwargs): - yaml_config = load_config(config_file) - # passed kwargs will override yaml config - args = dict(yaml_config, **kwargs) - return args - - -def store_yaml(config, config_file): - with open(config_file, "w") as con_writer: - yaml.dump(config, con_writer, indent=4, default_flow_style=False) - - -class MetricImprover: - - def __init__(self, mode): - assert mode in ("min", "max") - self.mode = mode - # min: lower -> better; max: higher -> better - self.best_value = np.inf if mode == "min" else -np.inf - - def compare(self, x, best_x): - return x < best_x if self.mode == "min" else x > best_x - - def __call__(self, x): - if self.compare(x, self.best_value): - self.best_value = x - return True - return False - - def state_dict(self): - return self.__dict__ - - def load_state_dict(self, state_dict): - self.__dict__.update(state_dict) - - -def fix_batchnorm(model: torch.nn.Module): - def inner(module): - class_name = module.__class__.__name__ - if class_name.find("BatchNorm") != -1: - module.eval() - model.apply(inner) - - -def load_pretrained_model(model: torch.nn.Module, - pretrained: Union[str, Dict], - output_fn: Callable = sys.stdout.write): - if not isinstance(pretrained, dict) and not os.path.exists(pretrained): - output_fn(f"pretrained {pretrained} not exist!") - return - - if hasattr(model, "load_pretrained"): - model.load_pretrained(pretrained) - return - - if isinstance(pretrained, dict): - state_dict = pretrained - else: - state_dict = torch.load(pretrained, map_location="cpu") - - if "model" in state_dict: - state_dict = state_dict["model"] - model_dict = model.state_dict() - pretrained_dict = { - k: v for k, v in state_dict.items() if (k in model_dict) and ( - model_dict[k].shape == v.shape) - } - output_fn(f"Loading pretrained keys {pretrained_dict.keys()}") - model_dict.update(pretrained_dict) - model.load_state_dict(model_dict, strict=True) - - -class AveragedModel(torch_average_model): - - def update_parameters(self, model): - for p_swa, p_model in zip(self.parameters(), model.parameters()): - device = p_swa.device - p_model_ = p_model.detach().to(device) - if self.n_averaged == 0: - p_swa.detach().copy_(p_model_) - else: - p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_, - self.n_averaged.to(device))) - - for b_swa, b_model in zip(list(self.buffers())[1:], model.buffers()): - device = b_swa.device - b_model_ = b_model.detach().to(device) - if self.n_averaged == 0: - b_swa.detach().copy_(b_model_) - else: - b_swa.detach().copy_(self.avg_fn(b_swa.detach(), b_model_, - self.n_averaged.to(device))) - self.n_averaged += 1 diff --git a/spaces/AIxPha/Real-CUGAN/upcunet_v3.py b/spaces/AIxPha/Real-CUGAN/upcunet_v3.py deleted file mode 100644 index 
f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000 --- a/spaces/AIxPha/Real-CUGAN/upcunet_v3.py +++ /dev/null @@ -1,714 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F -import os, sys -import numpy as np - -root_path = os.path.abspath('.') -sys.path.append(root_path) - - -class SEBlock(nn.Module): - def __init__(self, in_channels, reduction=8, bias=False): - super(SEBlock, self).__init__() - self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias) - self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias) - - def forward(self, x): - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half() - else: - x0 = torch.mean(x, dim=(2, 3), keepdim=True) - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - def forward_mean(self, x, x0): - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - -class UNetConv(nn.Module): - def __init__(self, in_channels, mid_channels, out_channels, se): - super(UNetConv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(in_channels, mid_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - nn.Conv2d(mid_channels, out_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - ) - if se: - self.seblock = SEBlock(out_channels, reduction=8, bias=True) - else: - self.seblock = None - - def forward(self, x): - z = self.conv(x) - if self.seblock is not None: - z = self.seblock(z) - return z - - -class UNet1(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet1x3(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1x3, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = 
nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet2(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet2, self).__init__() - - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 64, 128, se=True) - self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0) - self.conv3 = UNetConv(128, 256, 128, se=True) - self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0) - self.conv4 = UNetConv(128, 64, 64, se=True) - self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv5 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3(x3) - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4(x2 + x3) - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - def forward_a(self, x): # conv234结尾有se - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x2): # conv234结尾有se - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3.conv(x3) - return x3 - - def forward_c(self, x2, x3): # conv234结尾有se - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4.conv(x2 + x3) - return x4 - - def forward_d(self, x1, x4): # conv234结尾有se - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) 
- x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - -class UpCunet2x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet2x, self).__init__() - self.unet1 = UNet1(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 36, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), 
dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 36, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2] - return res # - - -class UpCunet3x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet3x, self).__init__() - self.unet1 = UNet1x3(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 4 + 1) * 4 - pw = ((w0 - 1) // 4 + 1) * 4 - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_h = (h0 - 1) // 4 * 4 + 4 # 能被4整除 - else: - crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_w = (w0 - 1) // 4 * 4 + 4 # 能被4整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 28, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 28, 
crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 28, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop # - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3] - return res - - -class UpCunet4x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet4x, self).__init__() - self.unet1 = 
UNet1(in_channels, 64, deconv=True) - self.unet2 = UNet2(64, 64, deconv=False) - self.ps = nn.PixelShuffle(2) - self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True) - - def forward(self, x, tile_mode): - n, c, h0, w0 = x.shape - x00 = x - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - x = self.conv_final(x) - x = F.pad(x, (-1, -1, -1, -1)) - x = self.ps(x) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4] - x += F.interpolate(x00, scale_factor=4, mode='nearest') - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 38, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() 
- else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 38, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - x_crop = self.conv_final(x_crop) - x_crop = F.pad(x_crop, (-1, -1, -1, -1)) - x_crop = self.ps(x_crop) - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape) - res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4] - res += F.interpolate(x00, scale_factor=4, mode='nearest') - return res # - - -class RealWaifuUpScaler(object): - def __init__(self, scale, weight_path, half, device): - weight = torch.load(weight_path, map_location="cpu") - self.model = eval("UpCunet%sx" % scale)() - if (half == True): - self.model = self.model.half().to(device) - else: - self.model = self.model.to(device) - self.model.load_state_dict(weight, strict=True) - self.model.eval() - self.half = half - self.device = device - - def np2tensor(self, np_frame): - if (self.half == False): - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255 - else: - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255 - - def tensor2np(self, tensor): - if (self.half == False): - return ( - np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0))) - else: - return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), - (1, 2, 0))) - - def __call__(self, frame, tile_mode): - with torch.no_grad(): - tensor = self.np2tensor(frame) - result = self.tensor2np(self.model(tensor, tile_mode)) - return result - - -if __name__ == "__main__": - ###########inference_img - import time, cv2, sys - from time import time as ttime - - for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3), - ("weights_v3/up4x-latest-denoise3x.pth", 4)]: - for tile_mode in [0, 1, 2, 3, 4]: - upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0") - input_dir 
= "%s/input_dir1" % root_path - output_dir = "%s/opt-dir-all-test" % root_path - os.makedirs(output_dir, exist_ok=True) - for name in os.listdir(input_dir): - print(name) - tmp = name.split(".") - inp_path = os.path.join(input_dir, name) - suffix = tmp[-1] - prefix = ".".join(tmp[:-1]) - tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - print(inp_path, tmp_path) - # 支持中文路径 - # os.link(inp_path, tmp_path)#win用硬链接 - os.symlink(inp_path, tmp_path) # linux用软链接 - frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]] - t0 = ttime() - result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1] - t1 = ttime() - print(prefix, "done", t1 - t0) - tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - cv2.imwrite(tmp_opt_path, result) - n = 0 - while (1): - if (n == 0): - suffix = "_%sx_tile%s.png" % (scale, tile_mode) - else: - suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) # - if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False): - break - else: - n += 1 - final_opt_path = os.path.join(output_dir, prefix + suffix) - os.rename(tmp_opt_path, final_opt_path) - os.remove(tmp_path) diff --git a/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/cbhg.py b/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/cbhg.py deleted file mode 100644 index c2b8f061d10ec6b1a9490029a4b4ed43fdd5e861..0000000000000000000000000000000000000000 --- a/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/cbhg.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -The CBHG model implementation -""" -from typing import List, Optional - -from torch import nn -import torch - -from poetry_diacritizer.modules.tacotron_modules import CBHG, Prenet - - -class CBHGModel(nn.Module): - """CBHG model implementation as described in the paper: - https://ieeexplore.ieee.org/document/9274427 - - Args: - inp_vocab_size (int): the number of the input symbols - targ_vocab_size (int): the number of the target symbols (diacritics) - embedding_dim (int): the embedding size - use_prenet (bool): whether to use prenet or not - prenet_sizes (List[int]): the sizes of the prenet networks - cbhg_gru_units (int): the number of units of the CBHG GRU, which is the last - layer of the CBHG Model. 
- cbhg_filters (int): number of filters used in the CBHG module - cbhg_projections: projections used in the CBHG module - - Returns: - diacritics Dict[str, Tensor]: - """ - - def __init__( - self, - inp_vocab_size: int, - targ_vocab_size: int, - embedding_dim: int = 512, - use_prenet: bool = True, - prenet_sizes: List[int] = [512, 256], - cbhg_gru_units: int = 512, - cbhg_filters: int = 16, - cbhg_projections: List[int] = [128, 256], - post_cbhg_layers_units: List[int] = [256, 256], - post_cbhg_use_batch_norm: bool = True - ): - super().__init__() - self.use_prenet = use_prenet - self.embedding = nn.Embedding(inp_vocab_size, embedding_dim) - if self.use_prenet: - self.prenet = Prenet(embedding_dim, prenet_depth=prenet_sizes) - - self.cbhg = CBHG( - prenet_sizes[-1] if self.use_prenet else embedding_dim, - cbhg_gru_units, - K=cbhg_filters, - projections=cbhg_projections, - ) - - layers = [] - post_cbhg_layers_units = [cbhg_gru_units] + post_cbhg_layers_units - - for i in range(1, len(post_cbhg_layers_units)): - layers.append( - nn.LSTM( - post_cbhg_layers_units[i - 1] * 2, - post_cbhg_layers_units[i], - bidirectional=True, - batch_first=True, - ) - ) - if post_cbhg_use_batch_norm: - layers.append(nn.BatchNorm1d(post_cbhg_layers_units[i] * 2)) - - self.post_cbhg_layers = nn.ModuleList(layers) - self.projections = nn.Linear(post_cbhg_layers_units[-1] * 2, targ_vocab_size) - self.post_cbhg_layers_units = post_cbhg_layers_units - self.post_cbhg_use_batch_norm = post_cbhg_use_batch_norm - - - def forward( - self, - src: torch.Tensor, - lengths: Optional[torch.Tensor] = None, - target: Optional[torch.Tensor] = None, # not required in this model - ): - """Compute forward propagation""" - - # src = [batch_size, src len] - # lengths = [batch_size] - # target = [batch_size, trg len] - - embedding_out = self.embedding(src) - # embedding_out; [batch_size, src_len, embedding_dim] - - cbhg_input = embedding_out - if self.use_prenet: - cbhg_input = self.prenet(embedding_out) - - # cbhg_input = [batch_size, src_len, prenet_sizes[-1]] - - outputs = self.cbhg(cbhg_input, lengths) - - hn = torch.zeros((2, 2, 2)) - cn = torch.zeros((2, 2, 2)) - - for i, layer in enumerate(self.post_cbhg_layers): - if isinstance(layer, nn.BatchNorm1d): - outputs = layer(outputs.permute(0, 2, 1)) - outputs = outputs.permute(0, 2, 1) - continue - if i > 0: - outputs, (hn, cn) = layer(outputs, (hn, cn)) - else: - outputs, (hn, cn) = layer(outputs) - - - predictions = self.projections(outputs) - - # predictions = [batch_size, src len, targ_vocab_size] - - output = {"diacritics": predictions} - - return output diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/live2d.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/live2d.d.ts deleted file mode 100644 index a7643f51064340637f336640554ccf92c6da12bd..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/live2d.d.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { - Live2dCoreScriptFileCallback, - Live2dFileCallback, - Live2dGameObject -} from './gameobjects/live2d/index'; - -export { - Live2dCoreScriptFileCallback, - Live2dFileCallback, - Live2dGameObject -}; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/chess/RandomSymobl.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/chess/RandomSymobl.js deleted file mode 100644 index 
cc8acc0dbf480e6ff875a50fe52c77e162b2462a..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/chess/RandomSymobl.js +++ /dev/null @@ -1,38 +0,0 @@ -const GetRandom = Phaser.Utils.Array.GetRandom; - -var RandomSymbol = function (board, tileX, tileY, callback, scope, excluded) { - var symbol; - if (Array.isArray(callback)) { - // pick random symbol from symbol array - var symbols = callback; - // excluded: undefined or a symbol array - if (excluded !== undefined) { - for (var i = 0, cnt = symbols.length; i < cnt; i++) { - symbol = symbols[i]; - if (excluded.indexOf(symbol) !== -1) { - continue; - } - tmpSymbolArray.push(symbol); - } - symbol = GetRandom(tmpSymbolArray); - tmpSymbolArray.length = 0; - } else { - symbol = GetRandom(symbols); - } - - } else if (typeof (obj) === 'function') { - // symbols from return of callback - if (scope) { - symbol = callback.call(scope, board, tileX, tileY, excluded); - } else { - symbol = callback(board, tileX, tileY, excluded); - } - } else { - // symbol value - symbol = callback; - } - return symbol; -} - -var tmpSymbolArray = []; -export default RandomSymbol; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Factory.js deleted file mode 100644 index e1802b97f642d78d73c567e21872d20728956fbc..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Factory.js +++ /dev/null @@ -1,16 +0,0 @@ -import Press from './Press.js'; -import ObjectFactory from '../ObjectFactory.js'; -import IsGameObject from '../../../plugins/utils/system/IsGameObject.js'; -import SetValue from '../../../plugins/utils/object/SetValue.js'; - -ObjectFactory.register('press', function (gameObject, config) { - if (!IsGameObject(gameObject)) { - config = gameObject; - gameObject = this.scene; - } - return new Press(gameObject, config); -}); - -SetValue(window, 'RexPlugins.UI.Press', Press); - -export default Press; \ No newline at end of file diff --git a/spaces/AjulorC/question_answering_bot_deployed_with_Gradio/app.py b/spaces/AjulorC/question_answering_bot_deployed_with_Gradio/app.py deleted file mode 100644 index c033545d64482e6b61b23036a98cfa1fdbe8cc47..0000000000000000000000000000000000000000 --- a/spaces/AjulorC/question_answering_bot_deployed_with_Gradio/app.py +++ /dev/null @@ -1,39 +0,0 @@ -import tensorflow as tf - -#!pip install transformers - -from transformers import pipeline - -# importing necessary libraries -from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering - - -tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") -model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad",return_dict=False) - -nlp = pipeline("question-answering", model=model, tokenizer=tokenizer) - -#!pip install gradio -import gradio as gr - -# creating the function -def func(context, question): - result = nlp(question = question, context=context) - return result['answer'] - -example_1 = "(1) My name is Ajulor Christian, I am a data scientist and machine learning engineer" -qst_1 = "what is christian's profession?" - -example_2 = "(2) Natural Language Processing (NLP) allows machines to break down and interpret human language. 
It's at the core of tools we use every day – from translation software, chatbots, spam filters, and search engines, to grammar correction software, voice assistants, and social media monitoring tools." -qst_2 = "What is NLP used for?" - -# creating the interface -app = gr.Interface(fn=func, inputs = ['textbox', 'text'], outputs = 'textbox', - title = 'Question Answering bot', theme = 'dark-grass', - description = 'Input context and question, then get answers!', - examples = [[example_1, qst_1], - [example_2, qst_2]] - ) - -# launching the app -app.launch(inline=False) \ No newline at end of file diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/vits/modules.py b/spaces/Akmyradov/TurkmenTTSweSTT/vits/modules.py deleted file mode 100644 index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000 --- a/spaces/Akmyradov/TurkmenTTSweSTT/vits/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) 
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/AlexWang/lama/bin/mask_example.py b/spaces/AlexWang/lama/bin/mask_example.py deleted file mode 100644 index 59e25ca8eb3ed4141851c3af284fc66285444de0..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/bin/mask_example.py +++ /dev/null @@ -1,14 +0,0 @@ -import matplotlib.pyplot as plt -from skimage import io -from skimage.transform import resize - -from saicinpainting.evaluation.masks.mask import SegmentationMask - -im = io.imread('imgs/ex4.jpg') -im = resize(im, (512, 1024), anti_aliasing=True) -mask_seg = SegmentationMask(num_variants_per_mask=10) -mask_examples = mask_seg.get_masks(im) -for i, example in enumerate(mask_examples): - plt.imshow(example) - plt.show() - plt.imsave(f'tmp/img_masks/{i}.png', example) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/text_inversion.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/text_inversion.md deleted file mode 100644 index 050b0ca3d40384f3060429c0e6d377820eb78cd5..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/text_inversion.md +++ /dev/null @@ -1,277 +0,0 @@ - - - - -# Textual Inversion - -[Textual Inversion](https://arxiv.org/abs/2208.01618) is a technique for capturing novel concepts from a small number of example images. While the technique was originally demonstrated with a [latent diffusion model](https://github.com/CompVis/latent-diffusion), it has since been applied to other model variants like [Stable Diffusion](https://huggingface.co/docs/diffusers/main/en/conceptual/stable_diffusion). The learned concepts can be used to better control the images generated from text-to-image pipelines. It learns new "words" in the text encoder's embedding space, which are used within text prompts for personalized image generation. - -![Textual Inversion example](https://textual-inversion.github.io/static/images/editing/colorful_teapot.JPG) -By using just 3-5 images you can teach new concepts to a model such as Stable Diffusion for personalized image generation (image source). - -This guide will show you how to train a [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model with Textual Inversion. All the training scripts for Textual Inversion used in this guide can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) if you're interested in taking a closer look at how things work under the hood. - - - -There is a community-created collection of trained Textual Inversion models in the [Stable Diffusion Textual Inversion Concepts Library](https://huggingface.co/sd-concepts-library) which are readily available for inference. Over time, this'll hopefully grow into a useful resource as more concepts are added! 
- - - -Before you begin, make sure you install the library's training dependencies: - -```bash -pip install diffusers accelerate transformers -``` - -After all the dependencies have been set up, initialize a [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: - -```bash -accelerate config -``` - -To setup a default 🤗 Accelerate environment without choosing any configurations: - -```bash -accelerate config default -``` - -Or if your environment doesn't support an interactive shell like a notebook, you can use: - -```bash -from accelerate.utils import write_basic_config - -write_basic_config() -``` - -Finally, you try and [install xFormers](https://huggingface.co/docs/diffusers/main/en/training/optimization/xformers) to reduce your memory footprint with xFormers memory-efficient attention. Once you have xFormers installed, add the `--enable_xformers_memory_efficient_attention` argument to the training script. xFormers is not supported for Flax. - -## Upload model to Hub - -If you want to store your model on the Hub, add the following argument to the training script: - -```bash ---push_to_hub -``` - -## Save and load checkpoints - -It is often a good idea to regularly save checkpoints of your model during training. This way, you can resume training from a saved checkpoint if your training is interrupted for any reason. To save a checkpoint, pass the following argument to the training script to save the full training state in a subfolder in `output_dir` every 500 steps: - -```bash ---checkpointing_steps=500 -``` - -To resume training from a saved checkpoint, pass the following argument to the training script and the specific checkpoint you'd like to resume from: - -```bash ---resume_from_checkpoint="checkpoint-1500" -``` - -## Finetuning - -For your training dataset, download these [images of a cat toy](https://huggingface.co/datasets/diffusers/cat_toy_example) and store them in a directory. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide. - -```py -from huggingface_hub import snapshot_download - -local_dir = "./cat" -snapshot_download( - "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes" -) -``` - -Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument, and the `DATA_DIR` environment variable to the path of the directory containing the images. - -Now you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py). The script creates and saves the following files to your repository: `learned_embeds.bin`, `token_identifier.txt`, and `type_of_concept.txt`. - - - -💡 A full training run takes ~1 hour on one V100 GPU. While you're waiting for the training to complete, feel free to check out [how Textual Inversion works](#how-it-works) in the section below if you're curious! 
- - - - - -```bash -export MODEL_NAME="runwayml/stable-diffusion-v1-5" -export DATA_DIR="./cat" - -accelerate launch textual_inversion.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_data_dir=$DATA_DIR \ - --learnable_property="object" \ - --placeholder_token="" --initializer_token="toy" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=4 \ - --max_train_steps=3000 \ - --learning_rate=5.0e-04 --scale_lr \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --output_dir="textual_inversion_cat" \ - --push_to_hub -``` - - - -💡 If you want to increase the trainable capacity, you can associate your placeholder token, *e.g.* `` to -multiple embedding vectors. This can help the model to better capture the style of more (complex) images. -To enable training multiple embedding vectors, simply pass: - -```bash ---num_vectors=5 -``` - - - - -If you have access to TPUs, try out the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py) to train even faster (this'll also work for GPUs). With the same configuration settings, the Flax training script should be at least 70% faster than the PyTorch training script! ⚡️ - -Before you begin, make sure you install the Flax specific dependencies: - -```bash -pip install -U -r requirements_flax.txt -``` - -Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. - -Then you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py): - -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export DATA_DIR="./cat" - -python textual_inversion_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_data_dir=$DATA_DIR \ - --learnable_property="object" \ - --placeholder_token="" --initializer_token="toy" \ - --resolution=512 \ - --train_batch_size=1 \ - --max_train_steps=3000 \ - --learning_rate=5.0e-04 --scale_lr \ - --output_dir="textual_inversion_cat" \ - --push_to_hub -``` - - - -### Intermediate logging - -If you're interested in following along with your model training progress, you can save the generated images from the training process. Add the following arguments to the training script to enable intermediate logging: - -- `validation_prompt`, the prompt used to generate samples (this is set to `None` by default and intermediate logging is disabled) -- `num_validation_images`, the number of sample images to generate -- `validation_steps`, the number of steps before generating `num_validation_images` from the `validation_prompt` - -```bash ---validation_prompt="A backpack" ---num_validation_images=4 ---validation_steps=100 -``` - -## Inference - -Once you have trained a model, you can use it for inference with the [`StableDiffusionPipeline`]. - -The textual inversion script will by default only save the textual inversion embedding vector(s) that have -been added to the text encoder embedding matrix and consequently been trained. - - - - - -💡 The community has created a large library of different textual inversion embedding vectors, called [sd-concepts-library](https://huggingface.co/sd-concepts-library). 
-Instead of training textual inversion embeddings from scratch you can also see whether a fitting textual inversion embedding has already been added to the libary. - - - -To load the textual inversion embeddings you first need to load the base model that was used when training -your textual inversion embedding vectors. Here we assume that [`runwayml/stable-diffusion-v1-5`](runwayml/stable-diffusion-v1-5) -was used as a base model so we load it first: -```python -from diffusers import StableDiffusionPipeline -import torch - -model_id = "runwayml/stable-diffusion-v1-5" -pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") -``` - -Next, we need to load the textual inversion embedding vector which can be done via the [`TextualInversionLoaderMixin.load_textual_inversion`] -function. Here we'll load the embeddings of the "" example from before. -```python -pipe.load_textual_inversion("sd-concepts-library/cat-toy") -``` - -Now we can run the pipeline making sure that the placeholder token `` is used in our prompt. - -```python -prompt = "A backpack" - -image = pipe(prompt, num_inference_steps=50).images[0] -image.save("cat-backpack.png") -``` - -The function [`TextualInversionLoaderMixin.load_textual_inversion`] can not only -load textual embedding vectors saved in Diffusers' format, but also embedding vectors -saved in [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) format. -To do so, you can first download an embedding vector from [civitAI](https://civitai.com/models/3036?modelVersionId=8387) -and then load it locally: -```python -pipe.load_textual_inversion("./charturnerv2.pt") -``` - - -Currently there is no `load_textual_inversion` function for Flax so one has to make sure the textual inversion -embedding vector is saved as part of the model after training. - -The model can then be run just like any other Flax model: - -```python -import jax -import numpy as np -from flax.jax_utils import replicate -from flax.training.common_utils import shard -from diffusers import FlaxStableDiffusionPipeline - -model_path = "path-to-your-trained-model" -pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16) - -prompt = "A backpack" -prng_seed = jax.random.PRNGKey(0) -num_inference_steps = 50 - -num_samples = jax.device_count() -prompt = num_samples * [prompt] -prompt_ids = pipeline.prepare_inputs(prompt) - -# shard inputs and rng -params = replicate(params) -prng_seed = jax.random.split(prng_seed, jax.device_count()) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images -images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) -image.save("cat-backpack.png") -``` - - - -## How it works - -![Diagram from the paper showing overview](https://textual-inversion.github.io/static/images/training/training.JPG) -Architecture overview from the Textual Inversion blog post. - -Usually, text prompts are tokenized into an embedding before being passed to a model, which is often a transformer. Textual Inversion does something similar, but it learns a new token embedding, `v*`, from a special token `S*` in the diagram above. The model output is used to condition the diffusion model, which helps the diffusion model understand the prompt and new concepts from just a few example images. - -To do this, Textual Inversion uses a generator model and noisy versions of the training images. 
The generator tries to predict less noisy versions of the images, and the token embedding `v*` is optimized based on how well the generator does. If the token embedding successfully captures the new concept, it gives more useful information to the diffusion model and helps create clearer images with less noise. This optimization process typically occurs after several thousand steps of exposure to a variety of prompt and image variants. diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet.py deleted file mode 100644 index a34f81ff8dd9067cab082b641f01210198c4fb39..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet.py +++ /dev/null @@ -1,1002 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import tempfile -import traceback -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - ControlNetModel, - DDIMScheduler, - EulerDiscreteScheduler, - StableDiffusionControlNetPipeline, - UNet2DConditionModel, -) -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel -from diffusers.utils import load_image, load_numpy, randn_tensor, slow, torch_device -from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import ( - enable_full_determinism, - require_torch_2, - require_torch_gpu, - run_test_in_subprocess, -) - -from ..pipeline_params import ( - IMAGE_TO_IMAGE_IMAGE_PARAMS, - TEXT_TO_IMAGE_BATCH_PARAMS, - TEXT_TO_IMAGE_IMAGE_PARAMS, - TEXT_TO_IMAGE_PARAMS, -) -from ..test_pipelines_common import ( - PipelineKarrasSchedulerTesterMixin, - PipelineLatentTesterMixin, - PipelineTesterMixin, -) - - -enable_full_determinism() - - -# Will be run via run_test_in_subprocess -def _test_stable_diffusion_compile(in_queue, out_queue, timeout): - error = None - try: - _ = in_queue.get(timeout=timeout) - - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.to("cuda") - pipe.set_progress_bar_config(disable=None) - - pipe.unet.to(memory_format=torch.channels_last) - pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) - - pipe.controlnet.to(memory_format=torch.channels_last) - pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "bird" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ) - - output = 
pipe(prompt, image, generator=generator, output_type="np") - image = output.images[0] - - assert image.shape == (768, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out_full.npy" - ) - - assert np.abs(expected_image - image).max() < 1.0 - - except Exception: - error = f"{traceback.format_exc()}" - - results = {"error": error} - out_queue.put(results, timeout=timeout) - out_queue.join() - - -class ControlNetPipelineFastTests( - PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase -): - pipeline_class = StableDiffusionControlNetPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS - image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - torch.manual_seed(0) - controlnet = ControlNetModel( - block_out_channels=(32, 64), - layers_per_block=2, - in_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - cross_attention_dim=32, - conditioning_embedding_out_channels=(16, 32), - ) - torch.manual_seed(0) - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - components = { - "unet": unet, - "controlnet": controlnet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - - controlnet_embedder_scale_factor = 2 - image = randn_tensor( - (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), - generator=generator, - device=torch.device(device), - ) - - inputs = { - "prompt": "A painting of a squirrel eating a burger", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "numpy", - "image": image, - } - - return inputs - - def test_attention_slicing_forward_pass(self): - return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with CUDA and `xformers` installed", - ) - def test_xformers_attention_forwardGenerator_pass(self): - 
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) - - def test_inference_batch_single_identical(self): - self._test_inference_batch_single_identical(expected_max_diff=2e-3) - - -class StableDiffusionMultiControlNetPipelineFastTests( - PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase -): - pipeline_class = StableDiffusionControlNetPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - torch.manual_seed(0) - - def init_weights(m): - if isinstance(m, torch.nn.Conv2d): - torch.nn.init.normal(m.weight) - m.bias.data.fill_(1.0) - - controlnet1 = ControlNetModel( - block_out_channels=(32, 64), - layers_per_block=2, - in_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - cross_attention_dim=32, - conditioning_embedding_out_channels=(16, 32), - ) - controlnet1.controlnet_down_blocks.apply(init_weights) - - torch.manual_seed(0) - controlnet2 = ControlNetModel( - block_out_channels=(32, 64), - layers_per_block=2, - in_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - cross_attention_dim=32, - conditioning_embedding_out_channels=(16, 32), - ) - controlnet2.controlnet_down_blocks.apply(init_weights) - - torch.manual_seed(0) - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - controlnet = MultiControlNetModel([controlnet1, controlnet2]) - - components = { - "unet": unet, - "controlnet": controlnet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - - controlnet_embedder_scale_factor = 2 - - images = [ - randn_tensor( - (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), - generator=generator, - device=torch.device(device), - ), - randn_tensor( - (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), - generator=generator, - device=torch.device(device), - ), - ] - - inputs = { - "prompt": "A painting of a squirrel eating a burger", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - 
"output_type": "numpy", - "image": images, - } - - return inputs - - def test_control_guidance_switch(self): - components = self.get_dummy_components() - pipe = self.pipeline_class(**components) - pipe.to(torch_device) - - scale = 10.0 - steps = 4 - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_1 = pipe(**inputs)[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] - - # make sure that all outputs are different - assert np.sum(np.abs(output_1 - output_2)) > 1e-3 - assert np.sum(np.abs(output_1 - output_3)) > 1e-3 - assert np.sum(np.abs(output_1 - output_4)) > 1e-3 - - def test_attention_slicing_forward_pass(self): - return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with CUDA and `xformers` installed", - ) - def test_xformers_attention_forwardGenerator_pass(self): - self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) - - def test_inference_batch_single_identical(self): - self._test_inference_batch_single_identical(expected_max_diff=2e-3) - - def test_save_pretrained_raise_not_implemented_exception(self): - components = self.get_dummy_components() - pipe = self.pipeline_class(**components) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - with tempfile.TemporaryDirectory() as tmpdir: - try: - # save_pretrained is not implemented for Multi-ControlNet - pipe.save_pretrained(tmpdir) - except NotImplementedError: - pass - - -class StableDiffusionMultiControlNetOneModelPipelineFastTests( - PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase -): - pipeline_class = StableDiffusionControlNetPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - torch.manual_seed(0) - - def init_weights(m): - if isinstance(m, torch.nn.Conv2d): - torch.nn.init.normal(m.weight) - m.bias.data.fill_(1.0) - - controlnet = ControlNetModel( - block_out_channels=(32, 64), - layers_per_block=2, - in_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - cross_attention_dim=32, - conditioning_embedding_out_channels=(16, 32), - ) - controlnet.controlnet_down_blocks.apply(init_weights) - - torch.manual_seed(0) - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - 
beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - controlnet = MultiControlNetModel([controlnet]) - - components = { - "unet": unet, - "controlnet": controlnet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - - controlnet_embedder_scale_factor = 2 - - images = [ - randn_tensor( - (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), - generator=generator, - device=torch.device(device), - ), - ] - - inputs = { - "prompt": "A painting of a squirrel eating a burger", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "numpy", - "image": images, - } - - return inputs - - def test_control_guidance_switch(self): - components = self.get_dummy_components() - pipe = self.pipeline_class(**components) - pipe.to(torch_device) - - scale = 10.0 - steps = 4 - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_1 = pipe(**inputs)[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_3 = pipe( - **inputs, - control_guidance_start=[0.1], - control_guidance_end=[0.2], - )[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] - - # make sure that all outputs are different - assert np.sum(np.abs(output_1 - output_2)) > 1e-3 - assert np.sum(np.abs(output_1 - output_3)) > 1e-3 - assert np.sum(np.abs(output_1 - output_4)) > 1e-3 - - def test_attention_slicing_forward_pass(self): - return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with CUDA and `xformers` installed", - ) - def test_xformers_attention_forwardGenerator_pass(self): - self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) - - def test_inference_batch_single_identical(self): - self._test_inference_batch_single_identical(expected_max_diff=2e-3) - - def test_save_pretrained_raise_not_implemented_exception(self): - components = 
self.get_dummy_components() - pipe = self.pipeline_class(**components) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - with tempfile.TemporaryDirectory() as tmpdir: - try: - # save_pretrained is not implemented for Multi-ControlNet - pipe.save_pretrained(tmpdir) - except NotImplementedError: - pass - - -@slow -@require_torch_gpu -class ControlNetPipelineSlowTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_canny(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "bird" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (768, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out.npy" - ) - - assert np.abs(expected_image - image).max() < 9e-2 - - def test_depth(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "Stormtrooper's lecture" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (512, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth_out.npy" - ) - - assert np.abs(expected_image - image).max() < 8e-1 - - def test_hed(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "oil painting of handsome old man, masterpiece" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (704, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed_out.npy" - ) - - assert np.abs(expected_image - image).max() < 8e-2 - - def test_mlsd(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", 
safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "room" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (704, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd_out.npy" - ) - - assert np.abs(expected_image - image).max() < 5e-2 - - def test_normal(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "cute toy" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (512, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal_out.npy" - ) - - assert np.abs(expected_image - image).max() < 5e-2 - - def test_openpose(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "Chef in the kitchen" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (768, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/chef_pose_out.npy" - ) - - assert np.abs(expected_image - image).max() < 8e-2 - - def test_scribble(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(5) - prompt = "bag" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (640, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble_out.npy" - ) - - assert np.abs(expected_image - image).max() < 8e-2 - - def 
test_seg(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(5) - prompt = "house" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (512, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg_out.npy" - ) - - assert np.abs(expected_image - image).max() < 8e-2 - - def test_sequential_cpu_offloading(self): - torch.cuda.empty_cache() - torch.cuda.reset_max_memory_allocated() - torch.cuda.reset_peak_memory_stats() - - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - pipe.enable_sequential_cpu_offload() - - prompt = "house" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" - ) - - _ = pipe( - prompt, - image, - num_inference_steps=2, - output_type="np", - ) - - mem_bytes = torch.cuda.max_memory_allocated() - # make sure that less than 7 GB is allocated - assert mem_bytes < 4 * 10**9 - - def test_canny_guess_mode(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ) - - output = pipe( - prompt, - image, - generator=generator, - output_type="np", - num_inference_steps=3, - guidance_scale=3.0, - guess_mode=True, - ) - - image = output.images[0] - assert image.shape == (768, 512, 3) - - image_slice = image[-3:, -3:, -1] - expected_slice = np.array([0.2724, 0.2846, 0.2724, 0.3843, 0.3682, 0.2736, 0.4675, 0.3862, 0.2887]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_canny_guess_mode_euler(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ) - - output = pipe( - prompt, - image, - generator=generator, - output_type="np", - num_inference_steps=3, - guidance_scale=3.0, - guess_mode=True, 
- ) - - image = output.images[0] - assert image.shape == (768, 512, 3) - - image_slice = image[-3:, -3:, -1] - expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - @require_torch_2 - def test_stable_diffusion_compile(self): - run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None) - - def test_v11_shuffle_global_pool_conditions(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "New York" - image = load_image( - "https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png" - ) - - output = pipe( - prompt, - image, - generator=generator, - output_type="np", - num_inference_steps=3, - guidance_scale=7.0, - ) - - image = output.images[0] - assert image.shape == (512, 640, 3) - - image_slice = image[-3:, -3:, -1] - expected_slice = np.array([0.1338, 0.1597, 0.1202, 0.1687, 0.1377, 0.1017, 0.2070, 0.1574, 0.1348]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_load_local(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") - pipe_1 = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - - controlnet = ControlNetModel.from_single_file( - "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" - ) - pipe_2 = StableDiffusionControlNetPipeline.from_single_file( - "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", - safety_checker=None, - controlnet=controlnet, - ) - pipes = [pipe_1, pipe_2] - images = [] - - for pipe in pipes: - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "bird" - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ) - - output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) - images.append(output.images[0]) - - del pipe - gc.collect() - torch.cuda.empty_cache() - - assert np.abs(images[0] - images[1]).sum() < 1e-3 - - -@slow -@require_torch_gpu -class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_pose_and_canny(self): - controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") - controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny] - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "bird and Chef" - image_canny = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ) - image_pose = 
load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" - ) - - output = pipe(prompt, [image_pose, image_canny], generator=generator, output_type="np", num_inference_steps=3) - - image = output.images[0] - - assert image.shape == (768, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose_canny_out.npy" - ) - - assert np.abs(expected_image - image).max() < 5e-2 diff --git a/spaces/AnimaLab/bias-test-gpt-pairs/README.md b/spaces/AnimaLab/bias-test-gpt-pairs/README.md deleted file mode 100644 index 84e4c8e6dd9dae470d422484bc9013a67457313f..0000000000000000000000000000000000000000 --- a/spaces/AnimaLab/bias-test-gpt-pairs/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Bias Test Gpt Pairs -emoji: 🦀 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: RKocielnik/bias-test-gpt-pairs ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/training.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/training.py deleted file mode 100644 index a86ebc7cfd5ed1c45bcfb0d66eb33b3e0f2b2fcd..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/training.py +++ /dev/null @@ -1,737 +0,0 @@ -import os - -os.environ["WANDB_MODE"] = "offline" -# os.environ["WANDB_DISABLED"] = "true" - -import json -import math -import random -import shutil -import sys -import threading -import time -import traceback -from datetime import datetime -from pathlib import Path - -import gradio as gr -import torch -import transformers -from datasets import Dataset, load_dataset -from peft import ( - LoraConfig, - get_peft_model, - prepare_model_for_kbit_training, - set_peft_model_state_dict -) -from peft.utils.other import \ - TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING as model_to_lora_modules -from transformers.models.auto.modeling_auto import ( - MODEL_FOR_CAUSAL_LM_MAPPING_NAMES -) - -from modules import shared, ui, utils -from modules.evaluate import ( - calculate_perplexity, - generate_markdown_table, - save_past_evaluations -) -from modules.logging_colors import logger -from modules.models import reload_model -from modules.utils import natural_keys - -MODEL_CLASSES = {v[1]: v[0] for v in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.items()} -PARAMETERS = ["lora_name", "always_override", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "raw_text_file", "overlap_len", "newline_favor_len", "higher_rank_limit", "warmup_steps", "optimizer", "hard_cut_string", "train_only_after", "stop_at_loss", "add_eos_token", "min_chars", "report_to"] -WANT_INTERRUPT = False - -train_log = {} -train_template = {} - - -def create_ui(): - mu = shared.args.multi_user - with gr.Tab("Training", elem_id="training-tab"): - with gr.Tab('Train LoRA', elem_id='lora-train-tab'): - tmp = gr.State('') - with gr.Row(): - with gr.Column(): - gr.Markdown("[Tutorial](https://github.com/oobabooga/text-generation-webui/blob/main/docs/Training-LoRAs.md)") - - with gr.Row(): - copy_from = gr.Dropdown(label='Copy parameters from', value='None', 
choices=utils.get_available_loras(), elem_classes=['slim-dropdown'], interactive=not mu) - ui.create_refresh_button(copy_from, lambda: None, lambda: {'choices': utils.get_available_loras()}, 'refresh-button', interactive=not mu) - - with gr.Row(): - with gr.Column(scale=5): - lora_name = gr.Textbox(label='Name', info='The name of your new LoRA file') - with gr.Column(): - always_override = gr.Checkbox(label='Override Existing Files', value=False, info='If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).', elem_classes=['no-background']) - - with gr.Row(): - with gr.Column(): - lora_rank = gr.Slider(label='LoRA Rank', value=32, minimum=0, maximum=1024, step=4, info='Also called dimension count. Higher values = larger file, more content control. Smaller values = smaller file, less control. Use 4 or 8 for style, 128 or 256 to teach, 1024+ for fine-detail on big data. More VRAM is needed for higher ranks.') - lora_alpha = gr.Slider(label='LoRA Alpha', value=64, minimum=0, maximum=2048, step=4, info='This divided by the rank becomes the scaling of the LoRA. Higher means stronger. A good standard value is twice your Rank.') - batch_size = gr.Slider(label='Batch Size', value=128, minimum=0, maximum=1024, step=4, info='Global batch size. The two batch sizes together determine gradient accumulation (gradientAccum = batch / microBatch). Higher gradient accum values lead to better quality training.') - micro_batch_size = gr.Slider(label='Micro Batch Size', value=4, minimum=1, maximum=128, step=1, info='Per-device batch size (NOTE: multiple devices not yet implemented). Increasing this will increase VRAM usage.') - cutoff_len = gr.Slider(label='Cutoff Length', minimum=0, maximum=2048, value=256, step=32, info='Cutoff length for text input. Essentially, how long of a line of text to feed in at a time. Higher values require drastically more VRAM.') - - with gr.Column(): - save_steps = gr.Number(label='Save every n steps', value=0, info='If above 0, a checkpoint of the LoRA will be saved every time this many steps pass.') - - epochs = gr.Number(label='Epochs', value=3, info='Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.') - learning_rate = gr.Textbox(label='Learning Rate', value='3e-4', info='In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.') - lr_scheduler_type = gr.Dropdown(label='LR Scheduler', value='linear', choices=['linear', 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'polynomial', 'inverse_sqrt'], info='Learning rate scheduler - defines how the learning rate changes over time. "Constant" means never change, "linear" means to go in a straight line from the learning rate down to 0, cosine follows a curve, etc.', elem_classes=['slim-dropdown']) - - with gr.Accordion(label='Advanced Options', open=False): - with gr.Row(): - with gr.Column(): - lora_dropout = gr.Slider(label='LoRA Dropout', minimum=0.0, maximum=1.0, step=0.025, value=0.05, info='Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.') - stop_at_loss = gr.Slider(label='Stop at loss', minimum=0.0, maximum=3.0, step=0.1, value=0.00, info='The process will automatically stop once the desired loss value is reached. 
(reasonable numbers are 1.5-1.8)') - optimizer = gr.Dropdown(label='Optimizer', value='adamw_torch', choices=['adamw_hf', 'adamw_torch', 'adamw_torch_fused', 'adamw_torch_xla', 'adamw_apex_fused', 'adafactor', 'adamw_bnb_8bit', 'adamw_anyprecision', 'sgd', 'adagrad'], info='Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.', elem_classes=['slim-dropdown']) - - with gr.Column(): - warmup_steps = gr.Number(label='Warmup Steps', value=100, info='For this many steps at the start, the learning rate will be lower than normal. This helps the trainer prepare the model and precompute statistics to improve the quality of training after the start.') - train_only_after = gr.Textbox(label='Train Only After', value='', info='Only consider text *after* this string in any given chunk for training. For Alpaca datasets, use "### Response:" to only train the response and ignore the input.') - - add_eos_token = gr.Checkbox(label='Add EOS token', value=False, info="Adds EOS token for each dataset item. In case of raw text, the EOS will be added at the Hard Cut") - - higher_rank_limit = gr.Checkbox(label='Enable higher ranks', value=False, info='If checked, changes Rank/Alpha slider above to go much higher. This will not work without a datacenter-class GPU.') - report_to = gr.Radio(label="Save detailed logs with", value="None", choices=["None", "wandb", "tensorboard"], interactive=True) - - with gr.Column(): - with gr.Tab(label='Formatted Dataset'): - with gr.Row(): - format = gr.Dropdown(choices=utils.get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'], interactive=not mu) - ui.create_refresh_button(format, lambda: None, lambda: {'choices': utils.get_datasets('training/formats', 'json')}, 'refresh-button', interactive=not mu) - - with gr.Row(): - dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu) - ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button', interactive=not mu) - - with gr.Row(): - eval_dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'], interactive=not mu) - ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button', interactive=not mu) - - eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.') - - with gr.Tab(label="Raw text file"): - with gr.Row(): - raw_text_file = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The raw text file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu) - ui.create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'txt')}, 'refresh-button', interactive=not mu) - - with gr.Row(): - with gr.Column(): - overlap_len = gr.Slider(label='Overlap Length', minimum=0, maximum=512, value=128, step=16, info='How many tokens from the prior chunk of text to include into 
the next chunk. (The chunks themselves will be of a size determined by Cutoff Length). Setting overlap to exactly half the cutoff length may be ideal.') - newline_favor_len = gr.Slider(label='Prefer Newline Cut Length', minimum=0, maximum=512, value=128, step=16, info='Length (in characters, not tokens) of the maximum distance to shift an overlap cut by to ensure chunks cut at newlines. If too low, cuts may occur in the middle of lines.') - - with gr.Column(): - hard_cut_string = gr.Textbox(label='Hard Cut String', value='\\n\\n\\n', info='String that indicates a hard cut between text parts. Helps prevent unwanted overlap.') - min_chars = gr.Number(label='Ignore small blocks', value=0, info='Ignore Hard Cut blocks that have less or equal characters than this number') - - with gr.Row(): - start_button = gr.Button("Start LoRA Training", variant='primary', interactive=not mu) - stop_button = gr.Button("Interrupt", interactive=not mu) - - output = gr.Markdown(value="Ready") - - with gr.Tab('Perplexity evaluation', elem_id='evaluate-tab'): - with gr.Row(): - with gr.Column(): - models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True, interactive=not mu) - evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + utils.get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.', interactive=not mu) - with gr.Row(): - with gr.Column(): - stride_length = gr.Slider(label='Stride', minimum=0, maximum=32768, value=512, step=256, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.') - - with gr.Column(): - max_length = gr.Slider(label='max_length', minimum=0, maximum=32768, value=0, step=256, info='The context for each evaluation. If set to 0, the maximum context length for the model will be used.') - - with gr.Row(): - start_current_evaluation = gr.Button("Evaluate loaded model", interactive=not mu) - start_evaluation = gr.Button("Evaluate selected models", interactive=not mu) - stop_evaluation = gr.Button("Interrupt", interactive=not mu) - - with gr.Column(): - evaluation_log = gr.Markdown(value='') - - evaluation_table = gr.Dataframe(value=generate_markdown_table(), interactive=True) - with gr.Row(): - save_comments = gr.Button('Save comments', elem_classes="small-button", interactive=not mu) - refresh_table = gr.Button('Refresh the table', elem_classes="small-button", interactive=not mu) - - # Training events - all_params = [lora_name, always_override, save_steps, micro_batch_size, batch_size, epochs, learning_rate, lr_scheduler_type, lora_rank, lora_alpha, lora_dropout, cutoff_len, dataset, eval_dataset, format, eval_steps, raw_text_file, overlap_len, newline_favor_len, higher_rank_limit, warmup_steps, optimizer, hard_cut_string, train_only_after, stop_at_loss, add_eos_token, min_chars, report_to] - - copy_from.change(do_copy_params, [copy_from] + all_params, all_params) - start_button.click(do_train, all_params, output) - stop_button.click(do_interrupt, None, None, queue=False) - higher_rank_limit.change(change_rank_limit, [higher_rank_limit], [lora_rank, lora_alpha]) - - # Evaluation events. For some reason, the interrupt event - # doesn't work with the .then() syntax, so I write them one - # by one in this ugly but functional way. 
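        # Binding each perplexity run as its own .click() event keeps a handle on it
        # (`ev` and `ev_cur` below), so the Interrupt button can cancel both runs
        # via `cancels=[ev, ev_cur]` instead of relying on a chained .then() call.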
- ev = start_evaluation.click(calculate_perplexity, [models, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False) - start_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False) - - start_current_evaluation.click(lambda: ['current model'], None, tmp) - ev_cur = start_current_evaluation.click(calculate_perplexity, [tmp, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False) - start_current_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False) - - stop_evaluation.click(None, None, None, cancels=[ev, ev_cur], queue=False) - refresh_table.click(generate_markdown_table, None, evaluation_table, show_progress=True) - save_comments.click( - save_past_evaluations, evaluation_table, None).then( - lambda: "Comments saved.", None, evaluation_log, show_progress=False) - - -def do_interrupt(): - global WANT_INTERRUPT - WANT_INTERRUPT = True - - -def do_copy_params(lora_name: str, *args): - f_name = f"{shared.args.lora_dir}/{clean_path(None, lora_name)}/training_parameters.json" - if Path(f_name).is_file(): - with open(f_name, 'r', encoding='utf-8') as format_file: - params: dict[str, str] = json.load(format_file) - else: - params = {} - - result = list() - for i in range(0, len(PARAMETERS)): - key = PARAMETERS[i] - if key in params: - result.append(params[key]) - else: - result.append(args[i]) - - return result - - -def change_rank_limit(use_higher_ranks: bool): - mult = 2 if use_higher_ranks else 1 - return {"maximum": 1024 * mult, "__type__": "update"}, {"maximum": 2048 * mult, "__type__": "update"} - - -def clean_path(base_path: str, path: str): - """Strips unusual symbols and forcibly builds a path as relative to the intended directory.""" - path = path.replace('\\', '/').replace('..', '_') - if base_path is None: - return path - - return f'{Path(base_path).absolute()}/{path}' - - -def backup_adapter(input_folder): - # Get the creation date of the file adapter_model.bin - try: - adapter_file = Path(f"{input_folder}/adapter_model.bin") - if adapter_file.is_file(): - - logger.info("Backing up existing LoRA adapter...") - creation_date = datetime.fromtimestamp(adapter_file.stat().st_ctime) - creation_date_str = creation_date.strftime("Backup-%Y-%m-%d") - - # Create the new subfolder - subfolder_path = Path(f"{input_folder}/{creation_date_str}") - subfolder_path.mkdir(parents=True, exist_ok=True) - - # Check if the file already exists in the subfolder - backup_adapter_file = Path(f"{input_folder}/{creation_date_str}/adapter_model.bin") - if backup_adapter_file.is_file(): - print(" - Backup already exists. 
Skipping backup process.") - return - - # Copy existing files to the new subfolder - existing_files = Path(input_folder).iterdir() - for file in existing_files: - if file.is_file(): - shutil.copy2(file, subfolder_path) - except Exception as e: - print("An error occurred in backup_adapter:", str(e)) - - -def calc_trainable_parameters(model): - trainable_params = 0 - all_param = 0 - for _, param in model.named_parameters(): - num_params = param.numel() - # if using DS Zero 3 and the weights are initialized empty - if num_params == 0 and hasattr(param, "ds_numel"): - num_params = param.ds_numel - - all_param += num_params - if param.requires_grad: - trainable_params += num_params - - return trainable_params, all_param - - -def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch_size: int, batch_size: int, epochs: int, learning_rate: str, lr_scheduler_type: str, lora_rank: int, lora_alpha: int, lora_dropout: float, cutoff_len: int, dataset: str, eval_dataset: str, format: str, eval_steps: int, raw_text_file: str, overlap_len: int, newline_favor_len: int, higher_rank_limit: bool, warmup_steps: int, optimizer: str, hard_cut_string: str, train_only_after: str, stop_at_loss: float, add_eos_token: bool, min_chars: int, report_to: str): - - if shared.args.monkey_patch: - from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import ( - replace_peft_model_with_int4_lora_model - ) - replace_peft_model_with_int4_lora_model() - - global WANT_INTERRUPT - WANT_INTERRUPT = False - - # == Input validation / processing == - yield "Preparing the input..." - lora_file_path = clean_path(None, lora_name) - if lora_file_path.strip() == '': - yield "Missing or invalid LoRA file name input." - return - - lora_file_path = f"{Path(shared.args.lora_dir)}/{lora_file_path}" - actual_lr = float(learning_rate) - model_type = type(shared.model).__name__ - - if model_type in MODEL_CLASSES: - model_id = MODEL_CLASSES[model_type] - else: - model_id = "llama" - if model_type == "PeftModelForCausalLM": - if len(shared.lora_names) > 0: - yield "You are trying to train a LoRA while you already have another LoRA loaded. This will work, but may have unexpected effects. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*" - logger.warning("Training LoRA over top of another LoRA. May have unexpected effects.") - else: - yield "Model ID not matched due to LoRA loading. Consider reloading base model. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*" - logger.warning("Model ID not matched due to LoRA loading. Consider reloading base model.") - else: - yield "LoRA training has only currently been validated for LLaMA, OPT, GPT-J, and GPT-NeoX models. Unexpected errors may follow. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*" - logger.warning(f"LoRA training has only currently been validated for LLaMA, OPT, GPT-J, and GPT-NeoX models. (Found model type: {model_type})") - - time.sleep(5) - - if shared.args.loader == 'GPTQ-for-LLaMa' and not shared.args.monkey_patch: - yield "LoRA training with GPTQ-for-LLaMa requires loading with `--monkey-patch`" - return - - if cutoff_len <= 0 or micro_batch_size <= 0 or batch_size <= 0 or actual_lr <= 0 or lora_rank <= 0 or lora_alpha <= 0: - yield "Cannot input zeroes." 
- return - - gradient_accumulation_steps = batch_size // micro_batch_size - shared.tokenizer.pad_token_id = 0 - shared.tokenizer.padding_side = "left" - - def encode(text, add_bos_token): - result = shared.tokenizer.encode(text, truncation=True, max_length=cutoff_len) - # Check if the first two tokens are BOS - if len(result) >= 2 and result[:2] == [shared.tokenizer.bos_token_id, shared.tokenizer.bos_token_id]: - result = result[1:] - - if not add_bos_token and result[0] == shared.tokenizer.bos_token_id: - result = result[1:] - return result - - def tokenize(prompt, append_eos_token=False): - - if train_only_after == '' or train_only_after not in prompt: - input_ids = encode(prompt, True) - - if append_eos_token and input_ids[-1] != shared.tokenizer.eos_token_id and len(input_ids) < cutoff_len: - input_ids.append(shared.tokenizer.eos_token_id) - - input_ids = [shared.tokenizer.pad_token_id] * (cutoff_len - len(input_ids)) + input_ids - labels = [1] * len(input_ids) - - else: - ind = prompt.index(train_only_after) + len(train_only_after) - before_tokens = encode(prompt[:ind], True) - after_tokens = encode(prompt[ind:], False) - - if append_eos_token and after_tokens[-1] != shared.tokenizer.eos_token_id: - after_tokens.append(shared.tokenizer.eos_token_id) - - full_length = len(after_tokens) + len(before_tokens) - if full_length > cutoff_len: - after_tokens = after_tokens[:cutoff_len - len(before_tokens)] - else: - before_tokens = [shared.tokenizer.pad_token_id] * (cutoff_len - full_length) + before_tokens - - input_ids = before_tokens + after_tokens - labels = [-100] * len(before_tokens) + [1] * len(after_tokens) - - input_ids = torch.tensor(input_ids) - return { - "input_ids": input_ids, - "labels": labels, - "attention_mask": input_ids.ne(shared.tokenizer.pad_token_id), - } - - train_template.clear() - - # == Prep the dataset, format, etc == - if raw_text_file not in ['None', '']: - train_template["template_type"] = "raw_text" - logger.info("Loading raw text file dataset...") - fullpath = clean_path('training/datasets', f'{raw_text_file}') - fullpath = Path(fullpath) - if fullpath.is_dir(): - logger.info('Training path directory {}'.format(raw_text_file)) - raw_text = "" - file_paths = sorted(fullpath.glob('*.txt'), key=lambda path: natural_keys(path.name)) - for file_path in file_paths: - if file_path.is_file(): - with file_path.open('r', encoding='utf-8') as file: - raw_text += file.read().replace('\r', '') - - logger.info(f"Loaded training file: {file_path.name}") - else: - with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file: - raw_text = file.read().replace('\r', '') - - cut_string = hard_cut_string.replace('\\n', '\n') - eos_added = 0 - out_tokens = [] - for text_part in raw_text.split(cut_string): - if len(text_part.strip()) <= min_chars: - continue - - tokens = shared.tokenizer.encode(text_part) - if add_eos_token: - tokens.append(shared.tokenizer.eos_token_id) - eos_added += 1 - - step = cutoff_len - overlap_len - if step <= 0: - yield f"Error: overlap_len ({overlap_len}) cannot be greater than or equal to cutoff_len ({cutoff_len})" - return - - out_tokens.extend(split_chunks(tokens, cutoff_len, step)) - - if eos_added > 0: - print(f"EOS added to {eos_added} text blocks") - - del raw_text # Note: could be a gig for a large dataset, so delete redundant data as we go to be safe on RAM - text_chunks = [shared.tokenizer.decode(x) for x in out_tokens] - del out_tokens - if newline_favor_len > 0: - text_chunks = [cut_chunk_for_newline(x, 
newline_favor_len) for x in text_chunks] - - train_data = Dataset.from_list([tokenize(x) for x in text_chunks]) - del text_chunks - eval_data = None - else: - if dataset in ['None', '']: - yield "Missing dataset choice input, cannot continue." - return - - if format in ['None', '']: - yield "Missing format choice input, cannot continue." - return - - train_template["template_type"] = "dataset" - - with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile: - format_data: dict[str, str] = json.load(formatFile) - - # == store training prompt == - for _, value in format_data.items(): - prompt_key = f"template_{len(train_template)}" - train_template[prompt_key] = value - - def generate_prompt(data_point: dict[str, str]): - for options, data in format_data.items(): - if set(options.split(',')) == set(x[0] for x in data_point.items() if (type(x[1]) is str and len(x[1].strip()) > 0)): - for key, val in data_point.items(): - if type(val) is str: - data = data.replace(f'%{key}%', val) - return data - raise RuntimeError(f'Data-point "{data_point}" has no keyset match within format "{list(format_data.keys())}"') - - def generate_and_tokenize_prompt(data_point): - prompt = generate_prompt(data_point) - return tokenize(prompt, add_eos_token) - - logger.info("Loading JSON datasets...") - data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json')) - train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30)) - - if eval_dataset == 'None': - eval_data = None - else: - eval_data = load_dataset("json", data_files=clean_path('training/datasets', f'{eval_dataset}.json')) - eval_data = eval_data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30)) - - # == We MUST reload model if it went through any previous training, even failed one == - if shared.model_dirty_from_training: - selected_model = shared.model_name - if selected_model: - print("\033[1;31;1m(Model has been modified by previous training, it needs to be reloaded...)\033[0;37;0m") - try: - yield f"Reloading {selected_model}..." - reload_model() - if shared.model is not None: - print("Model reloaded OK, continue with training.") - else: - return f"Failed to load {selected_model}." 
- except: - exc = traceback.format_exc() - logger.error('Failed to reload the model.') - print(exc) - return exc.replace('\n', '\n\n') - - # == Start prepping the model itself == - if not hasattr(shared.model, 'lm_head') or hasattr(shared.model.lm_head, 'weight'): - logger.info("Getting model ready...") - prepare_model_for_kbit_training(shared.model) - - # base model is now frozen and should not be reused for any other LoRA training than this one - shared.model_dirty_from_training = True - - logger.info("Preparing for training...") - config = LoraConfig( - r=lora_rank, - lora_alpha=lora_alpha, - target_modules=model_to_lora_modules[model_id], - lora_dropout=lora_dropout, - bias="none", - task_type="CAUSAL_LM" - ) - - # == Backup the existing adapter == - if not always_override: - backup_adapter(lora_file_path) - - # == get model trainable params - model_trainable_params, model_all_params = calc_trainable_parameters(shared.model) - - try: - logger.info("Creating LoRA model...") - lora_model = get_peft_model(shared.model, config) - if not always_override and Path(f"{lora_file_path}/adapter_model.bin").is_file(): - logger.info("Loading existing LoRA data...") - state_dict_peft = torch.load(f"{lora_file_path}/adapter_model.bin") - set_peft_model_state_dict(lora_model, state_dict_peft) - except: - yield traceback.format_exc().replace('\n', '\n\n') - return - - if shared.args.monkey_patch: - from alpaca_lora_4bit.autograd_4bit import Autograd4bitQuantLinear - from alpaca_lora_4bit.models import Linear4bitLt - for _, m in lora_model.named_modules(): - if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt): - if m.is_v1_model: - m.zeros = m.zeros.half() - m.scales = m.scales.half() - - class Tracked(): - def __init__(self): - self.current_steps = 0 - self.max_steps = 0 - self.did_save = False - - tracked = Tracked() - actual_save_steps = math.ceil(save_steps / gradient_accumulation_steps) - - class Callbacks(transformers.TrainerCallback): - def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs): - tracked.current_steps = state.global_step * gradient_accumulation_steps - tracked.max_steps = state.max_steps * gradient_accumulation_steps - if WANT_INTERRUPT: - control.should_epoch_stop = True - control.should_training_stop = True - elif state.global_step > 0 and actual_save_steps > 0 and state.global_step % actual_save_steps == 0: - lora_model.save_pretrained(f"{lora_file_path}/checkpoint-{tracked.current_steps}/") - # Save log - with open(f"{lora_file_path}/checkpoint-{tracked.current_steps}/training_log.json", 'w', encoding='utf-8') as file: - json.dump(train_log, file, indent=2) - # == Save training prompt == - with open(f"{lora_file_path}/checkpoint-{tracked.current_steps}/training_prompt.json", 'w', encoding='utf-8') as file: - json.dump(train_template, file, indent=2) - - def on_substep_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs): - tracked.current_steps += 1 - if WANT_INTERRUPT: - control.should_epoch_stop = True - control.should_training_stop = True - - def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, logs, **kwargs): - train_log.update(logs) - train_log.update({"current_steps": tracked.current_steps}) - if WANT_INTERRUPT: - print("\033[1;31;1mInterrupted by user\033[0;37;0m") - - print(f"\033[1;30;40mStep: 
{tracked.current_steps} \033[0;37;0m", end='') - if 'loss' in logs: - loss = float(logs['loss']) - if loss <= stop_at_loss: - control.should_epoch_stop = True - control.should_training_stop = True - print(f"\033[1;31;1mStop Loss {stop_at_loss} reached.\033[0;37;0m") - - trainer = transformers.Trainer( - model=lora_model, - train_dataset=train_data, - eval_dataset=eval_data, - args=transformers.TrainingArguments( - report_to=report_to if report_to != "None" else None, - per_device_train_batch_size=micro_batch_size, - gradient_accumulation_steps=gradient_accumulation_steps, - warmup_steps=math.ceil(warmup_steps / gradient_accumulation_steps), - num_train_epochs=epochs, - learning_rate=actual_lr, - fp16=False if shared.args.cpu else True, - optim=optimizer, - logging_steps=2 if stop_at_loss > 0 else 5, - evaluation_strategy="steps" if eval_data is not None else "no", - eval_steps=math.ceil(eval_steps / gradient_accumulation_steps) if eval_data is not None else None, - save_strategy="steps" if eval_data is not None else "no", - output_dir=lora_file_path, - lr_scheduler_type=lr_scheduler_type, - load_best_model_at_end=eval_data is not None, - # TODO: Enable multi-device support - ddp_find_unused_parameters=None, - no_cuda=shared.args.cpu, - ), - data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False), - callbacks=list([Callbacks()]) - ) - - lora_model.config.use_cache = False - - if torch.__version__ >= "2" and sys.platform != "win32": - lora_model = torch.compile(lora_model) - - # == Save parameters for reuse == - with open(f"{lora_file_path}/training_parameters.json", 'w', encoding='utf-8') as file: - vars = locals() - json.dump({x: vars[x] for x in PARAMETERS}, file, indent=2) - - # == Save training prompt == - with open(f"{lora_file_path}/training_prompt.json", 'w', encoding='utf-8') as file: - json.dump(train_template, file, indent=2) - - # == Main run and monitor loop == - logger.info("Starting training...") - yield "Starting..." - - lora_trainable_param, lora_all_param = calc_trainable_parameters(lora_model) - - projections_string = ", ".join([projection.replace("_proj", "") for projection in model_to_lora_modules[model_id]]) - - print(f"Training '{model_id}' model using ({projections_string}) projections") - - if lora_all_param > 0: - print(f"Trainable params: {lora_trainable_param:,d} ({100 * lora_trainable_param / lora_all_param:.4f} %), All params: {lora_all_param:,d} (Model: {model_all_params:,d})") - - train_log.update({"base_model_name": shared.model_name}) - train_log.update({"base_model_class": shared.model.__class__.__name__}) - train_log.update({"base_loaded_in_4bit": getattr(lora_model, "is_loaded_in_4bit", False)}) - train_log.update({"base_loaded_in_8bit": getattr(lora_model, "is_loaded_in_8bit", False)}) - train_log.update({"projections": projections_string}) - - if stop_at_loss > 0: - print(f"Monitoring loss \033[1;31;1m(Auto-Stop at: {stop_at_loss})\033[0;37;0m") - - if WANT_INTERRUPT: - yield "Interrupted before start." 
- return - - def log_train_dataset(trainer): - decoded_entries = [] - # Try to decode the entries and write the log file - try: - # Iterate over the first 10 elements in the dataset (or fewer if there are less than 10) - for i in range(min(10, len(trainer.train_dataset))): - decoded_text = shared.tokenizer.decode(trainer.train_dataset[i]['input_ids']) - decoded_entries.append({"value": decoded_text}) - - # Write the log file - Path('logs').mkdir(exist_ok=True) - with open(Path('logs/train_dataset_sample.json'), 'w') as json_file: - json.dump(decoded_entries, json_file, indent=4) - - logger.info("Log file 'train_dataset_sample.json' created in the 'logs' directory.") - except Exception as e: - logger.error(f"Failed to create log file due to error: {e}") - - def threaded_run(): - log_train_dataset(trainer) - trainer.train() - # Note: save in the thread in case the gradio thread breaks (eg browser closed) - lora_model.save_pretrained(lora_file_path) - logger.info("LoRA training run is completed and saved.") - # Save log - with open(f"{lora_file_path}/training_log.json", 'w', encoding='utf-8') as file: - json.dump(train_log, file, indent=2) - - thread = threading.Thread(target=threaded_run) - thread.start() - last_step = 0 - start_time = time.perf_counter() - - while thread.is_alive(): - time.sleep(0.5) - if WANT_INTERRUPT: - yield "Interrupting, please wait... *(Run will stop after the current training step completes.)*" - - elif tracked.current_steps != last_step: - last_step = tracked.current_steps - time_elapsed = time.perf_counter() - start_time - if time_elapsed <= 0: - timer_info = "" - total_time_estimate = 999 - else: - its = tracked.current_steps / time_elapsed - if its > 1: - timer_info = f"`{its:.2f}` it/s" - else: - timer_info = f"`{1.0/its:.2f}` s/it" - - total_time_estimate = (1.0 / its) * (tracked.max_steps) - - yield f"Running... **{tracked.current_steps}** / **{tracked.max_steps}** ... {timer_info}, {format_time(time_elapsed)} / {format_time(total_time_estimate)} ... {format_time(total_time_estimate - time_elapsed)} remaining" - - # Saving in the train thread might fail if an error occurs, so save here if so. - if not tracked.did_save: - logger.info("Training complete, saving...") - lora_model.save_pretrained(lora_file_path) - - if WANT_INTERRUPT: - logger.info("Training interrupted.") - yield f"Interrupted. Incomplete LoRA saved to `{lora_file_path}`." - else: - logger.info("Training complete!") - yield f"Done! LoRA saved to `{lora_file_path}`.\n\nBefore testing your new LoRA, make sure to first reload the model, as it is currently dirty from training." 
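# Illustration (editorial sketch with arbitrary toy values, not part of the original
# module): do_train() above slices each hard-cut block with
# split_chunks(tokens, cutoff_len, step), where step = cutoff_len - overlap_len, so
# consecutive windows share overlap_len tokens. For example, with cutoff_len=4 and
# overlap_len=2:
#
#     tokens = list(range(10))
#     list(split_chunks(tokens, size=4, step=4 - 2))
#     # -> [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9], [8, 9]]
#
# Each chunk repeats the last overlap_len tokens of the previous one, which is why
# the UI hint suggests setting the overlap to roughly half the cutoff length.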
- - -def split_chunks(arr, size, step): - for i in range(0, len(arr), step): - yield arr[i:i + size] - - -def cut_chunk_for_newline(chunk: str, max_length: int): - if '\n' not in chunk: - return chunk - - first_newline = chunk.index('\n') - if first_newline < max_length: - chunk = chunk[first_newline + 1:] - - if '\n' not in chunk: - return chunk - - last_newline = chunk.rindex('\n') - if len(chunk) - last_newline < max_length: - chunk = chunk[:last_newline] - - return chunk - - -def format_time(seconds: float): - if seconds < 120: - return f"`{seconds:.0f}` seconds" - - minutes = seconds / 60 - if minutes < 120: - return f"`{minutes:.0f}` minutes" - - hours = minutes / 60 - return f"`{hours:.0f}` hours" diff --git a/spaces/AnnasBlackHat/Image-Similarity/src/model/simlarity_model.py b/spaces/AnnasBlackHat/Image-Similarity/src/model/simlarity_model.py deleted file mode 100644 index da19670541877a906c342116c2ae7f899cc9ca07..0000000000000000000000000000000000000000 --- a/spaces/AnnasBlackHat/Image-Similarity/src/model/simlarity_model.py +++ /dev/null @@ -1,9 +0,0 @@ -from dataclasses import dataclass -from .similarity_interface import SimilarityInterface - -@dataclass -class SimilarityModel: - name: str - image_size: int - model_cls: SimilarityInterface - image_input_type: str = 'array' \ No newline at end of file diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/bbox.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/bbox.py deleted file mode 100644 index 0c4d58b6c91f652933974f519acd3403a833e906..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/bbox.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['bbox_overlaps']) - - -def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0): - """Calculate overlap between two set of bboxes. - - If ``aligned`` is ``False``, then calculate the ious between each bbox - of bboxes1 and bboxes2, otherwise the ious between each aligned pair of - bboxes1 and bboxes2. - - Args: - bboxes1 (Tensor): shape (m, 4) in format or empty. - bboxes2 (Tensor): shape (n, 4) in format or empty. - If aligned is ``True``, then m and n must be equal. - mode (str): "iou" (intersection over union) or iof (intersection over - foreground). 
- - Returns: - ious(Tensor): shape (m, n) if aligned == False else shape (m, 1) - - Example: - >>> bboxes1 = torch.FloatTensor([ - >>> [0, 0, 10, 10], - >>> [10, 10, 20, 20], - >>> [32, 32, 38, 42], - >>> ]) - >>> bboxes2 = torch.FloatTensor([ - >>> [0, 0, 10, 20], - >>> [0, 10, 10, 19], - >>> [10, 10, 20, 20], - >>> ]) - >>> bbox_overlaps(bboxes1, bboxes2) - tensor([[0.5000, 0.0000, 0.0000], - [0.0000, 0.0000, 1.0000], - [0.0000, 0.0000, 0.0000]]) - - Example: - >>> empty = torch.FloatTensor([]) - >>> nonempty = torch.FloatTensor([ - >>> [0, 0, 10, 9], - >>> ]) - >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) - >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) - >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) - """ - - mode_dict = {'iou': 0, 'iof': 1} - assert mode in mode_dict.keys() - mode_flag = mode_dict[mode] - # Either the boxes are empty or the length of boxes' last dimension is 4 - assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) - assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) - assert offset == 1 or offset == 0 - - rows = bboxes1.size(0) - cols = bboxes2.size(0) - if aligned: - assert rows == cols - - if rows * cols == 0: - return bboxes1.new(rows, 1) if aligned else bboxes1.new(rows, cols) - - if aligned: - ious = bboxes1.new_zeros(rows) - else: - ious = bboxes1.new_zeros((rows, cols)) - ext_module.bbox_overlaps( - bboxes1, bboxes2, ious, mode=mode_flag, aligned=aligned, offset=offset) - return ious diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/windows.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/windows.py deleted file mode 100644 index e7573c3d6ae773d852da06c107c07b253d44b496..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/windows.py +++ /dev/null @@ -1,195 +0,0 @@ -from __future__ import annotations - -import ctypes -import os -import sys -from functools import lru_cache -from typing import Callable - -from .api import PlatformDirsABC - - -class Windows(PlatformDirsABC): - """`MSDN on where to store app data files - `_. - Makes use of the - `appname `, - `appauthor `, - `version `, - `roaming `, - `opinion `, - `ensure_exists `. - """ - - @property - def user_data_dir(self) -> str: - """ - :return: data directory tied to the user, e.g. - ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or - ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming) - """ - const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(get_win_folder(const)) - return self._append_parts(path) - - def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str: - params = [] - if self.appname: - if self.appauthor is not False: - author = self.appauthor or self.appname - params.append(author) - params.append(self.appname) - if opinion_value is not None and self.opinion: - params.append(opinion_value) - if self.version: - params.append(self.version) - path = os.path.join(path, *params) - self._optionally_create_directory(path) - return path - - @property - def site_data_dir(self) -> str: - """:return: data directory shared by users, e.g. 
``C:\\ProgramData\\$appauthor\\$appname``""" - path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) - return self._append_parts(path) - - @property - def user_config_dir(self) -> str: - """:return: config directory tied to the user, same as `user_data_dir`""" - return self.user_data_dir - - @property - def site_config_dir(self) -> str: - """:return: config directory shared by the users, same as `site_data_dir`""" - return self.site_data_dir - - @property - def user_cache_dir(self) -> str: - """ - :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. - ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version`` - """ - path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) - return self._append_parts(path, opinion_value="Cache") - - @property - def site_cache_dir(self) -> str: - """:return: cache directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname\\Cache\\$version``""" - path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")) - return self._append_parts(path, opinion_value="Cache") - - @property - def user_state_dir(self) -> str: - """:return: state directory tied to the user, same as `user_data_dir`""" - return self.user_data_dir - - @property - def user_log_dir(self) -> str: - """ - :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it - """ - path = self.user_data_dir - if self.opinion: - path = os.path.join(path, "Logs") - self._optionally_create_directory(path) - return path - - @property - def user_documents_dir(self) -> str: - """ - :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents`` - """ - return os.path.normpath(get_win_folder("CSIDL_PERSONAL")) - - @property - def user_runtime_dir(self) -> str: - """ - :return: runtime directory tied to the user, e.g. - ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname`` - """ - path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp")) - return self._append_parts(path) - - -def get_win_folder_from_env_vars(csidl_name: str) -> str: - """Get folder from environment variables.""" - if csidl_name == "CSIDL_PERSONAL": # does not have an environment name - return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents") - - env_var_name = { - "CSIDL_APPDATA": "APPDATA", - "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE", - "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA", - }.get(csidl_name) - if env_var_name is None: - raise ValueError(f"Unknown CSIDL name: {csidl_name}") - result = os.environ.get(env_var_name) - if result is None: - raise ValueError(f"Unset environment variable: {env_var_name}") - return result - - -def get_win_folder_from_registry(csidl_name: str) -> str: - """Get folder from the registry. - - This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. 
- """ - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - "CSIDL_PERSONAL": "Personal", - }.get(csidl_name) - if shell_folder_name is None: - raise ValueError(f"Unknown CSIDL name: {csidl_name}") - if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows - raise NotImplementedError - import winreg - - key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders") - directory, _ = winreg.QueryValueEx(key, shell_folder_name) - return str(directory) - - -def get_win_folder_via_ctypes(csidl_name: str) -> str: - """Get folder with ctypes.""" - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - "CSIDL_PERSONAL": 5, - }.get(csidl_name) - if csidl_const is None: - raise ValueError(f"Unknown CSIDL name: {csidl_name}") - - buf = ctypes.create_unicode_buffer(1024) - windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker - windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if it has highbit chars. - if any(ord(c) > 255 for c in buf): - buf2 = ctypes.create_unicode_buffer(1024) - if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - - -def _pick_get_win_folder() -> Callable[[str], str]: - if hasattr(ctypes, "windll"): - return get_win_folder_via_ctypes - try: - import winreg # noqa: F401 - except ImportError: - return get_win_folder_from_env_vars - else: - return get_win_folder_from_registry - - -get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder()) - -__all__ = [ - "Windows", -] diff --git a/spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolox_l.py b/spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolox_l.py deleted file mode 100644 index 50833ca38c51fe9ac5e327d7c1c0561fb62249aa..0000000000000000000000000000000000000000 --- a/spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolox_l.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. 
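# YOLOX-L experiment config: it overrides the base Exp with depth and width
# multipliers of 1.0 (the YOLOX-L scaling) and derives exp_name from this
# file's name (yolox_l).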
- -import os - -from yolox.exp import Exp as MyExp - - -class Exp(MyExp): - def __init__(self): - super(Exp, self).__init__() - self.depth = 1.0 - self.width = 1.0 - self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] diff --git a/spaces/Audio-AGI/AudioSep/data/audiotext_dataset.py b/spaces/Audio-AGI/AudioSep/data/audiotext_dataset.py deleted file mode 100644 index 1e9cc2c037516f7768ffca8d8083d137a4879dba..0000000000000000000000000000000000000000 --- a/spaces/Audio-AGI/AudioSep/data/audiotext_dataset.py +++ /dev/null @@ -1,91 +0,0 @@ -import json -import random -import torch -import torchaudio -from torch.utils.data import Dataset - - -class AudioTextDataset(Dataset): - """Can sample data from audio-text databases - Params: - sampling_rate: audio sampling rate - max_clip_len: max length (seconds) of audio clip to be sampled - """ - def __init__( - self, - datafiles=[''], - sampling_rate=32000, - max_clip_len=5, - ): - all_data_json = [] - for datafile in datafiles: - with open(datafile, 'r') as fp: - data_json = json.load(fp)['data'] - all_data_json.extend(data_json) - self.all_data_json = all_data_json - - self.sampling_rate = sampling_rate - self.max_length = max_clip_len * sampling_rate - - def __len__(self): - return len(self.all_data_json) - - def _cut_or_randomcrop(self, waveform): - # waveform: [1, samples] - # random crop - if waveform.size(1) > self.max_length: - random_idx = random.randint(0, waveform.size(1)-self.max_length) - waveform = waveform[:, random_idx:random_idx+self.max_length] - else: - temp_wav = torch.zeros(1, self.max_length) - temp_wav[:, 0:waveform.size(1)] = waveform - waveform = temp_wav - - assert waveform.size(1) == self.max_length, \ - f"number of audio samples is {waveform.size(1)}" - - return waveform - - def _read_audio(self, index): - try: - audio_path = self.all_data_json[index]['wav'] - audio_data, audio_rate = torchaudio.load(audio_path, channels_first=True) - text = self.all_data_json[index]['caption'] - - # drop short utterance - if audio_data.size(1) < self.sampling_rate * 1: - raise Exception(f'{audio_path} is too short, drop it ...') - - return text, audio_data, audio_rate - - except Exception as e: - print(f'error: {e} occurs, when loading {audio_path}') - random_index = random.randint(0, len(self.all_data_json)-1) - return self._read_audio(index=random_index) - - def __getitem__(self, index): - # create a audio tensor - text, audio_data, audio_rate = self._read_audio(index) - audio_len = audio_data.shape[1] / audio_rate - # convert stero to single channel - if audio_data.shape[0] > 1: - # audio_data: [samples] - audio_data = (audio_data[0] + audio_data[1]) / 2 - else: - audio_data = audio_data.squeeze(0) - - # resample audio clip - if audio_rate != self.sampling_rate: - audio_data = torchaudio.functional.resample(audio_data, orig_freq=audio_rate, new_freq=self.sampling_rate) - - audio_data = audio_data.unsqueeze(0) - - audio_data = self._cut_or_randomcrop(audio_data) - - data_dict = { - 'text': text, - 'waveform': audio_data, - 'modality': 'audio_text' - } - - return data_dict diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/__init__.py deleted file mode 100644 index 259f669b78bd05815cb8d3351fd6c5fc9a1b85a1..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ 
-# Copyright (c) Facebook, Inc. and its affiliates. -from . import transforms # isort:skip - -from .build import ( - build_batch_data_loader, - build_detection_test_loader, - build_detection_train_loader, - get_detection_dataset_dicts, - load_proposals_into_dataset, - print_instances_class_histogram, -) -from .catalog import DatasetCatalog, MetadataCatalog, Metadata -from .common import DatasetFromList, MapDataset, ToIterableDataset -from .dataset_mapper import DatasetMapper - -# ensure the builtin datasets are registered -from . import datasets, samplers # isort:skip - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/Bart92/RVC_HF/lib/infer_pack/commons.py b/spaces/Bart92/RVC_HF/lib/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/lib/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = 
get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/BetterAPI/BetterChat/svelte.config.js b/spaces/BetterAPI/BetterChat/svelte.config.js deleted file mode 100644 index b856102c926a34f6bc655c8fbbc0f6acb9b939da..0000000000000000000000000000000000000000 --- a/spaces/BetterAPI/BetterChat/svelte.config.js +++ /dev/null @@ -1,26 +0,0 @@ -import adapter from "@sveltejs/adapter-node"; -import { vitePreprocess } from "@sveltejs/kit/vite"; -import dotenv from "dotenv"; -import pkg from "./package.json" assert { type: "json" }; - -dotenv.config({ path: "./.env.local" }); -dotenv.config({ path: "./.env" }); - -process.env.PUBLIC_VERSION = pkg.version.replace(/\.0\b/g, ""); - -/** @type {import('@sveltejs/kit').Config} */ -const config = { - // Consult https://kit.svelte.dev/docs/integrations#preprocessors - // for more information about preprocessors - preprocess: vitePreprocess(), - - kit: { - adapter: adapter(), - - paths: { - base: process.env.APP_BASE || "", - }, - }, -}; - -export default config; diff --git a/spaces/BetterAPI/BetterChat_new/src/lib/actions/snapScrollToBottom.ts b/spaces/BetterAPI/BetterChat_new/src/lib/actions/snapScrollToBottom.ts deleted file mode 100644 index 
0d9335466b5cd41ff49b8a7e6ed42c37c7562955..0000000000000000000000000000000000000000 --- a/spaces/BetterAPI/BetterChat_new/src/lib/actions/snapScrollToBottom.ts +++ /dev/null @@ -1,54 +0,0 @@ -import { navigating } from "$app/stores"; -import { tick } from "svelte"; -import { get } from "svelte/store"; - -const detachedOffset = 10; - -/** - * @param node element to snap scroll to bottom - * @param dependency pass in a dependency to update scroll on changes. - */ -export const snapScrollToBottom = (node: HTMLElement, dependency: any) => { - let prevScrollValue = node.scrollTop; - let isDetached = false; - - const handleScroll = () => { - // if user scrolled up, we detach - if (node.scrollTop < prevScrollValue) { - isDetached = true; - } - - // if user scrolled back to within 10px of bottom, we reattach - if (node.scrollTop - (node.scrollHeight - node.clientHeight) >= -detachedOffset) { - isDetached = false; - } - - prevScrollValue = node.scrollTop; - }; - - const updateScroll = async (_options: { force?: boolean } = {}) => { - const defaultOptions = { force: false }; - const options = { ...defaultOptions, ..._options }; - const { force } = options; - - if (!force && isDetached && !get(navigating)) return; - - // wait for next tick to ensure that the DOM is updated - await tick(); - - node.scrollTo({ top: node.scrollHeight }); - }; - - node.addEventListener("scroll", handleScroll); - - if (dependency) { - updateScroll({ force: true }); - } - - return { - update: updateScroll, - destroy: () => { - node.removeEventListener("scroll", handleScroll); - }, - }; -}; diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distro/distro.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distro/distro.py deleted file mode 100644 index 89e1868047225bbcdfe04bdc4bea3281bf91bc20..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distro/distro.py +++ /dev/null @@ -1,1399 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015,2016,2017 Nir Cohen -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -The ``distro`` package (``distro`` stands for Linux Distribution) provides -information about the Linux distribution it runs on, such as a reliable -machine-readable distro ID, or version information. - -It is the recommended replacement for Python's original -:py:func:`platform.linux_distribution` function, but it provides much more -functionality. An alternative implementation became necessary because Python -3.5 deprecated this function, and Python 3.8 removed it altogether. Its -predecessor function :py:func:`platform.dist` was already deprecated since -Python 2.6 and removed in Python 3.8. Still, there are many cases in which -access to OS distribution information is needed. See `Python issue 1322 -`_ for more information. 
-""" - -import argparse -import json -import logging -import os -import re -import shlex -import subprocess -import sys -import warnings -from typing import ( - Any, - Callable, - Dict, - Iterable, - Optional, - Sequence, - TextIO, - Tuple, - Type, -) - -try: - from typing import TypedDict -except ImportError: - # Python 3.7 - TypedDict = dict - -__version__ = "1.8.0" - - -class VersionDict(TypedDict): - major: str - minor: str - build_number: str - - -class InfoDict(TypedDict): - id: str - version: str - version_parts: VersionDict - like: str - codename: str - - -_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") -_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib") -_OS_RELEASE_BASENAME = "os-release" - -#: Translation table for normalizing the "ID" attribute defined in os-release -#: files, for use by the :func:`distro.id` method. -#: -#: * Key: Value as defined in the os-release file, translated to lower case, -#: with blanks translated to underscores. -#: -#: * Value: Normalized value. -NORMALIZED_OS_ID = { - "ol": "oracle", # Oracle Linux - "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap -} - -#: Translation table for normalizing the "Distributor ID" attribute returned by -#: the lsb_release command, for use by the :func:`distro.id` method. -#: -#: * Key: Value as returned by the lsb_release command, translated to lower -#: case, with blanks translated to underscores. -#: -#: * Value: Normalized value. -NORMALIZED_LSB_ID = { - "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4 - "enterpriseenterpriseserver": "oracle", # Oracle Linux 5 - "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation - "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server - "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode -} - -#: Translation table for normalizing the distro ID derived from the file name -#: of distro release files, for use by the :func:`distro.id` method. -#: -#: * Key: Value as derived from the file name of a distro release file, -#: translated to lower case, with blanks translated to underscores. -#: -#: * Value: Normalized value. -NORMALIZED_DISTRO_ID = { - "redhat": "rhel", # RHEL 6.x, 7.x -} - -# Pattern for content of distro release file (reversed) -_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( - r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)" -) - -# Pattern for base file name of distro release file -_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") - -# Base file names to be looked up for if _UNIXCONFDIR is not readable. -_DISTRO_RELEASE_BASENAMES = [ - "SuSE-release", - "arch-release", - "base-release", - "centos-release", - "fedora-release", - "gentoo-release", - "mageia-release", - "mandrake-release", - "mandriva-release", - "mandrivalinux-release", - "manjaro-release", - "oracle-release", - "redhat-release", - "rocky-release", - "sl-release", - "slackware-version", -] - -# Base file names to be ignored when searching for distro release file -_DISTRO_RELEASE_IGNORE_BASENAMES = ( - "debian_version", - "lsb-release", - "oem-release", - _OS_RELEASE_BASENAME, - "system-release", - "plesk-release", - "iredmail-release", -) - - -def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]: - """ - .. deprecated:: 1.6.0 - - :func:`distro.linux_distribution()` is deprecated. It should only be - used as a compatibility shim with Python's - :py:func:`platform.linux_distribution()`. 
Please use :func:`distro.id`, - :func:`distro.version` and :func:`distro.name` instead. - - Return information about the current OS distribution as a tuple - ``(id_name, version, codename)`` with items as follows: - - * ``id_name``: If *full_distribution_name* is false, the result of - :func:`distro.id`. Otherwise, the result of :func:`distro.name`. - - * ``version``: The result of :func:`distro.version`. - - * ``codename``: The extra item (usually in parentheses) after the - os-release version number, or the result of :func:`distro.codename`. - - The interface of this function is compatible with the original - :py:func:`platform.linux_distribution` function, supporting a subset of - its parameters. - - The data it returns may not exactly be the same, because it uses more data - sources than the original function, and that may lead to different data if - the OS distribution is not consistent across multiple data sources it - provides (there are indeed such distributions ...). - - Another reason for differences is the fact that the :func:`distro.id` - method normalizes the distro ID string to a reliable machine-readable value - for a number of popular OS distributions. - """ - warnings.warn( - "distro.linux_distribution() is deprecated. It should only be used as a " - "compatibility shim with Python's platform.linux_distribution(). Please use " - "distro.id(), distro.version() and distro.name() instead.", - DeprecationWarning, - stacklevel=2, - ) - return _distro.linux_distribution(full_distribution_name) - - -def id() -> str: - """ - Return the distro ID of the current distribution, as a - machine-readable string. - - For a number of OS distributions, the returned distro ID value is - *reliable*, in the sense that it is documented and that it does not change - across releases of the distribution. - - This package maintains the following reliable distro ID values: - - ============== ========================================= - Distro ID Distribution - ============== ========================================= - "ubuntu" Ubuntu - "debian" Debian - "rhel" RedHat Enterprise Linux - "centos" CentOS - "fedora" Fedora - "sles" SUSE Linux Enterprise Server - "opensuse" openSUSE - "amzn" Amazon Linux - "arch" Arch Linux - "buildroot" Buildroot - "cloudlinux" CloudLinux OS - "exherbo" Exherbo Linux - "gentoo" GenToo Linux - "ibm_powerkvm" IBM PowerKVM - "kvmibm" KVM for IBM z Systems - "linuxmint" Linux Mint - "mageia" Mageia - "mandriva" Mandriva Linux - "parallels" Parallels - "pidora" Pidora - "raspbian" Raspbian - "oracle" Oracle Linux (and Oracle Enterprise Linux) - "scientific" Scientific Linux - "slackware" Slackware - "xenserver" XenServer - "openbsd" OpenBSD - "netbsd" NetBSD - "freebsd" FreeBSD - "midnightbsd" MidnightBSD - "rocky" Rocky Linux - "aix" AIX - "guix" Guix System - ============== ========================================= - - If you have a need to get distros for reliable IDs added into this set, - or if you find that the :func:`distro.id` function returns a different - distro ID for one of the listed distros, please create an issue in the - `distro issue tracker`_. - - **Lookup hierarchy and transformations:** - - First, the ID is obtained from the following sources, in the specified - order. 
The first available and non-empty value is used: - - * the value of the "ID" attribute of the os-release file, - - * the value of the "Distributor ID" attribute returned by the lsb_release - command, - - * the first part of the file name of the distro release file, - - The so determined ID value then passes the following transformations, - before it is returned by this method: - - * it is translated to lower case, - - * blanks (which should not be there anyway) are translated to underscores, - - * a normalization of the ID is performed, based upon - `normalization tables`_. The purpose of this normalization is to ensure - that the ID is as reliable as possible, even across incompatible changes - in the OS distributions. A common reason for an incompatible change is - the addition of an os-release file, or the addition of the lsb_release - command, with ID values that differ from what was previously determined - from the distro release file name. - """ - return _distro.id() - - -def name(pretty: bool = False) -> str: - """ - Return the name of the current OS distribution, as a human-readable - string. - - If *pretty* is false, the name is returned without version or codename. - (e.g. "CentOS Linux") - - If *pretty* is true, the version and codename are appended. - (e.g. "CentOS Linux 7.1.1503 (Core)") - - **Lookup hierarchy:** - - The name is obtained from the following sources, in the specified order. - The first available and non-empty value is used: - - * If *pretty* is false: - - - the value of the "NAME" attribute of the os-release file, - - - the value of the "Distributor ID" attribute returned by the lsb_release - command, - - - the value of the "" field of the distro release file. - - * If *pretty* is true: - - - the value of the "PRETTY_NAME" attribute of the os-release file, - - - the value of the "Description" attribute returned by the lsb_release - command, - - - the value of the "" field of the distro release file, appended - with the value of the pretty version ("" and "" - fields) of the distro release file, if available. - """ - return _distro.name(pretty) - - -def version(pretty: bool = False, best: bool = False) -> str: - """ - Return the version of the current OS distribution, as a human-readable - string. - - If *pretty* is false, the version is returned without codename (e.g. - "7.0"). - - If *pretty* is true, the codename in parenthesis is appended, if the - codename is non-empty (e.g. "7.0 (Maipo)"). - - Some distributions provide version numbers with different precisions in - the different sources of distribution information. Examining the different - sources in a fixed priority order does not always yield the most precise - version (e.g. for Debian 8.2, or CentOS 7.1). - - Some other distributions may not provide this kind of information. In these - cases, an empty string would be returned. This behavior can be observed - with rolling releases distributions (e.g. Arch Linux). - - The *best* parameter can be used to control the approach for the returned - version: - - If *best* is false, the first non-empty version number in priority order of - the examined sources is returned. - - If *best* is true, the most precise version number out of all examined - sources is returned. - - **Lookup hierarchy:** - - In all cases, the version number is obtained from the following sources. 
- If *best* is false, this order represents the priority order: - - * the value of the "VERSION_ID" attribute of the os-release file, - * the value of the "Release" attribute returned by the lsb_release - command, - * the version number parsed from the "" field of the first line - of the distro release file, - * the version number parsed from the "PRETTY_NAME" attribute of the - os-release file, if it follows the format of the distro release files. - * the version number parsed from the "Description" attribute returned by - the lsb_release command, if it follows the format of the distro release - files. - """ - return _distro.version(pretty, best) - - -def version_parts(best: bool = False) -> Tuple[str, str, str]: - """ - Return the version of the current OS distribution as a tuple - ``(major, minor, build_number)`` with items as follows: - - * ``major``: The result of :func:`distro.major_version`. - - * ``minor``: The result of :func:`distro.minor_version`. - - * ``build_number``: The result of :func:`distro.build_number`. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.version_parts(best) - - -def major_version(best: bool = False) -> str: - """ - Return the major version of the current OS distribution, as a string, - if provided. - Otherwise, the empty string is returned. The major version is the first - part of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.major_version(best) - - -def minor_version(best: bool = False) -> str: - """ - Return the minor version of the current OS distribution, as a string, - if provided. - Otherwise, the empty string is returned. The minor version is the second - part of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.minor_version(best) - - -def build_number(best: bool = False) -> str: - """ - Return the build number of the current OS distribution, as a string, - if provided. - Otherwise, the empty string is returned. The build number is the third part - of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.build_number(best) - - -def like() -> str: - """ - Return a space-separated list of distro IDs of distributions that are - closely related to the current OS distribution in regards to packaging - and programming interfaces, for example distributions the current - distribution is a derivative from. - - **Lookup hierarchy:** - - This information item is only provided by the os-release file. - For details, see the description of the "ID_LIKE" attribute in the - `os-release man page - `_. - """ - return _distro.like() - - -def codename() -> str: - """ - Return the codename for the release of the current OS distribution, - as a string. - - If the distribution does not have a codename, an empty string is returned. - - Note that the returned codename is not always really a codename. For - example, openSUSE returns "x86_64". This function does not handle such - cases in any special way and just returns the string it finds, if any. - - **Lookup hierarchy:** - - * the codename within the "VERSION" attribute of the os-release file, if - provided, - - * the value of the "Codename" attribute returned by the lsb_release - command, - - * the value of the "" field of the distro release file. 
- """ - return _distro.codename() - - -def info(pretty: bool = False, best: bool = False) -> InfoDict: - """ - Return certain machine-readable information items about the current OS - distribution in a dictionary, as shown in the following example: - - .. sourcecode:: python - - { - 'id': 'rhel', - 'version': '7.0', - 'version_parts': { - 'major': '7', - 'minor': '0', - 'build_number': '' - }, - 'like': 'fedora', - 'codename': 'Maipo' - } - - The dictionary structure and keys are always the same, regardless of which - information items are available in the underlying data sources. The values - for the various keys are as follows: - - * ``id``: The result of :func:`distro.id`. - - * ``version``: The result of :func:`distro.version`. - - * ``version_parts -> major``: The result of :func:`distro.major_version`. - - * ``version_parts -> minor``: The result of :func:`distro.minor_version`. - - * ``version_parts -> build_number``: The result of - :func:`distro.build_number`. - - * ``like``: The result of :func:`distro.like`. - - * ``codename``: The result of :func:`distro.codename`. - - For a description of the *pretty* and *best* parameters, see the - :func:`distro.version` method. - """ - return _distro.info(pretty, best) - - -def os_release_info() -> Dict[str, str]: - """ - Return a dictionary containing key-value pairs for the information items - from the os-release file data source of the current OS distribution. - - See `os-release file`_ for details about these information items. - """ - return _distro.os_release_info() - - -def lsb_release_info() -> Dict[str, str]: - """ - Return a dictionary containing key-value pairs for the information items - from the lsb_release command data source of the current OS distribution. - - See `lsb_release command output`_ for details about these information - items. - """ - return _distro.lsb_release_info() - - -def distro_release_info() -> Dict[str, str]: - """ - Return a dictionary containing key-value pairs for the information items - from the distro release file data source of the current OS distribution. - - See `distro release file`_ for details about these information items. - """ - return _distro.distro_release_info() - - -def uname_info() -> Dict[str, str]: - """ - Return a dictionary containing key-value pairs for the information items - from the distro release file data source of the current OS distribution. - """ - return _distro.uname_info() - - -def os_release_attr(attribute: str) -> str: - """ - Return a single named information item from the os-release file data source - of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - - See `os-release file`_ for details about these information items. - """ - return _distro.os_release_attr(attribute) - - -def lsb_release_attr(attribute: str) -> str: - """ - Return a single named information item from the lsb_release command output - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - - See `lsb_release command output`_ for details about these information - items. 
- """ - return _distro.lsb_release_attr(attribute) - - -def distro_release_attr(attribute: str) -> str: - """ - Return a single named information item from the distro release file - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - - See `distro release file`_ for details about these information items. - """ - return _distro.distro_release_attr(attribute) - - -def uname_attr(attribute: str) -> str: - """ - Return a single named information item from the distro release file - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - """ - return _distro.uname_attr(attribute) - - -try: - from functools import cached_property -except ImportError: - # Python < 3.8 - class cached_property: # type: ignore - """A version of @property which caches the value. On access, it calls the - underlying function and sets the value in `__dict__` so future accesses - will not re-call the property. - """ - - def __init__(self, f: Callable[[Any], Any]) -> None: - self._fname = f.__name__ - self._f = f - - def __get__(self, obj: Any, owner: Type[Any]) -> Any: - assert obj is not None, f"call {self._fname} on an instance" - ret = obj.__dict__[self._fname] = self._f(obj) - return ret - - -class LinuxDistribution: - """ - Provides information about a OS distribution. - - This package creates a private module-global instance of this class with - default initialization arguments, that is used by the - `consolidated accessor functions`_ and `single source accessor functions`_. - By using default initialization arguments, that module-global instance - returns data about the current OS distribution (i.e. the distro this - package runs on). - - Normally, it is not necessary to create additional instances of this class. - However, in situations where control is needed over the exact data sources - that are used, instances of this class can be created with a specific - distro release file, or a specific os-release file, or without invoking the - lsb_release command. - """ - - def __init__( - self, - include_lsb: Optional[bool] = None, - os_release_file: str = "", - distro_release_file: str = "", - include_uname: Optional[bool] = None, - root_dir: Optional[str] = None, - include_oslevel: Optional[bool] = None, - ) -> None: - """ - The initialization method of this class gathers information from the - available data sources, and stores that in private instance attributes. - Subsequent access to the information items uses these private instance - attributes, so that the data sources are read only once. - - Parameters: - - * ``include_lsb`` (bool): Controls whether the - `lsb_release command output`_ is included as a data source. - - If the lsb_release command is not available in the program execution - path, the data source for the lsb_release command will be empty. - - * ``os_release_file`` (string): The path name of the - `os-release file`_ that is to be used as a data source. - - An empty string (the default) will cause the default path name to - be used (see `os-release file`_ for details). - - If the specified or defaulted os-release file does not exist, the - data source for the os-release file will be empty. 
- - * ``distro_release_file`` (string): The path name of the - `distro release file`_ that is to be used as a data source. - - An empty string (the default) will cause a default search algorithm - to be used (see `distro release file`_ for details). - - If the specified distro release file does not exist, or if no default - distro release file can be found, the data source for the distro - release file will be empty. - - * ``include_uname`` (bool): Controls whether uname command output is - included as a data source. If the uname command is not available in - the program execution path the data source for the uname command will - be empty. - - * ``root_dir`` (string): The absolute path to the root directory to use - to find distro-related information files. Note that ``include_*`` - parameters must not be enabled in combination with ``root_dir``. - - * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command - output is included as a data source. If the oslevel command is not - available in the program execution path the data source will be - empty. - - Public instance attributes: - - * ``os_release_file`` (string): The path name of the - `os-release file`_ that is actually used as a data source. The - empty string if no distro release file is used as a data source. - - * ``distro_release_file`` (string): The path name of the - `distro release file`_ that is actually used as a data source. The - empty string if no distro release file is used as a data source. - - * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter. - This controls whether the lsb information will be loaded. - - * ``include_uname`` (bool): The result of the ``include_uname`` - parameter. This controls whether the uname information will - be loaded. - - * ``include_oslevel`` (bool): The result of the ``include_oslevel`` - parameter. This controls whether (AIX) oslevel information will be - loaded. - - * ``root_dir`` (string): The result of the ``root_dir`` parameter. - The absolute path to the root directory to use to find distro-related - information files. - - Raises: - - * :py:exc:`ValueError`: Initialization parameters combination is not - supported. - - * :py:exc:`OSError`: Some I/O issue with an os-release file or distro - release file. - - * :py:exc:`UnicodeError`: A data source has unexpected characters or - uses an unexpected encoding. - """ - self.root_dir = root_dir - self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR - self.usr_lib_dir = ( - os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR - ) - - if os_release_file: - self.os_release_file = os_release_file - else: - etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME) - usr_lib_os_release_file = os.path.join( - self.usr_lib_dir, _OS_RELEASE_BASENAME - ) - - # NOTE: The idea is to respect order **and** have it set - # at all times for API backwards compatibility. 
- if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile( - usr_lib_os_release_file - ): - self.os_release_file = etc_dir_os_release_file - else: - self.os_release_file = usr_lib_os_release_file - - self.distro_release_file = distro_release_file or "" # updated later - - is_root_dir_defined = root_dir is not None - if is_root_dir_defined and (include_lsb or include_uname or include_oslevel): - raise ValueError( - "Including subprocess data sources from specific root_dir is disallowed" - " to prevent false information" - ) - self.include_lsb = ( - include_lsb if include_lsb is not None else not is_root_dir_defined - ) - self.include_uname = ( - include_uname if include_uname is not None else not is_root_dir_defined - ) - self.include_oslevel = ( - include_oslevel if include_oslevel is not None else not is_root_dir_defined - ) - - def __repr__(self) -> str: - """Return repr of all info""" - return ( - "LinuxDistribution(" - "os_release_file={self.os_release_file!r}, " - "distro_release_file={self.distro_release_file!r}, " - "include_lsb={self.include_lsb!r}, " - "include_uname={self.include_uname!r}, " - "include_oslevel={self.include_oslevel!r}, " - "root_dir={self.root_dir!r}, " - "_os_release_info={self._os_release_info!r}, " - "_lsb_release_info={self._lsb_release_info!r}, " - "_distro_release_info={self._distro_release_info!r}, " - "_uname_info={self._uname_info!r}, " - "_oslevel_info={self._oslevel_info!r})".format(self=self) - ) - - def linux_distribution( - self, full_distribution_name: bool = True - ) -> Tuple[str, str, str]: - """ - Return information about the OS distribution that is compatible - with Python's :func:`platform.linux_distribution`, supporting a subset - of its parameters. - - For details, see :func:`distro.linux_distribution`. - """ - return ( - self.name() if full_distribution_name else self.id(), - self.version(), - self._os_release_info.get("release_codename") or self.codename(), - ) - - def id(self) -> str: - """Return the distro ID of the OS distribution, as a string. - - For details, see :func:`distro.id`. - """ - - def normalize(distro_id: str, table: Dict[str, str]) -> str: - distro_id = distro_id.lower().replace(" ", "_") - return table.get(distro_id, distro_id) - - distro_id = self.os_release_attr("id") - if distro_id: - return normalize(distro_id, NORMALIZED_OS_ID) - - distro_id = self.lsb_release_attr("distributor_id") - if distro_id: - return normalize(distro_id, NORMALIZED_LSB_ID) - - distro_id = self.distro_release_attr("id") - if distro_id: - return normalize(distro_id, NORMALIZED_DISTRO_ID) - - distro_id = self.uname_attr("id") - if distro_id: - return normalize(distro_id, NORMALIZED_DISTRO_ID) - - return "" - - def name(self, pretty: bool = False) -> str: - """ - Return the name of the OS distribution, as a string. - - For details, see :func:`distro.name`. - """ - name = ( - self.os_release_attr("name") - or self.lsb_release_attr("distributor_id") - or self.distro_release_attr("name") - or self.uname_attr("name") - ) - if pretty: - name = self.os_release_attr("pretty_name") or self.lsb_release_attr( - "description" - ) - if not name: - name = self.distro_release_attr("name") or self.uname_attr("name") - version = self.version(pretty=True) - if version: - name = f"{name} {version}" - return name or "" - - def version(self, pretty: bool = False, best: bool = False) -> str: - """ - Return the version of the OS distribution, as a string. - - For details, see :func:`distro.version`. 
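# --- Illustrative sketch (not part of the deleted file) ---
# Creating a dedicated LinuxDistribution instance as described in the
# constructor docstring above. With ``root_dir`` set, the subprocess-backed
# data sources (lsb_release, uname, oslevel) stay disabled; enabling one of
# them explicitly raises ValueError. The chroot path below is hypothetical.
from distro import LinuxDistribution

dist = LinuxDistribution(root_dir="/mnt/chroot")  # reads /mnt/chroot/etc/os-release etc.
print(dist.id(), dist.version(), dist.codename())

try:
    LinuxDistribution(root_dir="/mnt/chroot", include_lsb=True)
except ValueError as exc:
    print(exc)  # subprocess data sources are disallowed together with root_dir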
- """ - versions = [ - self.os_release_attr("version_id"), - self.lsb_release_attr("release"), - self.distro_release_attr("version_id"), - self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( - "version_id", "" - ), - self._parse_distro_release_content( - self.lsb_release_attr("description") - ).get("version_id", ""), - self.uname_attr("release"), - ] - if self.uname_attr("id").startswith("aix"): - # On AIX platforms, prefer oslevel command output. - versions.insert(0, self.oslevel_info()) - elif self.id() == "debian" or "debian" in self.like().split(): - # On Debian-like, add debian_version file content to candidates list. - versions.append(self._debian_version) - version = "" - if best: - # This algorithm uses the last version in priority order that has - # the best precision. If the versions are not in conflict, that - # does not matter; otherwise, using the last one instead of the - # first one might be considered a surprise. - for v in versions: - if v.count(".") > version.count(".") or version == "": - version = v - else: - for v in versions: - if v != "": - version = v - break - if pretty and version and self.codename(): - version = f"{version} ({self.codename()})" - return version - - def version_parts(self, best: bool = False) -> Tuple[str, str, str]: - """ - Return the version of the OS distribution, as a tuple of version - numbers. - - For details, see :func:`distro.version_parts`. - """ - version_str = self.version(best=best) - if version_str: - version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?") - matches = version_regex.match(version_str) - if matches: - major, minor, build_number = matches.groups() - return major, minor or "", build_number or "" - return "", "", "" - - def major_version(self, best: bool = False) -> str: - """ - Return the major version number of the current distribution. - - For details, see :func:`distro.major_version`. - """ - return self.version_parts(best)[0] - - def minor_version(self, best: bool = False) -> str: - """ - Return the minor version number of the current distribution. - - For details, see :func:`distro.minor_version`. - """ - return self.version_parts(best)[1] - - def build_number(self, best: bool = False) -> str: - """ - Return the build number of the current distribution. - - For details, see :func:`distro.build_number`. - """ - return self.version_parts(best)[2] - - def like(self) -> str: - """ - Return the IDs of distributions that are like the OS distribution. - - For details, see :func:`distro.like`. - """ - return self.os_release_attr("id_like") or "" - - def codename(self) -> str: - """ - Return the codename of the OS distribution. - - For details, see :func:`distro.codename`. - """ - try: - # Handle os_release specially since distros might purposefully set - # this to empty string to have no codename - return self._os_release_info["codename"] - except KeyError: - return ( - self.lsb_release_attr("codename") - or self.distro_release_attr("codename") - or "" - ) - - def info(self, pretty: bool = False, best: bool = False) -> InfoDict: - """ - Return certain machine-readable information about the OS - distribution. - - For details, see :func:`distro.info`. 
- """ - return dict( - id=self.id(), - version=self.version(pretty, best), - version_parts=dict( - major=self.major_version(best), - minor=self.minor_version(best), - build_number=self.build_number(best), - ), - like=self.like(), - codename=self.codename(), - ) - - def os_release_info(self) -> Dict[str, str]: - """ - Return a dictionary containing key-value pairs for the information - items from the os-release file data source of the OS distribution. - - For details, see :func:`distro.os_release_info`. - """ - return self._os_release_info - - def lsb_release_info(self) -> Dict[str, str]: - """ - Return a dictionary containing key-value pairs for the information - items from the lsb_release command data source of the OS - distribution. - - For details, see :func:`distro.lsb_release_info`. - """ - return self._lsb_release_info - - def distro_release_info(self) -> Dict[str, str]: - """ - Return a dictionary containing key-value pairs for the information - items from the distro release file data source of the OS - distribution. - - For details, see :func:`distro.distro_release_info`. - """ - return self._distro_release_info - - def uname_info(self) -> Dict[str, str]: - """ - Return a dictionary containing key-value pairs for the information - items from the uname command data source of the OS distribution. - - For details, see :func:`distro.uname_info`. - """ - return self._uname_info - - def oslevel_info(self) -> str: - """ - Return AIX' oslevel command output. - """ - return self._oslevel_info - - def os_release_attr(self, attribute: str) -> str: - """ - Return a single named information item from the os-release file data - source of the OS distribution. - - For details, see :func:`distro.os_release_attr`. - """ - return self._os_release_info.get(attribute, "") - - def lsb_release_attr(self, attribute: str) -> str: - """ - Return a single named information item from the lsb_release command - output data source of the OS distribution. - - For details, see :func:`distro.lsb_release_attr`. - """ - return self._lsb_release_info.get(attribute, "") - - def distro_release_attr(self, attribute: str) -> str: - """ - Return a single named information item from the distro release file - data source of the OS distribution. - - For details, see :func:`distro.distro_release_attr`. - """ - return self._distro_release_info.get(attribute, "") - - def uname_attr(self, attribute: str) -> str: - """ - Return a single named information item from the uname command - output data source of the OS distribution. - - For details, see :func:`distro.uname_attr`. - """ - return self._uname_info.get(attribute, "") - - @cached_property - def _os_release_info(self) -> Dict[str, str]: - """ - Get the information items from the specified os-release file. - - Returns: - A dictionary containing all information items. - """ - if os.path.isfile(self.os_release_file): - with open(self.os_release_file, encoding="utf-8") as release_file: - return self._parse_os_release_content(release_file) - return {} - - @staticmethod - def _parse_os_release_content(lines: TextIO) -> Dict[str, str]: - """ - Parse the lines of an os-release file. - - Parameters: - - * lines: Iterable through the lines in the os-release file. - Each line must be a unicode string or a UTF-8 encoded byte - string. - - Returns: - A dictionary containing all information items. 
- """ - props = {} - lexer = shlex.shlex(lines, posix=True) - lexer.whitespace_split = True - - tokens = list(lexer) - for token in tokens: - # At this point, all shell-like parsing has been done (i.e. - # comments processed, quotes and backslash escape sequences - # processed, multi-line values assembled, trailing newlines - # stripped, etc.), so the tokens are now either: - # * variable assignments: var=value - # * commands or their arguments (not allowed in os-release) - # Ignore any tokens that are not variable assignments - if "=" in token: - k, v = token.split("=", 1) - props[k.lower()] = v - - if "version" in props: - # extract release codename (if any) from version attribute - match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"]) - if match: - release_codename = match.group(1) or match.group(2) - props["codename"] = props["release_codename"] = release_codename - - if "version_codename" in props: - # os-release added a version_codename field. Use that in - # preference to anything else Note that some distros purposefully - # do not have code names. They should be setting - # version_codename="" - props["codename"] = props["version_codename"] - elif "ubuntu_codename" in props: - # Same as above but a non-standard field name used on older Ubuntus - props["codename"] = props["ubuntu_codename"] - - return props - - @cached_property - def _lsb_release_info(self) -> Dict[str, str]: - """ - Get the information items from the lsb_release command output. - - Returns: - A dictionary containing all information items. - """ - if not self.include_lsb: - return {} - try: - cmd = ("lsb_release", "-a") - stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) - # Command not found or lsb_release returned error - except (OSError, subprocess.CalledProcessError): - return {} - content = self._to_str(stdout).splitlines() - return self._parse_lsb_release_content(content) - - @staticmethod - def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]: - """ - Parse the output of the lsb_release command. - - Parameters: - - * lines: Iterable through the lines of the lsb_release output. - Each line must be a unicode string or a UTF-8 encoded byte - string. - - Returns: - A dictionary containing all information items. - """ - props = {} - for line in lines: - kv = line.strip("\n").split(":", 1) - if len(kv) != 2: - # Ignore lines without colon. 
- continue - k, v = kv - props.update({k.replace(" ", "_").lower(): v.strip()}) - return props - - @cached_property - def _uname_info(self) -> Dict[str, str]: - if not self.include_uname: - return {} - try: - cmd = ("uname", "-rs") - stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) - except OSError: - return {} - content = self._to_str(stdout).splitlines() - return self._parse_uname_content(content) - - @cached_property - def _oslevel_info(self) -> str: - if not self.include_oslevel: - return "" - try: - stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL) - except (OSError, subprocess.CalledProcessError): - return "" - return self._to_str(stdout).strip() - - @cached_property - def _debian_version(self) -> str: - try: - with open( - os.path.join(self.etc_dir, "debian_version"), encoding="ascii" - ) as fp: - return fp.readline().rstrip() - except FileNotFoundError: - return "" - - @staticmethod - def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]: - if not lines: - return {} - props = {} - match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip()) - if match: - name, version = match.groups() - - # This is to prevent the Linux kernel version from - # appearing as the 'best' version on otherwise - # identifiable distributions. - if name == "Linux": - return {} - props["id"] = name.lower() - props["name"] = name - props["release"] = version - return props - - @staticmethod - def _to_str(bytestring: bytes) -> str: - encoding = sys.getfilesystemencoding() - return bytestring.decode(encoding) - - @cached_property - def _distro_release_info(self) -> Dict[str, str]: - """ - Get the information items from the specified distro release file. - - Returns: - A dictionary containing all information items. - """ - if self.distro_release_file: - # If it was specified, we use it and parse what we can, even if - # its file name or content does not match the expected pattern. - distro_info = self._parse_distro_release_file(self.distro_release_file) - basename = os.path.basename(self.distro_release_file) - # The file name pattern for user-specified distro release files - # is somewhat more tolerant (compared to when searching for the - # file), because we want to use what was specified as best as - # possible. - match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - else: - try: - basenames = [ - basename - for basename in os.listdir(self.etc_dir) - if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES - and os.path.isfile(os.path.join(self.etc_dir, basename)) - ] - # We sort for repeatability in cases where there are multiple - # distro specific files; e.g. CentOS, Oracle, Enterprise all - # containing `redhat-release` on top of their own. - basenames.sort() - except OSError: - # This may occur when /etc is not readable but we can't be - # sure about the *-release files. Check common entries of - # /etc for information. If they turn out to not be there the - # error is handled in `_parse_distro_release_file()`. - basenames = _DISTRO_RELEASE_BASENAMES - for basename in basenames: - match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if match is None: - continue - filepath = os.path.join(self.etc_dir, basename) - distro_info = self._parse_distro_release_file(filepath) - # The name is always present if the pattern matches. - if "name" not in distro_info: - continue - self.distro_release_file = filepath - break - else: # the loop didn't "break": no candidate. 
- return {} - - if match is not None: - distro_info["id"] = match.group(1) - - # CloudLinux < 7: manually enrich info with proper id. - if "cloudlinux" in distro_info.get("name", "").lower(): - distro_info["id"] = "cloudlinux" - - return distro_info - - def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]: - """ - Parse a distro release file. - - Parameters: - - * filepath: Path name of the distro release file. - - Returns: - A dictionary containing all information items. - """ - try: - with open(filepath, encoding="utf-8") as fp: - # Only parse the first line. For instance, on SLES there - # are multiple lines. We don't want them... - return self._parse_distro_release_content(fp.readline()) - except OSError: - # Ignore not being able to read a specific, seemingly version - # related file. - # See https://github.com/python-distro/distro/issues/162 - return {} - - @staticmethod - def _parse_distro_release_content(line: str) -> Dict[str, str]: - """ - Parse a line from a distro release file. - - Parameters: - * line: Line from the distro release file. Must be a unicode string - or a UTF-8 encoded byte string. - - Returns: - A dictionary containing all information items. - """ - matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) - distro_info = {} - if matches: - # regexp ensures non-None - distro_info["name"] = matches.group(3)[::-1] - if matches.group(2): - distro_info["version_id"] = matches.group(2)[::-1] - if matches.group(1): - distro_info["codename"] = matches.group(1)[::-1] - elif line: - distro_info["name"] = line.strip() - return distro_info - - -_distro = LinuxDistribution() - - -def main() -> None: - logger = logging.getLogger(__name__) - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(sys.stdout)) - - parser = argparse.ArgumentParser(description="OS distro info tool") - parser.add_argument( - "--json", "-j", help="Output in machine readable format", action="store_true" - ) - - parser.add_argument( - "--root-dir", - "-r", - type=str, - dest="root_dir", - help="Path to the root filesystem directory (defaults to /)", - ) - - args = parser.parse_args() - - if args.root_dir: - dist = LinuxDistribution( - include_lsb=False, - include_uname=False, - include_oslevel=False, - root_dir=args.root_dir, - ) - else: - dist = _distro - - if args.json: - logger.info(json.dumps(dist.info(), indent=4, sort_keys=True)) - else: - logger.info("Name: %s", dist.name(pretty=True)) - distribution_version = dist.version(pretty=True) - logger.info("Version: %s", distribution_version) - distribution_codename = dist.codename() - logger.info("Codename: %s", distribution_codename) - - -if __name__ == "__main__": - main() diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/config.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/config.py deleted file mode 100644 index 4492c89660c202acf882375258dffafff00a99ba..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/config.py +++ /dev/null @@ -1,377 +0,0 @@ -"""distutils.command.config - -Implements the Distutils 'config' command, a (mostly) empty command class -that exists mainly to be sub-classed by specific module distributions and -applications. The idea is that while every "config" command is different, -at least they're all named the same, and users always see "config" in the -list of standard commands. 
Also, this is a good place to put common -configure-like tasks: "try to compile this C code", or "figure out where -this header file lives". -""" - -import os -import re - -from distutils.core import Command -from distutils.errors import DistutilsExecError -from distutils.sysconfig import customize_compiler -from distutils import log - -LANG_EXT = {"c": ".c", "c++": ".cxx"} - - -class config(Command): - - description = "prepare to build" - - user_options = [ - ('compiler=', None, "specify the compiler type"), - ('cc=', None, "specify the compiler executable"), - ('include-dirs=', 'I', "list of directories to search for header files"), - ('define=', 'D', "C preprocessor macros to define"), - ('undef=', 'U', "C preprocessor macros to undefine"), - ('libraries=', 'l', "external C libraries to link with"), - ('library-dirs=', 'L', "directories to search for external C libraries"), - ('noisy', None, "show every action (compile, link, run, ...) taken"), - ( - 'dump-source', - None, - "dump generated source files before attempting to compile them", - ), - ] - - # The three standard command methods: since the "config" command - # does nothing by default, these are empty. - - def initialize_options(self): - self.compiler = None - self.cc = None - self.include_dirs = None - self.libraries = None - self.library_dirs = None - - # maximal output for now - self.noisy = 1 - self.dump_source = 1 - - # list of temporary files generated along-the-way that we have - # to clean at some point - self.temp_files = [] - - def finalize_options(self): - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - elif isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - if self.libraries is None: - self.libraries = [] - elif isinstance(self.libraries, str): - self.libraries = [self.libraries] - - if self.library_dirs is None: - self.library_dirs = [] - elif isinstance(self.library_dirs, str): - self.library_dirs = self.library_dirs.split(os.pathsep) - - def run(self): - pass - - # Utility methods for actual "config" commands. The interfaces are - # loosely based on Autoconf macros of similar names. Sub-classes - # may use these freely. - - def _check_compiler(self): - """Check that 'self.compiler' really is a CCompiler object; - if not, make it one. - """ - # We do this late, and only on-demand, because this is an expensive - # import. 
- from distutils.ccompiler import CCompiler, new_compiler - - if not isinstance(self.compiler, CCompiler): - self.compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=1 - ) - customize_compiler(self.compiler) - if self.include_dirs: - self.compiler.set_include_dirs(self.include_dirs) - if self.libraries: - self.compiler.set_libraries(self.libraries) - if self.library_dirs: - self.compiler.set_library_dirs(self.library_dirs) - - def _gen_temp_sourcefile(self, body, headers, lang): - filename = "_configtest" + LANG_EXT[lang] - with open(filename, "w") as file: - if headers: - for header in headers: - file.write("#include <%s>\n" % header) - file.write("\n") - file.write(body) - if body[-1] != "\n": - file.write("\n") - return filename - - def _preprocess(self, body, headers, include_dirs, lang): - src = self._gen_temp_sourcefile(body, headers, lang) - out = "_configtest.i" - self.temp_files.extend([src, out]) - self.compiler.preprocess(src, out, include_dirs=include_dirs) - return (src, out) - - def _compile(self, body, headers, include_dirs, lang): - src = self._gen_temp_sourcefile(body, headers, lang) - if self.dump_source: - dump_file(src, "compiling '%s':" % src) - (obj,) = self.compiler.object_filenames([src]) - self.temp_files.extend([src, obj]) - self.compiler.compile([src], include_dirs=include_dirs) - return (src, obj) - - def _link(self, body, headers, include_dirs, libraries, library_dirs, lang): - (src, obj) = self._compile(body, headers, include_dirs, lang) - prog = os.path.splitext(os.path.basename(src))[0] - self.compiler.link_executable( - [obj], - prog, - libraries=libraries, - library_dirs=library_dirs, - target_lang=lang, - ) - - if self.compiler.exe_extension is not None: - prog = prog + self.compiler.exe_extension - self.temp_files.append(prog) - - return (src, obj, prog) - - def _clean(self, *filenames): - if not filenames: - filenames = self.temp_files - self.temp_files = [] - log.info("removing: %s", ' '.join(filenames)) - for filename in filenames: - try: - os.remove(filename) - except OSError: - pass - - # XXX these ignore the dry-run flag: what to do, what to do? even if - # you want a dry-run build, you still need some sort of configuration - # info. My inclination is to make it up to the real config command to - # consult 'dry_run', and assume a default (minimal) configuration if - # true. The problem with trying to do it here is that you'd have to - # return either true or false from all the 'try' methods, neither of - # which is correct. - - # XXX need access to the header search path and maybe default macros. - - def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"): - """Construct a source file from 'body' (a string containing lines - of C/C++ code) and 'headers' (a list of header files to include) - and run it through the preprocessor. Return true if the - preprocessor succeeded, false if there were any errors. - ('body' probably isn't of much use, but what the heck.) - """ - from distutils.ccompiler import CompileError - - self._check_compiler() - ok = True - try: - self._preprocess(body, headers, include_dirs, lang) - except CompileError: - ok = False - - self._clean() - return ok - - def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"): - """Construct a source file (just like 'try_cpp()'), run it through - the preprocessor, and return true if any line of the output matches - 'pattern'. 'pattern' should either be a compiled regex object or a - string containing a regex. 
If both 'body' and 'headers' are None, - preprocesses an empty file -- which can be useful to determine the - symbols the preprocessor and compiler set by default. - """ - self._check_compiler() - src, out = self._preprocess(body, headers, include_dirs, lang) - - if isinstance(pattern, str): - pattern = re.compile(pattern) - - with open(out) as file: - match = False - while True: - line = file.readline() - if line == '': - break - if pattern.search(line): - match = True - break - - self._clean() - return match - - def try_compile(self, body, headers=None, include_dirs=None, lang="c"): - """Try to compile a source file built from 'body' and 'headers'. - Return true on success, false otherwise. - """ - from distutils.ccompiler import CompileError - - self._check_compiler() - try: - self._compile(body, headers, include_dirs, lang) - ok = True - except CompileError: - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - def try_link( - self, - body, - headers=None, - include_dirs=None, - libraries=None, - library_dirs=None, - lang="c", - ): - """Try to compile and link a source file, built from 'body' and - 'headers', to executable form. Return true on success, false - otherwise. - """ - from distutils.ccompiler import CompileError, LinkError - - self._check_compiler() - try: - self._link(body, headers, include_dirs, libraries, library_dirs, lang) - ok = True - except (CompileError, LinkError): - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - def try_run( - self, - body, - headers=None, - include_dirs=None, - libraries=None, - library_dirs=None, - lang="c", - ): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Return true on success, false - otherwise. - """ - from distutils.ccompiler import CompileError, LinkError - - self._check_compiler() - try: - src, obj, exe = self._link( - body, headers, include_dirs, libraries, library_dirs, lang - ) - self.spawn([exe]) - ok = True - except (CompileError, LinkError, DistutilsExecError): - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - # -- High-level methods -------------------------------------------- - # (these are the ones that are actually likely to be useful - # when implementing a real-world config command!) - - def check_func( - self, - func, - headers=None, - include_dirs=None, - libraries=None, - library_dirs=None, - decl=0, - call=0, - ): - """Determine if function 'func' is available by constructing a - source file that refers to 'func', and compiles and links it. - If everything succeeds, returns true; otherwise returns false. - - The constructed source file starts out by including the header - files listed in 'headers'. If 'decl' is true, it then declares - 'func' (as "int func()"); you probably shouldn't supply 'headers' - and set 'decl' true in the same call, or you might get errors about - a conflicting declarations for 'func'. Finally, the constructed - 'main()' function either references 'func' or (if 'call' is true) - calls it. 'libraries' and 'library_dirs' are used when - linking. 
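# --- Illustrative sketch (not part of the deleted file) ---
# A minimal custom command built on the probing helpers of this class. The
# command name is hypothetical and would be wired up via
# setup(cmdclass={"check_deps": check_deps}); each check compiles (and, for
# check_func / check_lib, links) a tiny throwaway C program, so a working C
# toolchain must be available when the command runs.
from distutils.command.config import config


class check_deps(config):
    description = "probe the build environment for optional dependencies"

    def run(self):
        have_zlib_h = self.check_header("zlib.h")
        have_libm_sin = self.check_func("sin", libraries=["m"], decl=1, call=1)
        have_libz = self.check_lib("z")
        self.announce(
            "zlib.h: %s, sin() in libm: %s, libz: %s"
            % (have_zlib_h, have_libm_sin, have_libz)
        )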
- """ - self._check_compiler() - body = [] - if decl: - body.append("int %s ();" % func) - body.append("int main () {") - if call: - body.append(" %s();" % func) - else: - body.append(" %s;" % func) - body.append("}") - body = "\n".join(body) + "\n" - - return self.try_link(body, headers, include_dirs, libraries, library_dirs) - - def check_lib( - self, - library, - library_dirs=None, - headers=None, - include_dirs=None, - other_libraries=[], - ): - """Determine if 'library' is available to be linked against, - without actually checking that any particular symbols are provided - by it. 'headers' will be used in constructing the source file to - be compiled, but the only effect of this is to check if all the - header files listed are available. Any libraries listed in - 'other_libraries' will be included in the link, in case 'library' - has symbols that depend on other libraries. - """ - self._check_compiler() - return self.try_link( - "int main (void) { }", - headers, - include_dirs, - [library] + other_libraries, - library_dirs, - ) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang="c"): - """Determine if the system header file named by 'header_file' - exists and can be found by the preprocessor; return true if so, - false otherwise. - """ - return self.try_cpp( - body="/* No body */", headers=[header], include_dirs=include_dirs - ) - - -def dump_file(filename, head=None): - """Dumps a file content into log.info. - - If head is not None, will be dumped before the file content. - """ - if head is None: - log.info('%s', filename) - else: - log.info(head) - file = open(filename) - try: - log.info(file.read()) - finally: - file.close() diff --git a/spaces/CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md b/spaces/CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md deleted file mode 100644 index 62b1f9e40124f137c98a2e4b1ff5eca3d7c89625..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Image Animation Using Thin Plate Spline Motion Model -emoji: 👁 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.48.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/WALT/mmdet/models/dense_heads/cascade_rpn_head.py b/spaces/CVPR/WALT/mmdet/models/dense_heads/cascade_rpn_head.py deleted file mode 100644 index e32ee461951e685fb44a461033293159e3439717..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/dense_heads/cascade_rpn_head.py +++ /dev/null @@ -1,784 +0,0 @@ -from __future__ import division -import copy -import warnings - -import torch -import torch.nn as nn -from mmcv import ConfigDict -from mmcv.cnn import normal_init -from mmcv.ops import DeformConv2d, batched_nms - -from mmdet.core import (RegionAssigner, build_assigner, build_sampler, - images_to_levels, multi_apply) -from ..builder import HEADS, build_head -from .base_dense_head import BaseDenseHead -from .rpn_head import RPNHead - - -class AdaptiveConv(nn.Module): - """AdaptiveConv used to adapt the sampling location with the anchors. - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the conv kernel. Default: 3 - stride (int or tuple, optional): Stride of the convolution. 
Default: 1 - padding (int or tuple, optional): Zero-padding added to both sides of - the input. Default: 1 - dilation (int or tuple, optional): Spacing between kernel elements. - Default: 3 - groups (int, optional): Number of blocked connections from input - channels to output channels. Default: 1 - bias (bool, optional): If set True, adds a learnable bias to the - output. Default: False. - type (str, optional): Type of adaptive conv, can be either 'offset' - (arbitrary anchors) or 'dilation' (uniform anchor). - Default: 'dilation'. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - dilation=3, - groups=1, - bias=False, - type='dilation'): - super(AdaptiveConv, self).__init__() - assert type in ['offset', 'dilation'] - self.adapt_type = type - - assert kernel_size == 3, 'Adaptive conv only supports kernels 3' - if self.adapt_type == 'offset': - assert stride == 1 and padding == 1 and groups == 1, \ - 'Adaptive conv offset mode only supports padding: {1}, ' \ - f'stride: {1}, groups: {1}' - self.conv = DeformConv2d( - in_channels, - out_channels, - kernel_size, - padding=padding, - stride=stride, - groups=groups, - bias=bias) - else: - self.conv = nn.Conv2d( - in_channels, - out_channels, - kernel_size, - padding=dilation, - dilation=dilation) - - def init_weights(self): - """Init weights.""" - normal_init(self.conv, std=0.01) - - def forward(self, x, offset): - """Forward function.""" - if self.adapt_type == 'offset': - N, _, H, W = x.shape - assert offset is not None - assert H * W == offset.shape[1] - # reshape [N, NA, 18] to (N, 18, H, W) - offset = offset.permute(0, 2, 1).reshape(N, -1, H, W) - offset = offset.contiguous() - x = self.conv(x, offset) - else: - assert offset is None - x = self.conv(x) - return x - - -@HEADS.register_module() -class StageCascadeRPNHead(RPNHead): - """Stage of CascadeRPNHead. - - Args: - in_channels (int): Number of channels in the input feature map. - anchor_generator (dict): anchor generator config. - adapt_cfg (dict): adaptation config. - bridged_feature (bool, optional): whether update rpn feature. - Default: False. - with_cls (bool, optional): wheather use classification branch. - Default: True. - sampling (bool, optional): wheather use sampling. Default: True. 
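# --- Illustrative sketch (not part of the deleted file) ---
# Exercising the two AdaptiveConv modes defined above (the conv used by each
# cascade RPN stage). Requires torch and mmcv; the 'offset' branch relies on
# mmcv's DeformConv2d, which may need a CUDA build of mmcv. Shapes are
# examples only.
import torch

feat = torch.randn(2, 256, 32, 32)

# 'dilation' mode is a plain 3x3 conv with dilation=3; no offsets are used.
dilated = AdaptiveConv(256, 256, type='dilation', dilation=3)
out = dilated(feat, None)                 # -> (2, 256, 32, 32)

# 'offset' mode expects one 18-dim offset vector per spatial location
# (3x3 kernel, an x/y pair per tap), flattened to (N, H*W, 18).
offset_conv = AdaptiveConv(256, 256, type='offset')
offsets = feat.new_zeros(2, 32 * 32, 18)
out = offset_conv(feat, offsets)          # -> (2, 256, 32, 32)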
- """ - - def __init__(self, - in_channels, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[1.0], - strides=[4, 8, 16, 32, 64]), - adapt_cfg=dict(type='dilation', dilation=3), - bridged_feature=False, - with_cls=True, - sampling=True, - **kwargs): - self.with_cls = with_cls - self.anchor_strides = anchor_generator['strides'] - self.anchor_scales = anchor_generator['scales'] - self.bridged_feature = bridged_feature - self.adapt_cfg = adapt_cfg - super(StageCascadeRPNHead, self).__init__( - in_channels, anchor_generator=anchor_generator, **kwargs) - - # override sampling and sampler - self.sampling = sampling - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # use PseudoSampler when sampling is False - if self.sampling and hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - def _init_layers(self): - """Init layers of a CascadeRPN stage.""" - self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels, - **self.adapt_cfg) - if self.with_cls: - self.rpn_cls = nn.Conv2d(self.feat_channels, - self.num_anchors * self.cls_out_channels, - 1) - self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) - self.relu = nn.ReLU(inplace=True) - - def init_weights(self): - """Init weights of a CascadeRPN stage.""" - self.rpn_conv.init_weights() - normal_init(self.rpn_reg, std=0.01) - if self.with_cls: - normal_init(self.rpn_cls, std=0.01) - - def forward_single(self, x, offset): - """Forward function of single scale.""" - bridged_x = x - x = self.relu(self.rpn_conv(x, offset)) - if self.bridged_feature: - bridged_x = x # update feature - cls_score = self.rpn_cls(x) if self.with_cls else None - bbox_pred = self.rpn_reg(x) - return bridged_x, cls_score, bbox_pred - - def forward(self, feats, offset_list=None): - """Forward function.""" - if offset_list is None: - offset_list = [None for _ in range(len(feats))] - return multi_apply(self.forward_single, feats, offset_list) - - def _region_targets_single(self, - anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - featmap_sizes, - label_channels=1): - """Get anchor targets based on region for single level.""" - assign_result = self.assigner.assign( - anchors, - valid_flags, - gt_bboxes, - img_meta, - featmap_sizes, - self.anchor_scales[0], - self.anchor_strides, - gt_bboxes_ignore=gt_bboxes_ignore, - gt_labels=None, - allowed_border=self.train_cfg.allowed_border) - flat_anchors = torch.cat(anchors) - sampling_result = self.sampler.sample(assign_result, flat_anchors, - gt_bboxes) - - num_anchors = flat_anchors.shape[0] - bbox_targets = torch.zeros_like(flat_anchors) - bbox_weights = torch.zeros_like(flat_anchors) - labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long) - label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - else: - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - if gt_labels is None: - labels[pos_inds] = 1 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - 
label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds) - - def region_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - featmap_sizes, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """See :func:`StageCascadeRPNHead.get_targets`.""" - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, - pos_inds_list, neg_inds_list) = multi_apply( - self._region_targets_single, - anchor_list, - valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - featmap_sizes=featmap_sizes, - label_channels=label_channels) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors) - return (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - - def get_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - featmap_sizes, - gt_bboxes_ignore=None, - label_channels=1): - """Compute regression and classification targets for anchors. - - Args: - anchor_list (list[list]): Multi level anchors of each image. - valid_flag_list (list[list]): Multi level valid flags of each - image. - gt_bboxes (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - featmap_sizes (list[Tensor]): Feature mapsize each level - gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each images - label_channels (int): Channel of label. - - Returns: - cls_reg_targets (tuple) - """ - if isinstance(self.assigner, RegionAssigner): - cls_reg_targets = self.region_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - featmap_sizes, - gt_bboxes_ignore_list=gt_bboxes_ignore, - label_channels=label_channels) - else: - cls_reg_targets = super(StageCascadeRPNHead, self).get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - label_channels=label_channels) - return cls_reg_targets - - def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes): - """ Get offest for deformable conv based on anchor shape - NOTE: currently support deformable kernel_size=3 and dilation=1 - - Args: - anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of - multi-level anchors - anchor_strides (list[int]): anchor stride of each level - - Returns: - offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv - kernel. 
- """ - - def _shape_offset(anchors, stride, ks=3, dilation=1): - # currently support kernel_size=3 and dilation=1 - assert ks == 3 and dilation == 1 - pad = (ks - 1) // 2 - idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device) - yy, xx = torch.meshgrid(idx, idx) # return order matters - xx = xx.reshape(-1) - yy = yy.reshape(-1) - w = (anchors[:, 2] - anchors[:, 0]) / stride - h = (anchors[:, 3] - anchors[:, 1]) / stride - w = w / (ks - 1) - dilation - h = h / (ks - 1) - dilation - offset_x = w[:, None] * xx # (NA, ks**2) - offset_y = h[:, None] * yy # (NA, ks**2) - return offset_x, offset_y - - def _ctr_offset(anchors, stride, featmap_size): - feat_h, feat_w = featmap_size - assert len(anchors) == feat_h * feat_w - - x = (anchors[:, 0] + anchors[:, 2]) * 0.5 - y = (anchors[:, 1] + anchors[:, 3]) * 0.5 - # compute centers on feature map - x = x / stride - y = y / stride - # compute predefine centers - xx = torch.arange(0, feat_w, device=anchors.device) - yy = torch.arange(0, feat_h, device=anchors.device) - yy, xx = torch.meshgrid(yy, xx) - xx = xx.reshape(-1).type_as(x) - yy = yy.reshape(-1).type_as(y) - - offset_x = x - xx # (NA, ) - offset_y = y - yy # (NA, ) - return offset_x, offset_y - - num_imgs = len(anchor_list) - num_lvls = len(anchor_list[0]) - dtype = anchor_list[0][0].dtype - device = anchor_list[0][0].device - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - - offset_list = [] - for i in range(num_imgs): - mlvl_offset = [] - for lvl in range(num_lvls): - c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl], - anchor_strides[lvl], - featmap_sizes[lvl]) - s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl], - anchor_strides[lvl]) - - # offset = ctr_offset + shape_offset - offset_x = s_offset_x + c_offset_x[:, None] - offset_y = s_offset_y + c_offset_y[:, None] - - # offset order (y0, x0, y1, x2, .., y8, x8, y9, x9) - offset = torch.stack([offset_y, offset_x], dim=-1) - offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2] - mlvl_offset.append(offset) - offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2] - offset_list = images_to_levels(offset_list, num_level_anchors) - return offset_list - - def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, - bbox_targets, bbox_weights, num_total_samples): - """Loss function on single scale.""" - # classification loss - if self.with_cls: - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - loss_cls = self.loss_cls( - cls_score, labels, label_weights, avg_factor=num_total_samples) - # regression loss - bbox_targets = bbox_targets.reshape(-1, 4) - bbox_weights = bbox_weights.reshape(-1, 4) - bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, it - # decodes the already encoded coordinates to absolute format. - anchors = anchors.reshape(-1, 4) - bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) - loss_reg = self.loss_bbox( - bbox_pred, - bbox_targets, - bbox_weights, - avg_factor=num_total_samples) - if self.with_cls: - return loss_cls, loss_reg - return None, loss_reg - - def loss(self, - anchor_list, - valid_flag_list, - cls_scores, - bbox_preds, - gt_bboxes, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - anchor_list (list[list]): Multi level anchors of each image. 
- cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. Default: None - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds] - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - featmap_sizes, - gt_bboxes_ignore=gt_bboxes_ignore, - label_channels=label_channels) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - if self.sampling: - num_total_samples = num_total_pos + num_total_neg - else: - # 200 is hard-coded average factor, - # which follows guided anchoring. - num_total_samples = sum([label.numel() - for label in labels_list]) / 200.0 - - # change per image, per level anchor_list to per_level, per_image - mlvl_anchor_list = list(zip(*anchor_list)) - # concat mlvl_anchor_list - mlvl_anchor_list = [ - torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list - ] - - losses = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - mlvl_anchor_list, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - num_total_samples=num_total_samples) - if self.with_cls: - return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1]) - return dict(loss_rpn_reg=losses[1]) - - def get_bboxes(self, - anchor_list, - cls_scores, - bbox_preds, - img_metas, - cfg, - rescale=False): - """Get proposal predict.""" - assert len(cls_scores) == len(bbox_preds) - num_levels = len(cls_scores) - - result_list = [] - for img_id in range(len(img_metas)): - cls_score_list = [ - cls_scores[i][img_id].detach() for i in range(num_levels) - ] - bbox_pred_list = [ - bbox_preds[i][img_id].detach() for i in range(num_levels) - ] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, - anchor_list[img_id], img_shape, - scale_factor, cfg, rescale) - result_list.append(proposals) - return result_list - - def refine_bboxes(self, anchor_list, bbox_preds, img_metas): - """Refine bboxes through stages.""" - num_levels = len(bbox_preds) - new_anchor_list = [] - for img_id in range(len(img_metas)): - mlvl_anchors = [] - for i in range(num_levels): - bbox_pred = bbox_preds[i][img_id].detach() - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - img_shape = img_metas[img_id]['img_shape'] - bboxes = self.bbox_coder.decode(anchor_list[img_id][i], - bbox_pred, img_shape) - mlvl_anchors.append(bboxes) - new_anchor_list.append(mlvl_anchors) - return new_anchor_list - - # TODO: temporary plan - def _get_bboxes_single(self, - cls_scores, - bbox_preds, - mlvl_anchors, - img_shape, - scale_factor, - cfg, - rescale=False): - """Transform outputs for a single batch item into bbox predictions. 
- - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (num_anchors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (num_anchors * 4, H, W). - mlvl_anchors (list[Tensor]): Box reference for each scale level - with shape (num_total_anchors, 4). - img_shape (tuple[int]): Shape of the input image, - (height, width, 3). - scale_factor (ndarray): Scale factor of the image arange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - - Returns: - Tensor: Labeled boxes have the shape of (n,5), where the - first 4 columns are bounding box positions - (tl_x, tl_y, br_x, br_y) and the 5-th column is a score - between 0 and 1. - """ - cfg = self.test_cfg if cfg is None else cfg - cfg = copy.deepcopy(cfg) - # bboxes from different level should be independent during NMS, - # level_ids are used as labels for batched NMS to separate them - level_ids = [] - mlvl_scores = [] - mlvl_bbox_preds = [] - mlvl_valid_anchors = [] - for idx in range(len(cls_scores)): - rpn_cls_score = cls_scores[idx] - rpn_bbox_pred = bbox_preds[idx] - assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] - rpn_cls_score = rpn_cls_score.permute(1, 2, 0) - if self.use_sigmoid_cls: - rpn_cls_score = rpn_cls_score.reshape(-1) - scores = rpn_cls_score.sigmoid() - else: - rpn_cls_score = rpn_cls_score.reshape(-1, 2) - # We set FG labels to [0, num_class-1] and BG label to - # num_class in RPN head since mmdet v2.5, which is unified to - # be consistent with other head since mmdet v2.0. In mmdet v2.0 - # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. 
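# Illustrative aside, not part of the deleted file: under the convention described in
# the comment above (mmdet >= 2.5 assigns the foreground label 0), a two-channel
# softmax RPN head reads its foreground probability from column 0, while a sigmoid
# head simply squashes its single channel. The dummy shapes below are assumptions.
import torch
logits = torch.randn(6, 2)             # (num_anchors, 2) for a softmax-based head
fg_new = logits.softmax(dim=1)[:, 0]   # mmdet >= 2.5: foreground label is 0
fg_old = logits.softmax(dim=1)[:, 1]   # mmdet 2.0 - 2.4: foreground label was 1
fg_sig = torch.randn(6).sigmoid()      # sigmoid-based head: one logit per anchor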
- scores = rpn_cls_score.softmax(dim=1)[:, 0] - rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) - anchors = mlvl_anchors[idx] - if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: - # sort is faster than topk - # _, topk_inds = scores.topk(cfg.nms_pre) - if torch.onnx.is_in_onnx_export(): - # sort op will be converted to TopK in onnx - # and k<=3480 in TensorRT - _, topk_inds = scores.topk(cfg.nms_pre) - scores = scores[topk_inds] - else: - ranked_scores, rank_inds = scores.sort(descending=True) - topk_inds = rank_inds[:cfg.nms_pre] - scores = ranked_scores[:cfg.nms_pre] - rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] - anchors = anchors[topk_inds, :] - mlvl_scores.append(scores) - mlvl_bbox_preds.append(rpn_bbox_pred) - mlvl_valid_anchors.append(anchors) - level_ids.append( - scores.new_full((scores.size(0), ), idx, dtype=torch.long)) - - scores = torch.cat(mlvl_scores) - anchors = torch.cat(mlvl_valid_anchors) - rpn_bbox_pred = torch.cat(mlvl_bbox_preds) - proposals = self.bbox_coder.decode( - anchors, rpn_bbox_pred, max_shape=img_shape) - ids = torch.cat(level_ids) - - # Skip nonzero op while exporting to ONNX - if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()): - w = proposals[:, 2] - proposals[:, 0] - h = proposals[:, 3] - proposals[:, 1] - valid_inds = torch.nonzero( - (w >= cfg.min_bbox_size) - & (h >= cfg.min_bbox_size), - as_tuple=False).squeeze() - if valid_inds.sum().item() != len(proposals): - proposals = proposals[valid_inds, :] - scores = scores[valid_inds] - ids = ids[valid_inds] - - # deprecate arguments warning - if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: - warnings.warn( - 'In rpn_proposal or test_cfg, ' - 'nms_thr has been moved to a dict named nms as ' - 'iou_threshold, max_num has been renamed as max_per_img, ' - 'name of original arguments and the way to specify ' - 'iou_threshold of NMS will be deprecated.') - if 'nms' not in cfg: - cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) - if 'max_num' in cfg: - if 'max_per_img' in cfg: - assert cfg.max_num == cfg.max_per_img, f'You ' \ - f'set max_num and ' \ - f'max_per_img at the same time, but get {cfg.max_num} ' \ - f'and {cfg.max_per_img} respectively' \ - 'Please delete max_num which will be deprecated.' - else: - cfg.max_per_img = cfg.max_num - if 'nms_thr' in cfg: - assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \ - f' iou_threshold in nms and ' \ - f'nms_thr at the same time, but get' \ - f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \ - f' respectively. Please delete the nms_thr ' \ - f'which will be deprecated.' - - dets, keep = batched_nms(proposals, scores, ids, cfg.nms) - return dets[:cfg.max_per_img] - - -@HEADS.register_module() -class CascadeRPNHead(BaseDenseHead): - """The CascadeRPNHead will predict more accurate region proposals, which is - required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN - consists of a sequence of RPNStage to progressively improve the accuracy of - the detected proposals. - - More details can be found in ``https://arxiv.org/abs/1909.06720``. - - Args: - num_stages (int): number of CascadeRPN stages. - stages (list[dict]): list of configs to build the stages. - train_cfg (list[dict]): list of configs at training time each stage. - test_cfg (dict): config at testing time. 
- """ - - def __init__(self, num_stages, stages, train_cfg, test_cfg): - super(CascadeRPNHead, self).__init__() - assert num_stages == len(stages) - self.num_stages = num_stages - self.stages = nn.ModuleList() - for i in range(len(stages)): - train_cfg_i = train_cfg[i] if train_cfg is not None else None - stages[i].update(train_cfg=train_cfg_i) - stages[i].update(test_cfg=test_cfg) - self.stages.append(build_head(stages[i])) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - def init_weights(self): - """Init weight of CascadeRPN.""" - for i in range(self.num_stages): - self.stages[i].init_weights() - - def loss(self): - """loss() is implemented in StageCascadeRPNHead.""" - pass - - def get_bboxes(self): - """get_bboxes() is implemented in StageCascadeRPNHead.""" - pass - - def forward_train(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None): - """Forward train function.""" - assert gt_labels is None, 'RPN does not require gt_labels' - - featmap_sizes = [featmap.size()[-2:] for featmap in x] - device = x[0].device - anchor_list, valid_flag_list = self.stages[0].get_anchors( - featmap_sizes, img_metas, device=device) - - losses = dict() - - for i in range(self.num_stages): - stage = self.stages[i] - - if stage.adapt_cfg['type'] == 'offset': - offset_list = stage.anchor_offset(anchor_list, - stage.anchor_strides, - featmap_sizes) - else: - offset_list = None - x, cls_score, bbox_pred = stage(x, offset_list) - rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, - bbox_pred, gt_bboxes, img_metas) - stage_loss = stage.loss(*rpn_loss_inputs) - for name, value in stage_loss.items(): - losses['s{}.{}'.format(i, name)] = value - - # refine boxes - if i < self.num_stages - 1: - anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, - img_metas) - if proposal_cfg is None: - return losses - else: - proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, - bbox_pred, img_metas, - self.test_cfg) - return losses, proposal_list - - def simple_test_rpn(self, x, img_metas): - """Simple forward test function.""" - featmap_sizes = [featmap.size()[-2:] for featmap in x] - device = x[0].device - anchor_list, _ = self.stages[0].get_anchors( - featmap_sizes, img_metas, device=device) - - for i in range(self.num_stages): - stage = self.stages[i] - if stage.adapt_cfg['type'] == 'offset': - offset_list = stage.anchor_offset(anchor_list, - stage.anchor_strides, - featmap_sizes) - else: - offset_list = None - x, cls_score, bbox_pred = stage(x, offset_list) - if i < self.num_stages - 1: - anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, - img_metas) - - proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, - bbox_pred, img_metas, - self.test_cfg) - return proposal_list - - def aug_test_rpn(self, x, img_metas): - """Augmented forward test function.""" - raise NotImplementedError diff --git a/spaces/CVPR/drawings-to-human/static/index.html b/spaces/CVPR/drawings-to-human/static/index.html deleted file mode 100644 index 3463a9e2f393f935f722ea0759c5a633fc76037a..0000000000000000000000000000000000000000 --- a/spaces/CVPR/drawings-to-human/static/index.html +++ /dev/null @@ -1,209 +0,0 @@ - - - - - - - - - - - - - - - - - - - - -

    Drawings to Human

    -

    This is an unofficial drawing tool for exploring the Text2Human generative human generator. Please check all the model features on this Space.

    -

    Thanks to

    -

    Authors: Yuming Jiang, Shuai Yang, Haonan Qiu, Wayne Wu, Chen Change Loy and Ziwei Liu

    -

    @hysts for the original Space implementation

    -
    More -

    The backend is powered by a Gradio application running on Spaces. You can also check the source code and clone it locally if you want:

    - -

    git clone https://huggingface.co/spaces/CVPR/Text2Human

    -

    Set the Brush Type


    Set the Brush Size


    Select a Template

    face

    Texture Description


    Random Seed


    Sample Steps

    - - - - - diff --git a/spaces/CVPR/regionclip-demo/detectron2/evaluation/__init__.py b/spaces/CVPR/regionclip-demo/detectron2/evaluation/__init__.py deleted file mode 100644 index 6be52b555f7fb49c7d8d2fa3792a51be81db0274..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/evaluation/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator -from .coco_evaluation import COCOEvaluator -from .rotated_coco_evaluation import RotatedCOCOEvaluator -from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset -from .lvis_evaluation import LVISEvaluator -from .panoptic_evaluation import COCOPanopticEvaluator -from .pascal_voc_evaluation import PascalVOCDetectionEvaluator -from .sem_seg_evaluation import SemSegEvaluator -from .testing import print_csv_format, verify_results -from .flickr30k_evaluation import FLICKR30KEvaluator - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/utils/metrics.py b/spaces/Caoyunkang/Segment-Any-Anomaly/utils/metrics.py deleted file mode 100644 index 370a8b0efdd43264a217a8aaf1bdfb1e905271f3..0000000000000000000000000000000000000000 --- a/spaces/Caoyunkang/Segment-Any-Anomaly/utils/metrics.py +++ /dev/null @@ -1,219 +0,0 @@ -import numpy as np -from skimage import measure -from sklearn.metrics import auc -from sklearn.metrics import precision_recall_curve -from sklearn.metrics import roc_auc_score -from sklearn.metrics import roc_curve - -def calculate_max_f1(gt, scores): - precision, recall, thresholds = precision_recall_curve(gt, scores) - a = 2 * precision * recall - b = precision + recall - f1s = np.divide(a, b, out=np.zeros_like(a), where=b != 0) - index = np.argmax(f1s) - max_f1 = f1s[index] - threshold = thresholds[index] - return max_f1, threshold - -def metric_cal(scores, gt_list, gt_mask_list, cal_pro=False): - # calculate image-level ROC AUC score - img_scores = scores.reshape(scores.shape[0], -1).max(axis=1) - gt_list = np.asarray(gt_list, dtype=int) - fpr, tpr, _ = roc_curve(gt_list, img_scores) - img_roc_auc = roc_auc_score(gt_list, img_scores) - # print('INFO: image ROCAUC: %.3f' % (img_roc_auc)) - - img_f1, img_threshold = calculate_max_f1(gt_list, img_scores) - - gt_mask = np.asarray(gt_mask_list, dtype=int) - pxl_f1, pxl_threshold = calculate_max_f1(gt_mask.flatten(), scores.flatten()) - - # calculate per-pixel level ROCAUC - fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten()) - per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten()) - - - # calculate max-f1 region - if cal_pro: - # pro_auc_score = cal_pro_metric(gt_mask_list, scores, fpr_thresh=0.3) - # calculate max-f1 region - max_f1_region = calculate_max_f1_region(gt_mask_list, scores) - - else: - # pro_auc_score = 0 - # calculate max-f1 region - max_f1_region = 0 - - result_dict = {'i_roc': img_roc_auc * 100, 'p_roc': per_pixel_rocauc * 100, - 'i_f1': img_f1 * 100, 'i_thresh': img_threshold, 'p_f1': pxl_f1 * 100, 'p_thresh': pxl_threshold, 'r_f1': max_f1_region * 100} - - return result_dict - - -def rescale(x): - return (x - x.min()) / (x.max() - x.min()) - - -def cal_pro_metric(labeled_imgs, score_imgs, fpr_thresh=0.3, max_steps=200): - labeled_imgs = np.array(labeled_imgs) - labeled_imgs[labeled_imgs <= 0.45] = 0 - labeled_imgs[labeled_imgs > 0.45] = 1 - labeled_imgs = labeled_imgs.astype(np.bool) - - 
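# Illustrative aside, not part of the deleted file: a tiny standalone check of the
# max-F1 threshold search performed by calculate_max_f1 above, using made-up labels
# and scores. Note that sklearn returns len(thresholds) == len(precision) - 1, so the
# argmax index is clamped before indexing thresholds.
import numpy as np
from sklearn.metrics import precision_recall_curve

gt = np.array([0, 0, 1, 1, 1])
scores = np.array([0.10, 0.40, 0.35, 0.80, 0.90])
precision, recall, thresholds = precision_recall_curve(gt, scores)
f1s = np.divide(2 * precision * recall, precision + recall,
                out=np.zeros_like(precision), where=(precision + recall) != 0)
best = np.argmax(f1s)
print(f1s[best], thresholds[min(best, len(thresholds) - 1)])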
max_th = score_imgs.max() - min_th = score_imgs.min() - delta = (max_th - min_th) / max_steps - - ious_mean = [] - ious_std = [] - pros_mean = [] - pros_std = [] - threds = [] - fprs = [] - binary_score_maps = np.zeros_like(score_imgs, dtype=bool) - for step in range(max_steps): - thred = max_th - step * delta - # segmentation - binary_score_maps[score_imgs <= thred] = 0 - binary_score_maps[score_imgs > thred] = 1 - - pro = [] # per region overlap - iou = [] # per image iou - # pro: find each connected gt region, compute the overlapped pixels between the gt region and predicted region - # iou: for each image, compute the ratio, i.e. intersection/union between the gt and predicted binary map - for i in range(len(binary_score_maps)): # for i th image - # pro (per region level) - label_map = measure.label(labeled_imgs[i], connectivity=2) - props = measure.regionprops(label_map) - for prop in props: - x_min, y_min, x_max, y_max = prop.bbox - cropped_pred_label = binary_score_maps[i][x_min:x_max, y_min:y_max] - # cropped_mask = masks[i][x_min:x_max, y_min:y_max] - cropped_mask = prop.filled_image # corrected! - intersection = np.logical_and(cropped_pred_label, cropped_mask).astype(np.float32).sum() - pro.append(intersection / prop.area) - # iou (per image level) - intersection = np.logical_and(binary_score_maps[i], labeled_imgs[i]).astype(np.float32).sum() - union = np.logical_or(binary_score_maps[i], labeled_imgs[i]).astype(np.float32).sum() - if labeled_imgs[i].any() > 0: # when the gt have no anomaly pixels, skip it - iou.append(intersection / union) - # against steps and average metrics on the testing data - ious_mean.append(np.array(iou).mean()) - # print("per image mean iou:", np.array(iou).mean()) - ious_std.append(np.array(iou).std()) - pros_mean.append(np.array(pro).mean()) - pros_std.append(np.array(pro).std()) - # fpr for pro-auc - masks_neg = ~labeled_imgs - fpr = np.logical_and(masks_neg, binary_score_maps).sum() / masks_neg.sum() - fprs.append(fpr) - threds.append(thred) - - # as array - threds = np.array(threds) - pros_mean = np.array(pros_mean) - pros_std = np.array(pros_std) - fprs = np.array(fprs) - - # default 30% fpr vs pro, pro_auc - idx = fprs <= fpr_thresh # find the indexs of fprs that is less than expect_fpr (default 0.3) - fprs_selected = fprs[idx] - fprs_selected = rescale(fprs_selected) # rescale fpr [0,0.3] -> [0, 1] - pros_mean_selected = pros_mean[idx] - pro_auc_score = auc(fprs_selected, pros_mean_selected) - # print("pro auc ({}% FPR):".format(int(expect_fpr * 100)), pro_auc_score) - return pro_auc_score - -def calculate_max_f1_region(labeled_imgs, score_imgs, pro_thresh=0.6, max_steps=200): - labeled_imgs = np.array(labeled_imgs) - # labeled_imgs[labeled_imgs <= 0.1] = 0 - # labeled_imgs[labeled_imgs > 0.1] = 1 - labeled_imgs = labeled_imgs.astype(bool) - - max_th = score_imgs.max() - min_th = score_imgs.min() - delta = (max_th - min_th) / max_steps - - f1_list = [] - recall_list = [] - precision_list = [] - - binary_score_maps = np.zeros_like(score_imgs, dtype=bool) - for step in range(max_steps): - thred = max_th - step * delta - # segmentation - binary_score_maps[score_imgs <= thred] = 0 - binary_score_maps[score_imgs > thred] = 1 - - pro = [] # per region overlap - - predict_region_number = 0 - gt_region_number = 0 - - # pro: find each connected gt region, compute the overlapped pixels between the gt region and predicted region - # iou: for each image, compute the ratio, i.e. 
intersection/union between the gt and predicted binary map - for i in range(len(binary_score_maps)): # for i th image - # pro (per region level) - label_map = measure.label(labeled_imgs[i], connectivity=2) - props = measure.regionprops(label_map) - - score_map = measure.label(binary_score_maps[i], connectivity=2) - score_props = measure.regionprops(score_map) - - predict_region_number += len(score_props) - gt_region_number += len(props) - - # if len(score_props) == 0 or len(props) == 0: - # pro.append(0) - # continue - - for score_prop in score_props: - x_min_0, y_min_0, x_max_0, y_max_0 = score_prop.bbox - cur_pros = [0] - for prop in props: - x_min_1, y_min_1, x_max_1, y_max_1 = prop.bbox - - x_min = min(x_min_0, x_min_1) - y_min = min(y_min_0, y_min_1) - x_max = max(x_max_0, x_max_1) - y_max = max(y_max_0, y_max_1) - - cropped_pred_label = binary_score_maps[i][x_min:x_max, y_min:y_max] - cropped_gt_label = labeled_imgs[i][x_min:x_max, y_min:y_max] - - # cropped_mask = masks[i][x_min:x_max, y_min:y_max] - # cropped_mask = prop.filled_image # corrected! - intersection = np.logical_and(cropped_pred_label, cropped_gt_label).astype(np.float32).sum() - union = np.logical_or(cropped_pred_label, cropped_gt_label).astype(np.float32).sum() - cur_pros.append(intersection / union) - - pro.append(max(cur_pros)) - - pro = np.array(pro) - - if gt_region_number == 0 or predict_region_number == 0: - print(f'gt_number: {gt_region_number}, pred_number: {predict_region_number}') - recall = 0 - precision = 0 - f1 = 0 - else: - recall = np.array(pro >= pro_thresh).astype(np.float32).sum() / gt_region_number - precision = np.array(pro >= pro_thresh).astype(np.float32).sum() / predict_region_number - - if recall == 0 or precision == 0: - f1 = 0 - else: - f1 = 2 * recall * precision / (recall + precision) - - - f1_list.append(f1) - recall_list.append(recall) - precision_list.append(precision) - - # as array - f1_list = np.array(f1_list) - max_f1 = f1_list.max() - cor_recall = recall_list[f1_list.argmax()] - cor_precision = precision_list[f1_list.argmax()] - print(f'cor recall: {cor_recall}, cor precision: {cor_precision}') - return max_f1 diff --git a/spaces/CarlDennis/HYTTS/text/mandarin.py b/spaces/CarlDennis/HYTTS/text/mandarin.py deleted file mode 100644 index ceae9fedf8e710607d598fe91f70f503926059bb..0000000000000000000000000000000000000000 --- a/spaces/CarlDennis/HYTTS/text/mandarin.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import re -import sys - -import jieba -import cn2an -import logging -from pypinyin import lazy_pinyin, BOPOMOFO - -logging.getLogger('jieba').setLevel(logging.WARNING) - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), 
- ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - if re.match('[\u3105-\u3129]', bopomofos[i][-1]): - bopomofos[i] += 'ˉ' - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i[aoe]', lambda x: 'y' + x.group(0)[1:], text) - text = re.sub('u[aoəe]', lambda x: 'w' + x.group(0)[1:], text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', lambda x: x.group(1) + - 'ɹ`' + x.group(2), text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', - lambda x: x.group(1) + 'ɹ' + x.group(2), text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/beat_head/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/beat_head/__init__.py deleted file mode 100644 index bc139b07ae703f255375b4fdd146adf55c16bbea..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/beat_head/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -from pathlib import Path -from typing import List - -from PIL.Image import Image as IMG -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.exception import TextOverLength -from meme_generator.utils import save_gif - -img_dir = Path(__file__).parent / "images" - - -def beat_head(images: List[BuildImage], texts: List[str], args): - text = texts[0] if texts else "怎么说话的你" - img = images[0].convert("RGBA") - locs = [(160, 121, 76, 76), (172, 124, 69, 69), (208, 166, 52, 52)] - frames: List[IMG] = [] - for i in range(3): - x, y, w, h = locs[i] - head = img.resize((w, h), keep_ratio=True).circle() - frame = BuildImage.open(img_dir / f"{i}.png") - frame.paste(head, (x, y), below=True) - try: - frame.draw_text( - (175, 28, 
316, 82), - text, - max_fontsize=50, - min_fontsize=10, - allow_wrap=True, - ) - except ValueError: - raise TextOverLength(text) - - frames.append(frame.image) - return save_gif(frames, 0.05) - - -add_meme( - "beat_head", - beat_head, - min_images=1, - max_images=1, - min_texts=0, - max_texts=1, - keywords=["拍头"], -) diff --git "a/spaces/Cong723/gpt-academic-public/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" "b/spaces/Cong723/gpt-academic-public/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" deleted file mode 100644 index 7c6a7ffb5cb2c42e6543c75d6ad9dd643f412cd9..0000000000000000000000000000000000000000 --- "a/spaces/Cong723/gpt-academic-public/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" +++ /dev/null @@ -1,29 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime -@CatchException -def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - for i in range(5): - currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month - currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day - i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。" - ) - chatbot[-1] = (i_say, gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 diff --git a/spaces/CosmoAI/ChitChat/README.md b/spaces/CosmoAI/ChitChat/README.md deleted file mode 100644 index c5365379c018cb94e5e2e740941095580ca09a04..0000000000000000000000000000000000000000 --- a/spaces/CosmoAI/ChitChat/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChitChat -emoji: 🔥 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/collect_env.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/collect_env.py deleted file mode 100644 index 2d0641dda61c9950cb54d0552106246248e571ef..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/collect_env.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-import PIL - -from torch.utils.collect_env import get_pretty_env_info - - -def get_pil_version(): - return "\n Pillow ({})".format(PIL.__version__) - - -def collect_env_info(): - env_str = get_pretty_env_info() - env_str += get_pil_version() - return env_str diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-928645ac.css b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-928645ac.css deleted file mode 100644 index 4329ebb21b609937b3a2fdd0c3a1ef2edf96b04c..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-928645ac.css +++ /dev/null @@ -1 +0,0 @@ -.container.svelte-19on2m6.svelte-19on2m6{display:flex;flex-direction:column;gap:var(--spacing-sm);padding:var(--block-padding)}.hl.svelte-19on2m6+.hl.svelte-19on2m6{margin-left:var(--size-1)}.textspan.svelte-19on2m6:last-child>.label.svelte-19on2m6{margin-right:0}.category-legend.svelte-19on2m6.svelte-19on2m6{display:flex;flex-wrap:wrap;gap:var(--spacing-sm);color:#000}.category-label.svelte-19on2m6.svelte-19on2m6{cursor:pointer;border-radius:var(--radius-xs);padding-right:var(--size-2);padding-left:var(--size-2);font-weight:var(--weight-semibold)}.color-legend.svelte-19on2m6.svelte-19on2m6{display:flex;justify-content:space-between;border-radius:var(--radius-xs);background:linear-gradient(to right,var(--color-purple),rgba(255,255,255,0),var(--color-red));padding:var(--size-1) var(--size-2);font-weight:var(--weight-semibold)}.textfield.svelte-19on2m6.svelte-19on2m6{box-sizing:border-box;border-radius:var(--radius-xs);background:var(--background-fill-primary);background-color:transparent;max-width:var(--size-full);line-height:var(--scale-4);word-break:break-all}.textspan.svelte-19on2m6.svelte-19on2m6{transition:.15s;border-radius:var(--radius-xs);padding-top:2.5px;padding-right:var(--size-1);padding-bottom:3.5px;padding-left:var(--size-1);color:#000}.label.svelte-19on2m6.svelte-19on2m6{transition:.15s;margin-top:1px;margin-right:calc(var(--size-1) * -1);border-radius:var(--radius-xs);padding:1px 5px;color:var(--body-text-color);color:#fff;font-weight:var(--weight-bold);font-size:var(--text-sm);text-transform:uppercase}.text.svelte-19on2m6.svelte-19on2m6{color:#000}.score-text.svelte-19on2m6 .text.svelte-19on2m6{color:var(--body-text-color)}.score-text.svelte-19on2m6.svelte-19on2m6{margin-right:var(--size-1);padding:var(--size-1)}.no-cat.svelte-19on2m6.svelte-19on2m6,.no-label.svelte-19on2m6.svelte-19on2m6{color:var(--body-text-color)}.selectable.svelte-19on2m6.svelte-19on2m6{cursor:pointer} diff --git a/spaces/DarwinAnim8or/GPT-Greentext-Playground/README.md b/spaces/DarwinAnim8or/GPT-Greentext-Playground/README.md deleted file mode 100644 index fc9b471f088daba6f369ff0bac495c4a84ecfb7e..0000000000000000000000000000000000000000 --- a/spaces/DarwinAnim8or/GPT-Greentext-Playground/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GPT Greentext Playground -emoji: ✍️ -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: true -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Datasculptor/MusicGen/audiocraft/modules/transformer.py b/spaces/Datasculptor/MusicGen/audiocraft/modules/transformer.py deleted file mode 100644 index 
e69cca829d774d0b8b36c0de9b7924373da81b43..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/MusicGen/audiocraft/modules/transformer.py +++ /dev/null @@ -1,747 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Transformer model, with streaming support, xformer attention support -and easy causal attention with a potentially finite receptive field. - -See `StreamingTransformer` for more information. - -Unlike regular PyTorch Transformer, we make the hard choice that batches are first. -""" - -import typing as tp - -from einops import rearrange -import torch -import torch.nn as nn -from torch.nn import functional as F -from torch.utils.checkpoint import checkpoint as torch_checkpoint -from xformers import ops - -from .rope import RotaryEmbedding -from .streaming import StreamingModule - -_efficient_attention_backend: str = 'torch' - - -def set_efficient_attention_backend(backend: str = 'torch'): - # Using torch by default, it seems a bit faster on older P100 GPUs (~20% faster). - global _efficient_attention_backend - assert _efficient_attention_backend in ['xformers', 'torch'] - _efficient_attention_backend = backend - - -def _get_attention_time_dimension() -> int: - if _efficient_attention_backend == 'torch': - return 2 - else: - return 1 - - -def _is_profiled() -> bool: - # Return true if we are currently running with a xformers profiler activated. - try: - from xformers.profiler import profiler - except ImportError: - return False - return profiler._Profiler._CURRENT_PROFILER is not None - - -def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module: - """Create normalization module for transformer encoder layer. - - Args: - norm_type (str): Normalization method. - dim (int): Dimension of the normalized layer. - **kwargs (dict): Additional parameters for normalization layer. - Returns: - nn.Module: Normalization module. - """ - if norm_type == 'layer_norm': - return nn.LayerNorm(dim, eps=1e-5, **kwargs) - else: - raise ValueError(f"Unknown norm type: {norm_type}") - - -def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000, - dtype: torch.dtype = torch.float32) -> torch.Tensor: - """Create sinusoidal positional embedding, with shape `[B, T, C]`. - - Args: - positions (torch.Tensor): LongTensor of positions. - dim (int): Dimension of the embedding. - max_period (float): Maximum period of the cosine/sine functions. - dtype (torch.dtype or str): dtype to use to generate the embedding. - Returns: - torch.Tensor: Sinusoidal positional embedding. 
- """ - # We aim for BTC format - assert dim % 2 == 0 - half_dim = dim // 2 - positions = positions.to(dtype) - adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1) - max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point - phase = positions / (max_period_tensor ** (adim / (half_dim - 1))) - return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1) - - -def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: - """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers""" - if n_rep == 1: - return x - if _efficient_attention_backend == 'torch': - bs, n_kv_heads, slen, head_dim = x.shape - return ( - x[:, :, None, :, :] - .expand(bs, n_kv_heads, n_rep, slen, head_dim) - .reshape(bs, n_kv_heads * n_rep, slen, head_dim) - ) - else: - bs, slen, n_kv_heads, head_dim = x.shape - return ( - x[:, :, :, None, :] - .expand(bs, slen, n_kv_heads, n_rep, head_dim) - .reshape(bs, slen, n_kv_heads * n_rep, head_dim) - ) - - -class LayerScale(nn.Module): - """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). - This rescales diagonaly the residual outputs close to 0, with a learnt scale. - - Args: - channels (int): Number of channels. - init (float): Initial scale. - channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype or None): dtype to use to initialize the module. - """ - def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True, - device=None, dtype=None): - super().__init__() - self.channel_last = channel_last - self.scale = nn.Parameter( - torch.full((channels,), init, - requires_grad=True, device=device, dtype=dtype)) - - def forward(self, x: torch.Tensor): - if self.channel_last: - return self.scale * x - else: - return self.scale[:, None] * x - - -class StreamingMultiheadAttention(StreamingModule): - """Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation. - - Args: - embed_dim (int): Dimension to project to. - num_heads (int): Number of heads. - dropout (float): Dropout level. - bias (bool): Use bias in projections. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - rope (`RotaryEmbedding` or None): Rope embedding to use. - cross_attention: Should be true when used as a cross attention. - All keys and values must be available at once, streaming is only for the queries. - Cannot be used with `causal` or `rope` (as it wouldn't make sens to - intepret the time steps in the keys relative to those in the queries). - safe_streaming (bool): Bug fix, will go away with xformers update. - qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product. - kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads). - This will lead to faster decoding time on A100 or other GPUs with tensorcore. - device (torch.device or None): Sevice on which to initialize. - dtype (torch.dtype or None): dtype to use. 
- """ - def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, - causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False, - memory_efficient: bool = False, attention_as_float32: bool = False, - rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False, - safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1, - device=None, dtype=None): - super().__init__() - factory_kwargs = {'device': device, 'dtype': dtype} - if past_context is not None: - assert causal - - self.embed_dim = embed_dim - self.causal = causal - self.past_context = past_context - self.memory_efficient = memory_efficient - self.attention_as_float32 = attention_as_float32 - self.rope = rope - self.cross_attention = cross_attention - self.safe_streaming = safe_streaming - self.num_heads = num_heads - self.dropout = dropout - self.kv_repeat = kv_repeat - if cross_attention: - assert not causal, "Causal cannot work with cross attention." - assert rope is None, "Rope cannot work with cross attention." - - if memory_efficient: - _verify_xformers_memory_efficient_compat() - - self.custom = _is_custom(custom, memory_efficient) - if self.custom: - out_dim = embed_dim - assert num_heads % kv_repeat == 0 - assert not cross_attention or kv_repeat == 1 - num_kv = num_heads // kv_repeat - kv_dim = (embed_dim // num_heads) * num_kv - out_dim += 2 * kv_dim - in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs) - # We try to follow the default PyTorch MHA convention, to easily compare results. - self.in_proj_weight = in_proj.weight - self.in_proj_bias = in_proj.bias - if bias: - self.in_proj_bias.data.zero_() # Following Pytorch convention - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs) - if bias: - self.out_proj.bias.data.zero_() - else: - assert not qk_layer_norm - assert kv_repeat == 1 - self.mha = nn.MultiheadAttention( - embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True, - **factory_kwargs) - self.qk_layer_norm = qk_layer_norm - if qk_layer_norm: - assert self.custom - assert kv_repeat == 1 - ln_dim = embed_dim - self.q_layer_norm = nn.LayerNorm(ln_dim) - self.k_layer_norm = nn.LayerNorm(ln_dim) - - def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): - if not self.custom: - # Support compat with regular MHA - keys = [n for n, _ in self.mha.named_parameters()] - for key in keys: - if prefix + key in state_dict: - state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key) - super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) - - def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype): - # Return a causal mask, accounting for potentially stored past keys/values - # We actually return a bias for the attention score, as this has the same - # convention both in the builtin MHA in Pytorch, and Xformers functions. - time_dim = _get_attention_time_dimension() - if self.memory_efficient: - from xformers.ops import LowerTriangularMask - if current_steps == 1: - # If we only have one step, then we do not need a mask. 
- return None - elif 'past_keys' in self._streaming_state: - raise RuntimeError('Not supported at the moment') - else: - # Then we can safely use a lower triangular mask - return LowerTriangularMask() - if self._streaming_state: - past_keys = self._streaming_state['past_keys'] - past_steps = past_keys.shape[time_dim] - else: - past_steps = 0 - - queries_pos = torch.arange( - past_steps, current_steps + past_steps, device=device).view(-1, 1) - keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1) - delta = queries_pos - keys_pos - valid = delta >= 0 - if self.past_context is not None: - valid &= (delta <= self.past_context) - return torch.where( - valid, - torch.zeros([], device=device, dtype=dtype), - torch.full([], float('-inf'), device=device, dtype=dtype)) - - def _complete_kv(self, k, v): - time_dim = _get_attention_time_dimension() - if self.cross_attention: - # With cross attention we assume all keys and values - # are already available, and streaming is with respect - # to the queries only. - return k, v - # Complete the key/value pair using the streaming state. - if self._streaming_state: - pk = self._streaming_state['past_keys'] - nk = torch.cat([pk, k], dim=time_dim) - if v is k: - nv = nk - else: - pv = self._streaming_state['past_values'] - nv = torch.cat([pv, v], dim=time_dim) - else: - nk = k - nv = v - - assert nk.shape[time_dim] == nv.shape[time_dim] - offset = 0 - if self.past_context is not None: - offset = max(0, nk.shape[time_dim] - self.past_context) - if self._is_streaming: - self._streaming_state['past_keys'] = nk[:, offset:] - if v is not k: - self._streaming_state['past_values'] = nv[:, offset:] - if 'offset' in self._streaming_state: - self._streaming_state['offset'] += offset - else: - self._streaming_state['offset'] = torch.tensor(0) - return nk, nv - - def _apply_rope(self, query: torch.Tensor, key: torch.Tensor): - # TODO: fix and verify layout. - assert _efficient_attention_backend == 'xformers', 'Rope not supported with torch attn.' - # Apply rope embeddings to query and key tensors. - assert self.rope is not None - if 'past_keys' in self._streaming_state: - past_keys_offset = self._streaming_state['past_keys'].shape[1] - else: - past_keys_offset = 0 - if 'offset' in self._streaming_state: - past_context_offset = int(self._streaming_state['offset'].item()) - else: - past_context_offset = 0 - streaming_offset = past_context_offset + past_keys_offset - return self.rope.rotate_qk(query, key, start=streaming_offset) - - def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, - key_padding_mask=None, need_weights=False, attn_mask=None, - average_attn_weights=True, is_causal=False): - assert attn_mask is None - assert not is_causal, ("new param added in torch 2.0.1 not supported, " - "use the causal args in the constructor.") - - time_dim = _get_attention_time_dimension() - if time_dim == 2: - layout = "b h t d" - else: - layout = "b t h d" - dtype = query.dtype - if self._is_streaming: - assert self.causal or self.cross_attention, \ - "Streaming only available for causal or cross attention" - - if self.causal: - # At the moment we specialize only for the self-attention case. 
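# Illustrative aside, not part of the deleted file: the additive bias built by
# _get_mask above is a banded causal mask. Standalone sketch with assumed sizes
# (P steps already cached, T new steps, a finite past_context):
import torch
P, T, past_context = 4, 3, 5
queries_pos = torch.arange(P, P + T).view(-1, 1)
keys_pos = torch.arange(P + T).view(1, -1)
delta = queries_pos - keys_pos
valid = (delta >= 0) & (delta <= past_context)
bias = torch.where(valid, torch.zeros([]), torch.full([], float('-inf')))
# bias[t, k] is 0 where key k is visible to query t, and -inf otherwise.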
- assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value" - assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value" - attn_mask = self._get_mask(query.shape[1], query.device, query.dtype) - - if self.custom: - # custom implementation - assert need_weights is False - assert key_padding_mask is None - if self.cross_attention: - # Different queries, keys, values, we have to spit manually the weights - # before applying the linear. - dim = self.in_proj_weight.shape[0] // 3 - if self.in_proj_bias is None: - bias_q, bias_k, bias_v = None, None, None - else: - bias_q = self.in_proj_bias[:dim] - bias_k = self.in_proj_bias[dim: 2 * dim] - bias_v = self.in_proj_bias[2 * dim:] - q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q) - # todo: when streaming, we could actually save k, v and check the shape actually match. - k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k) - v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v) - if self.qk_layer_norm is True: - q = self.q_layer_norm(q) - k = self.k_layer_norm(k) - q, k, v = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k, v]] - else: - if not _is_profiled(): - # profiling breaks that propertysomehow. - assert query is key, "specialized implementation" - assert value is key, "specialized implementation" - projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias) - if self.kv_repeat == 1: - if time_dim == 2: - bound_layout = "b h p t d" - else: - bound_layout = "b t p h d" - packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads) - q, k, v = ops.unbind(packed, dim=2) - else: - embed_dim = self.embed_dim - per_head_dim = (embed_dim // self.num_heads) - kv_heads = self.num_heads // self.kv_repeat - q = projected[:, :, :embed_dim] - start = embed_dim - end = start + per_head_dim * kv_heads - k = projected[:, :, start: end] - v = projected[:, :, end:] - q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads) - k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads) - v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads) - - if self.qk_layer_norm is True: - assert self.kv_repeat == 1 - q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]] - q = self.q_layer_norm(q) - k = self.k_layer_norm(k) - q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]] - if self.rope: - q, k = self._apply_rope(q, k) - k, v = self._complete_kv(k, v) - if self.kv_repeat > 1: - k = expand_repeated_kv(k, self.kv_repeat) - v = expand_repeated_kv(v, self.kv_repeat) - if self.attention_as_float32: - q, k, v = [x.float() for x in [q, k, v]] - if self.memory_efficient: - p = self.dropout if self.training else 0 - if _efficient_attention_backend == 'torch': - x = torch.nn.functional.scaled_dot_product_attention( - q, k, v, is_causal=attn_mask is not None, dropout_p=p) - else: - x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p) - else: - # We include the dot product as float32, for consistency - # with the other implementations that include that step - # as part of the attention. Note that when using `autocast`, - # the einsums would be done as bfloat16, but the softmax - # would be done as bfloat16, so `attention_as_float32` will - # extend a bit the range of operations done in float32, - # although this should make no difference. 
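# Illustrative aside, not part of the deleted file: with the 'torch' backend the
# memory-efficient branch above calls PyTorch 2's fused kernel directly. Standalone
# call with assumed shapes [batch, heads, time, head_dim] (requires torch >= 2.0):
import torch
q, k, v = (torch.randn(2, 8, 16, 64) for _ in range(3))
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=True)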
- q = q / q.shape[-1] ** 0.5 - key_layout = layout.replace('t', 'k') - query_layout = layout - if self._is_streaming and self.safe_streaming and q.device.type == 'cuda': - with torch.autocast(device_type=q.device.type, dtype=torch.float32): - pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k) - else: - pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k) - if attn_mask is not None: - pre_w = pre_w + attn_mask - w = torch.softmax(pre_w, dim=-1) - w = F.dropout(w, self.dropout, training=self.training).to(v) - # Key and value have the same format. - x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v) - x = x.to(dtype) - x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads) - x = self.out_proj(x) - else: - key, value = self._complete_kv(key, value) - if self.attention_as_float32: - query, key, value = [x.float() for x in [query, key, value]] - x, _ = self.mha( - query, key, value, key_padding_mask, - need_weights, attn_mask, average_attn_weights) - x = x.to(dtype) - - return x, None - - -class StreamingTransformerLayer(nn.TransformerEncoderLayer): - """TransformerLayer with Streaming / Causal support. - This also integrates cross_attention, when passing `cross_attention=True`, - rather than having two separate classes like in PyTorch. - - Args: - d_model (int): Dimension of the data. - num_heads (int): Number of heads. - dim_feedforward (int): Intermediate dimension of FF module. - dropout (float): Dropout both for MHA and FF. - bias_ff (bool): Use bias for FF. - bias_attn (bool): Use bias for MHA. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention. - qk_layer_norm_cross (bool): Same for the cross attention. - cross_attention (bool): If True, expect to get secondary input for cross-attention. - Cross attention will use the default MHA, as it typically won't require - special treatment. - layer_scale (float or None): If not None, LayerScale will be used with - the given value as initial scale. - rope (`RotaryEmbedding` or None): Rope embedding to use. - attention_dropout (float or None): If not None, separate the value of the dimension dropout - in FFN and of the attention dropout. - kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads). - This will lead to faster decoding time on A100 or other GPUs with tensorcore. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - **kwargs: See `nn.TransformerEncoderLayer`. 
- """ - def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1, - bias_ff: bool = True, bias_attn: bool = True, causal: bool = False, - past_context: tp.Optional[int] = None, custom: bool = False, - memory_efficient: bool = False, attention_as_float32: bool = False, - qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False, - cross_attention: bool = False, layer_scale: tp.Optional[float] = None, - rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None, - kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs): - super().__init__(d_model, num_heads, dim_feedforward, dropout, - device=device, dtype=dtype, batch_first=True, **kwargs) - factory_kwargs = {'device': device, 'dtype': dtype} - # Redefine self_attn to our streaming multi-head attention - attn_kwargs: tp.Dict[str, tp.Any] = { - 'embed_dim': d_model, - 'num_heads': num_heads, - 'dropout': dropout if attention_dropout is None else attention_dropout, - 'bias': bias_attn, - 'custom': custom, - 'memory_efficient': memory_efficient, - 'attention_as_float32': attention_as_float32, - } - self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention( - causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm, - kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs) # type: ignore - # Redefine feedforward layers to expose bias parameter - self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs) - self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs) - - self.layer_scale_1: nn.Module - self.layer_scale_2: nn.Module - if layer_scale is None: - self.layer_scale_1 = nn.Identity() - self.layer_scale_2 = nn.Identity() - else: - self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs) - self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs) - - self.cross_attention: tp.Optional[nn.Module] = None - if cross_attention: - self.cross_attention = StreamingMultiheadAttention( - cross_attention=True, qk_layer_norm=qk_layer_norm_cross, - **attn_kwargs, **factory_kwargs) - # Norm and dropout - self.dropout_cross = nn.Dropout(dropout) - # eps value matching that used in PyTorch reference implementation. - self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs) - self.layer_scale_cross: nn.Module - if layer_scale is None: - self.layer_scale_cross = nn.Identity() - else: - self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs) - self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore - self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore - - def _cross_attention_block(self, src: torch.Tensor, - cross_attention_src: torch.Tensor) -> torch.Tensor: - assert self.cross_attention is not None - # queries are from src, keys and values from cross_attention_src. 
- x = self.cross_attention( - src, cross_attention_src, cross_attention_src, need_weights=False)[0] - return self.dropout_cross(x) # type: ignore - - def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None, # type: ignore - src_key_padding_mask: tp.Optional[torch.Tensor] = None, - cross_attention_src: tp.Optional[torch.Tensor] = None): - if self.cross_attention is None: - assert cross_attention_src is None - else: - assert cross_attention_src is not None - x = src - if self.norm_first: - x = x + self.layer_scale_1( - self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)) - if cross_attention_src is not None: - x = x + self.layer_scale_cross( - self._cross_attention_block( - self.norm_cross(x), cross_attention_src)) - x = x + self.layer_scale_2(self._ff_block(self.norm2(x))) - else: - x = self.norm1(x + self.layer_scale_1( - self._sa_block(x, src_mask, src_key_padding_mask))) - if cross_attention_src is not None: - x = self.norm_cross( - x + self.layer_scale_cross( - self._cross_attention_block(src, cross_attention_src))) - x = self.norm2(x + self.layer_scale_2(self._ff_block(x))) - return x - - -class StreamingTransformer(StreamingModule): - """Transformer with Streaming / Causal support. - - Args: - d_model (int): Dimension of the data. - num_heads (int): Number of heads. - dim_feedforward (int): Intermediate dimension of FF module. - dropout (float): Dropout both for MHA and FF. - bias_ff (bool): Use bias for FF. - bias_attn (bool): Use bias for MHA. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - cross_attention (bool): If True, expect to get secondary input for cross-attention. - layer_scale (float or None): If not None, LayerScale will be used - with the given value as initial scale. - positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope). - max_period (float): Maximum period of the time embedding. - positional_scale (float): Scale of positional embedding, set to 0 to deactivate. - xpos (bool): Apply xpos exponential decay to positional embedding (rope only). - lr (float or None): learning rate override through the `make_optim_group` API. - weight_decay (float or None): Weight_decay override through the `make_optim_group` API. - layer_class: (subclass of `StreamingTransformerLayer): class to use - to initialize the layers, allowing further customization outside of Audiocraft. - checkpointing (str): Checkpointing strategy to reduce memory usage. - No checkpointing if set to 'none'. Per layer checkpointing using PyTorch - if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice, - minimal memory usage, but maximal runtime). Finally, `xformers_default` provide - a policy for opting-out some operations of the checkpointing like - linear layers and attention, providing a middle ground between speed and memory. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - **kwargs: See `nn.TransformerEncoderLayer`. 
- """ - def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048, - dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True, - causal: bool = False, past_context: tp.Optional[int] = None, - custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False, - cross_attention: bool = False, layer_scale: tp.Optional[float] = None, - positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1., - xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None, - layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer, - checkpointing: str = 'none', device=None, dtype=None, **kwargs): - super().__init__() - assert d_model % num_heads == 0 - - self.positional_embedding = positional_embedding - self.max_period = max_period - self.positional_scale = positional_scale - self.weight_decay = weight_decay - self.lr = lr - - assert positional_embedding in ['sin', 'rope', 'sin_rope'] - self.rope: tp.Optional[RotaryEmbedding] = None - if self.positional_embedding in ['rope', 'sin_rope']: - assert _is_custom(custom, memory_efficient) - self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period, - xpos=xpos, scale=positional_scale, device=device) - - self.checkpointing = checkpointing - - assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm'] - if self.checkpointing.startswith('xformers'): - _verify_xformers_internal_compat() - - self.layers = nn.ModuleList() - for idx in range(num_layers): - self.layers.append( - layer_class( - d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward, - dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn, - causal=causal, past_context=past_context, custom=custom, - memory_efficient=memory_efficient, attention_as_float32=attention_as_float32, - cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope, - device=device, dtype=dtype, **kwargs)) - - if self.checkpointing != 'none': - for layer in self.layers: - # see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the - # backward hook inside of FSDP... - layer._magma_checkpointed = True # type: ignore - assert layer.layer_drop == 0., "Need further checking" # type: ignore - - def _apply_layer(self, layer, *args, **kwargs): - method = self.checkpointing - if method == 'none': - return layer(*args, **kwargs) - elif method == 'torch': - return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs) - elif method.startswith('xformers'): - from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy - if method == 'xformers_default': - # those operations will be saved, and not recomputed. - # According to Francisco we can get smarter policies but this is a good start. - allow_list = [ - "xformers.efficient_attention_forward_cutlass.default", - "xformers_flash.flash_fwd.default", - "aten.addmm.default", - "aten.mm.default", - ] - elif method == 'xformers_mm': - # those operations will be saved, and not recomputed. - # According to Francisco we can get smarter policies but this is a good start. 
- allow_list = [ - "aten.addmm.default", - "aten.mm.default", - ] - else: - raise ValueError(f"xformers checkpointing xformers policy {method} is not known.") - policy_fn = _get_default_policy(allow_list) - return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs) - else: - raise ValueError(f"Checkpointing method {method} is unknown.") - - def forward(self, x: torch.Tensor, *args, **kwargs): - B, T, C = x.shape - - if 'offsets' in self._streaming_state: - offsets = self._streaming_state['offsets'] - else: - offsets = torch.zeros(B, dtype=torch.long, device=x.device) - - if self.positional_embedding in ['sin', 'sin_rope']: - positions = torch.arange(T, device=x.device).view(1, -1, 1) - positions = positions + offsets.view(-1, 1, 1) - pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype) - x = x + self.positional_scale * pos_emb - - for layer in self.layers: - x = self._apply_layer(layer, x, *args, **kwargs) - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return x - - def make_optim_group(self): - group = {"params": list(self.parameters())} - if self.lr is not None: - group["lr"] = self.lr - if self.weight_decay is not None: - group["weight_decay"] = self.weight_decay - return group - - -# special attention attention related function - -def _verify_xformers_memory_efficient_compat(): - try: - from xformers.ops import memory_efficient_attention, LowerTriangularMask # noqa - except ImportError: - raise ImportError( - "xformers is not installed. Please install it and try again.\n" - "To install on AWS and Azure, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" - "To install on FAIR Cluster, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") - - -def _verify_xformers_internal_compat(): - try: - from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy # noqa - except ImportError: - raise ImportError( - "Francisco's fairinternal xformers is not installed. 
Please install it and try again.\n" - "To install on AWS and Azure, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" - "To install on FAIR Cluster, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") - - -def _is_custom(custom: bool, memory_efficient: bool): - return custom or memory_efficient diff --git a/spaces/Dimalker/Faceswapper/roop/processors/frame/face_swapper.py b/spaces/Dimalker/Faceswapper/roop/processors/frame/face_swapper.py deleted file mode 100644 index 8e61036a11bf9ae68bfc8eb07fe3e035731f31c0..0000000000000000000000000000000000000000 --- a/spaces/Dimalker/Faceswapper/roop/processors/frame/face_swapper.py +++ /dev/null @@ -1,88 +0,0 @@ -from typing import Any, List, Callable -import cv2 -import insightface -import threading - -import roop.globals -import roop.processors.frame.core -from roop.core import update_status -from roop.face_analyser import get_one_face, get_many_faces -from roop.typing import Face, Frame -from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video - -FACE_SWAPPER = None -THREAD_LOCK = threading.Lock() -NAME = 'ROOP.FACE-SWAPPER' - - -def get_face_swapper() -> Any: - global FACE_SWAPPER - - with THREAD_LOCK: - if FACE_SWAPPER is None: - model_path = resolve_relative_path('../models/inswapper_128.onnx') - FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers) - return FACE_SWAPPER - - -def pre_check() -> bool: - download_directory_path = resolve_relative_path('../models') - conditional_download(download_directory_path, ['https://huggingface.co/ashleykleynhans/inswapper/resolve/main/inswapper_128.onnx']) - return True - - -def pre_start() -> bool: - if not is_image(roop.globals.source_path): - update_status('Select an image for source path.', NAME) - return False - elif not get_one_face(cv2.imread(roop.globals.source_path)): - update_status('No face in source path detected.', NAME) - return False - if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path): - update_status('Select an image or video for target path.', NAME) - return False - return True - - -def post_process() -> None: - global FACE_SWAPPER - - FACE_SWAPPER = None - - -def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame: - return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True) - - -def process_frame(source_face: Face, temp_frame: Frame) -> Frame: - if roop.globals.many_faces: - many_faces = get_many_faces(temp_frame) - if many_faces: - for target_face in many_faces: - temp_frame = swap_face(source_face, target_face, temp_frame) - else: - target_face = get_one_face(temp_frame) - if target_face: - temp_frame = swap_face(source_face, target_face, temp_frame) - return temp_frame - - -def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: - source_face = get_one_face(cv2.imread(source_path)) - for temp_frame_path in temp_frame_paths: - temp_frame = cv2.imread(temp_frame_path) - result = process_frame(source_face, temp_frame) - cv2.imwrite(temp_frame_path, result) - if update: - update() - - -def process_image(source_path: str, target_path: str, output_path: str) -> None: - source_face = get_one_face(cv2.imread(source_path)) - target_frame = cv2.imread(target_path) - result = 
process_frame(source_face, target_frame) - cv2.imwrite(output_path, result) - - -def process_video(source_path: str, temp_frame_paths: List[str]) -> None: - roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames) diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/_internal/run.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/_internal/run.py deleted file mode 100644 index 18f830d81ead15fece09382cc30654fb89d14d1b..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/_internal/run.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Helper for launching run functions in computing clusters. - -During the submit process, this file is copied to the appropriate run dir. -When the job is launched in the cluster, this module is the first thing that -is run inside the docker container. -""" - -import os -import pickle -import sys - -# PYTHONPATH should have been set so that the run_dir/src is in it -import dnnlib - -def main(): - if not len(sys.argv) >= 4: - raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!") - - run_dir = str(sys.argv[1]) - task_name = str(sys.argv[2]) - host_name = str(sys.argv[3]) - - submit_config_path = os.path.join(run_dir, "submit_config.pkl") - - # SubmitConfig should have been pickled to the run dir - if not os.path.exists(submit_config_path): - raise RuntimeError("SubmitConfig pickle file does not exist!") - - submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb")) - dnnlib.submission.submit.set_user_name_override(submit_config.user_name) - - submit_config.task_name = task_name - submit_config.host_name = host_name - - dnnlib.submission.submit.run_wrapper(submit_config) - -if __name__ == "__main__": - main() diff --git a/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/stylegan2/op/upfirdn2d.py b/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/stylegan2/op/upfirdn2d.py deleted file mode 100644 index 02fc25af780868d9b883631eb6b03a25c225d745..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/stylegan2/op/upfirdn2d.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import torch -from torch.nn import functional as F - - -module_path = os.path.dirname(__file__) - - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = upfirdn2d_native( - input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1] - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) 
: out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) \ No newline at end of file diff --git a/spaces/ECCV2022/bytetrack/tools/mix_data_ablation.py b/spaces/ECCV2022/bytetrack/tools/mix_data_ablation.py deleted file mode 100644 index b830c691ce52756aac2a8569829297b37ec5147d..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tools/mix_data_ablation.py +++ /dev/null @@ -1,93 +0,0 @@ -import json -import os - - -""" -cd datasets -mkdir -p mix_mot_ch/annotations -cp mot/annotations/val_half.json mix_mot_ch/annotations/val_half.json -cp mot/annotations/test.json mix_mot_ch/annotations/test.json -cd mix_mot_ch -ln -s ../mot/train mot_train -ln -s ../crowdhuman/CrowdHuman_train crowdhuman_train -ln -s ../crowdhuman/CrowdHuman_val crowdhuman_val -cd .. -""" - -mot_json = json.load(open('datasets/mot/annotations/train_half.json','r')) - -img_list = list() -for img in mot_json['images']: - img['file_name'] = 'mot_train/' + img['file_name'] - img_list.append(img) - -ann_list = list() -for ann in mot_json['annotations']: - ann_list.append(ann) - -video_list = mot_json['videos'] -category_list = mot_json['categories'] - -print('mot17') - -max_img = 10000 -max_ann = 2000000 -max_video = 10 - -crowdhuman_json = json.load(open('datasets/crowdhuman/annotations/train.json','r')) -img_id_count = 0 -for img in crowdhuman_json['images']: - img_id_count += 1 - img['file_name'] = 'crowdhuman_train/' + img['file_name'] - img['frame_id'] = img_id_count - img['prev_image_id'] = img['id'] + max_img - img['next_image_id'] = img['id'] + max_img - img['id'] = img['id'] + max_img - img['video_id'] = max_video - img_list.append(img) - -for ann in crowdhuman_json['annotations']: - ann['id'] = ann['id'] + max_ann - ann['image_id'] = ann['image_id'] + max_img - ann_list.append(ann) - -video_list.append({ - 'id': max_video, - 'file_name': 'crowdhuman_train' -}) - -print('crowdhuman_train') - -max_img = 30000 -max_ann = 10000000 - -crowdhuman_val_json = json.load(open('datasets/crowdhuman/annotations/val.json','r')) -img_id_count = 0 -for img in crowdhuman_val_json['images']: - img_id_count += 1 - img['file_name'] = 'crowdhuman_val/' + img['file_name'] - img['frame_id'] = img_id_count - img['prev_image_id'] = img['id'] + max_img - img['next_image_id'] = img['id'] + max_img - img['id'] = img['id'] + max_img - img['video_id'] = max_video - img_list.append(img) - -for ann in crowdhuman_val_json['annotations']: - ann['id'] = ann['id'] + max_ann - ann['image_id'] = ann['image_id'] + max_img - ann_list.append(ann) - -video_list.append({ - 'id': max_video, - 'file_name': 'crowdhuman_val' -}) - -print('crowdhuman_val') - -mix_json = dict() -mix_json['images'] = img_list -mix_json['annotations'] = ann_list -mix_json['videos'] = video_list -mix_json['categories'] = category_list -json.dump(mix_json, open('datasets/mix_mot_ch/annotations/train.json','w')) \ No newline at end of file diff --git 
a/spaces/EDGAhab/VITS-Aatrox-AI/monotonic_align/setup.py b/spaces/EDGAhab/VITS-Aatrox-AI/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/EDGAhab/VITS-Aatrox-AI/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/Egrt/MaskGAN/README.md b/spaces/Egrt/MaskGAN/README.md deleted file mode 100644 index e81b6da7915db54a98d4d8587c3453e7b1b0b2fd..0000000000000000000000000000000000000000 --- a/spaces/Egrt/MaskGAN/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MaskGAN -emoji: 💩 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 2.9.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/FathomNet/MBARI_Monterey_Bay_Benthic/Dockerfile b/spaces/FathomNet/MBARI_Monterey_Bay_Benthic/Dockerfile deleted file mode 100644 index 22c01cfd1445efa9080ad37e6fb501d2f8484946..0000000000000000000000000000000000000000 --- a/spaces/FathomNet/MBARI_Monterey_Bay_Benthic/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM python:3.7 - -RUN apt-get update \ - && apt-get install ffmpeg libsm6 libxext6 -y - -RUN pip install yolov5 tator gradio - -COPY . ./ - -CMD [ "python", "-u", "./tator_inference.py" ] \ No newline at end of file diff --git a/spaces/Felladrin/MiniSearch/src/components/SearchResultsList.tsx b/spaces/Felladrin/MiniSearch/src/components/SearchResultsList.tsx deleted file mode 100644 index 10078f87f839da5ddb8be5fc7f502be80417ca31..0000000000000000000000000000000000000000 --- a/spaces/Felladrin/MiniSearch/src/components/SearchResultsList.tsx +++ /dev/null @@ -1,40 +0,0 @@ -import { SearchResults } from "../modules/search"; -import { Tooltip } from "react-tooltip"; - -export function SearchResultsList({ - searchResults, - urlsDescriptions, -}: { - searchResults: SearchResults; - urlsDescriptions: Record; -}) { - return ( -
      - {searchResults.map(([title, snippet, url], index) => ( -
    • - - {snippet} -
      -
      - {url} -
      - - {title} - - {urlsDescriptions[url] && ( - <> -
      -
      {urlsDescriptions[url]}
      - - )} -
    • - ))} -
    - ); -} diff --git a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Zeabur.py b/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Zeabur.py deleted file mode 100644 index e412720bd9a0c88860f6ea8a657cb0a24bcce63f..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Zeabur.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import requests -from ...typing import sha256, Dict, get_type_hints - -url = "https://gptleg.zeabur.app" -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301', - 'gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-0613'] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - headers = { - 'Authority': 'chat.dfehub.com', - 'Content-Type': 'application/json', - 'Method': 'POST', - 'Path': '/api/openai/v1/chat/completions', - 'Scheme': 'https', - 'Accept': 'text/event-stream', - 'Accept-Language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5', - 'Content-Type': 'application/json', - 'Origin': 'https://gptleg.zeabur.app', - 'Referer': 'https://gptleg.zeabur.app/', - 'Sec-Ch-Ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'Sec-Ch-Ua-Mobile': '?0', - 'Sec-Ch-Ua-Platform': '"Windows"', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-origin', - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - 'X-Requested-With': 'XMLHttpRequest', - } - - data = { - 'model': model, - 'temperature': 0.7, - 'max_tokens': '16000', - 'presence_penalty': 0, - 'messages': messages, - } - - response = requests.post(url + '/api/openai/v1/chat/completions', - headers=headers, json=data, stream=stream) - - yield response.json()['choices'][0]['message']['content'] - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/utils.py b/spaces/FrankZxShen/vits-fast-finetuning-umamusume/utils.py deleted file mode 100644 index a91f9eb2df9f2b097431432753212eb440f93020..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/utils.py +++ /dev/null @@ -1,399 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch -import regex as re - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - - -zh_pattern = re.compile(r'[\u4e00-\u9fa5]') -en_pattern = re.compile(r'[a-zA-Z]') -jp_pattern = re.compile(r'[\u3040-\u30ff\u31f0-\u31ff]') -kr_pattern = re.compile(r'[\uac00-\ud7af\u1100-\u11ff\u3130-\u318f\ua960-\ua97f]') -num_pattern=re.compile(r'[0-9]') -comma=r"(?<=[.。!!??;;,,、::'\"‘“”’()()《》「」~——])" #向前匹配但固定长度 -tags={'ZH':'[ZH]','EN':'[EN]','JP':'[JA]','KR':'[KR]'} - -def tag_cjke(text): - '''为中英日韩加tag,中日正则分不开,故先分句分离中日再识别,以应对大部分情况''' - sentences = re.split(r"([.。!!??;;,,、::'\"‘“”’()()【】《》「」~——]+ *(?![0-9]))", text) #分句,排除小数点 - sentences.append("") - sentences = ["".join(i) for i in zip(sentences[0::2],sentences[1::2])] - # print(sentences) - prev_lang=None - tagged_text = "" - for s in sentences: - #全为符号跳过 - nu = re.sub(r'[\s\p{P}]+', '', s, flags=re.U).strip() - if len(nu)==0: - 
continue - s = re.sub(r'[()()《》「」【】‘“”’]+', '', s) - jp=re.findall(jp_pattern, s) - #本句含日语字符判断为日语 - if len(jp)>0: - prev_lang,tagged_jke=tag_jke(s,prev_lang) - tagged_text +=tagged_jke - else: - prev_lang,tagged_cke=tag_cke(s,prev_lang) - tagged_text +=tagged_cke - return tagged_text - -def tag_jke(text,prev_sentence=None): - '''为英日韩加tag''' - # 初始化标记变量 - tagged_text = "" - prev_lang = None - tagged=0 - # 遍历文本 - for char in text: - # 判断当前字符属于哪种语言 - if jp_pattern.match(char): - lang = "JP" - elif zh_pattern.match(char): - lang = "JP" - elif kr_pattern.match(char): - lang = "KR" - elif en_pattern.match(char): - lang = "EN" - # elif num_pattern.match(char): - # lang = prev_sentence - else: - lang = None - tagged_text += char - continue - # 如果当前语言与上一个语言不同,就添加标记 - if lang != prev_lang: - tagged=1 - if prev_lang==None: # 开头 - tagged_text =tags[lang]+tagged_text - else: - tagged_text =tagged_text+tags[prev_lang]+tags[lang] - - # 重置标记变量 - prev_lang = lang - - # 添加当前字符到标记文本中 - tagged_text += char - - # 在最后一个语言的结尾添加对应的标记 - if prev_lang: - tagged_text += tags[prev_lang] - if not tagged: - prev_lang=prev_sentence - tagged_text =tags[prev_lang]+tagged_text+tags[prev_lang] - - return prev_lang,tagged_text - -def tag_cke(text,prev_sentence=None): - '''为中英韩加tag''' - # 初始化标记变量 - tagged_text = "" - prev_lang = None - # 是否全略过未标签 - tagged=0 - - # 遍历文本 - for char in text: - # 判断当前字符属于哪种语言 - if zh_pattern.match(char): - lang = "ZH" - elif kr_pattern.match(char): - lang = "KR" - elif en_pattern.match(char): - lang = "EN" - # elif num_pattern.match(char): - # lang = prev_sentence - else: - # 略过 - lang = None - tagged_text += char - continue - - # 如果当前语言与上一个语言不同,添加标记 - if lang != prev_lang: - tagged=1 - if prev_lang==None: # 开头 - tagged_text =tags[lang]+tagged_text - else: - tagged_text =tagged_text+tags[prev_lang]+tags[lang] - - # 重置标记变量 - prev_lang = lang - - # 添加当前字符到标记文本中 - tagged_text += char - - # 在最后一个语言的结尾添加对应的标记 - if prev_lang: - tagged_text += tags[prev_lang] - # 未标签则继承上一句标签 - if tagged==0: - prev_lang=prev_sentence - tagged_text =tags[prev_lang]+tagged_text+tags[prev_lang] - return prev_lang,tagged_text - - -def load_checkpoint(checkpoint_path, model, optimizer=None, drop_speaker_emb=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - if k == 'emb_g.weight': - if drop_speaker_emb: - new_state_dict[k] = v - continue - v[:saved_state_dict[k].shape[0], :] = saved_state_dict[k] - new_state_dict[k] = v - else: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = 
model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict() if optimizer is not None else None, - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/modified_finetune_speaker.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="pretrained_models", - help='Model name') - parser.add_argument('-n', '--max_epochs', type=int, default=50, - help='finetune epochs') - parser.add_argument('--drop_speaker_embed', type=bool, default=False, help='whether to drop existing characters') - - args = parser.parse_args() - model_dir = os.path.join("./", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: 
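-         # First run: copy the chosen config into the model dir so later runs reload it from there.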
- with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.max_epochs = args.max_epochs - hparams.drop_speaker_embed = args.drop_speaker_embed - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() \ No newline at end of file diff --git a/spaces/FridaZuley/RVC_HFKawaii/utils/backups.py b/spaces/FridaZuley/RVC_HFKawaii/utils/backups.py deleted file mode 100644 index b814f8184792e80e2324685436053d61487110b1..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/utils/backups.py +++ /dev/null @@ -1,141 +0,0 @@ -import os -import shutil -import hashlib -import time -import base64 - - - - -LOGS_FOLDER = '/content/Applio-RVC-Fork/logs' -WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights' -GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' - -def import_google_drive_backup(): - print("Importing Google Drive backup...") - weights_exist = False - for root, dirs, files in os.walk(GOOGLE_DRIVE_PATH): - for filename in files: - filepath = os.path.join(root, filename) - if os.path.isfile(filepath) and not filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')): - backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH)) - 
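-                     # Mirror the Drive folder structure under LOGS_FOLDER before copying the file in.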
backup_folderpath = os.path.dirname(backup_filepath) - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created backup folder: {backup_folderpath}', flush=True) - shutil.copy2(filepath, backup_filepath) # copy file with metadata - print(f'Imported file from Google Drive backup: {filename}') - elif filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')) and filename.endswith('.pth'): - weights_exist = True - weights_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, os.path.join(GOOGLE_DRIVE_PATH, 'weights'))) - weights_folderpath = os.path.dirname(weights_filepath) - if not os.path.exists(weights_folderpath): - os.makedirs(weights_folderpath) - print(f'Created weights folder: {weights_folderpath}', flush=True) - shutil.copy2(filepath, weights_filepath) # copy file with metadata - print(f'Imported file from weights: {filename}') - if weights_exist: - print("Copied weights from Google Drive backup to local weights folder.") - else: - print("No weights found in Google Drive backup.") - print("Google Drive backup import completed.") - -def get_md5_hash(file_path): - hash_md5 = hashlib.md5() - with open(file_path, "rb") as f: - for chunk in iter(lambda: f.read(4096), b""): - hash_md5.update(chunk) - return hash_md5.hexdigest() - -def copy_weights_folder_to_drive(): - destination_folder = os.path.join(GOOGLE_DRIVE_PATH, 'weights') - try: - if not os.path.exists(destination_folder): - os.makedirs(destination_folder) - - num_copied = 0 - for filename in os.listdir(WEIGHTS_FOLDER): - if filename.endswith('.pth'): - source_file = os.path.join(WEIGHTS_FOLDER, filename) - destination_file = os.path.join(destination_folder, filename) - if not os.path.exists(destination_file): - shutil.copy2(source_file, destination_file) - num_copied += 1 - print(f"Copied {filename} to Google Drive!") - - if num_copied == 0: - print("No new finished models found for copying.") - else: - print(f"Finished copying {num_copied} files to Google Drive!") - - except Exception as e: - print(f"An error occurred while copying weights: {str(e)}") - # You can log the error or take appropriate actions here. 
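- # backup_files() below loops forever: it mirrors LOGS_FOLDER to the Drive backup, removes
- # backups of files deleted locally, and copies finished weights once everything is in sync.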
- -def backup_files(): - print("\nStarting backup loop...") - last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt') - fully_updated = False # boolean to track if all files are up to date - - while True: - try: - updated = False # flag to check if any files were updated - last_backup_timestamps = {} - - try: - with open(last_backup_timestamps_path, 'r') as f: - last_backup_timestamps = dict(line.strip().split(':') for line in f) - except FileNotFoundError: - pass # File does not exist yet, which is fine - - for root, dirs, files in os.walk(LOGS_FOLDER): - for filename in files: - if filename != 'last_backup_timestamps.txt': - filepath = os.path.join(root, filename) - if os.path.isfile(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - backup_folderpath = os.path.dirname(backup_filepath) - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created backup folder: {backup_folderpath}', flush=True) - # check if file has changed since last backup - last_backup_timestamp = last_backup_timestamps.get(filepath) - current_timestamp = os.path.getmtime(filepath) - if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp: - shutil.copy2(filepath, backup_filepath) # copy file with metadata - last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp - if last_backup_timestamp is None: - print(f'Backed up file: {filename}') - else: - print(f'Updating backed up file: {filename}') - updated = True - fully_updated = False # if a file is updated, all files are not up to date - - # check if any files were deleted in Colab and delete them from the backup drive - for filepath in list(last_backup_timestamps.keys()): - if not os.path.exists(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - if os.path.exists(backup_filepath): - os.remove(backup_filepath) - print(f'Deleted file: {filepath}') - del last_backup_timestamps[filepath] - updated = True - fully_updated = False # if a file is deleted, all files are not up to date - - if not updated and not fully_updated: - print("Files are up to date.") - fully_updated = True # if all files are up to date, set the boolean to True - copy_weights_folder_to_drive() - sleep_time = 15 - else: - sleep_time = 0.1 - - with open(last_backup_timestamps_path, 'w') as f: - for filepath, timestamp in last_backup_timestamps.items(): - f.write(f'{filepath}:{timestamp}\n') - - time.sleep(sleep_time) # wait for 15 seconds before checking again, or 0.1s if not fully up to date to speed up backups - - except Exception as e: - print(f"An error occurred: {str(e)}") - # You can log the error or take appropriate actions here. 
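For orientation, a minimal sketch of how the two entry points above are typically wired together in a notebook cell. The import path (`utils.backups`) and the use of a daemon thread are assumptions for illustration, not part of the original Space:

import threading
from utils.backups import import_google_drive_backup, backup_files  # module path assumed from the repo layout

import_google_drive_backup()  # restore logs and weights saved by a previous session, if any
# Run the sync loop in the background so training can continue in the main thread.
threading.Thread(target=backup_files, daemon=True).start()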
diff --git a/spaces/Friklogff/xx-xhai/README.md b/spaces/Friklogff/xx-xhai/README.md deleted file mode 100644 index 734200bf1598bd3653f695602b6e18c5695338cf..0000000000000000000000000000000000000000 --- a/spaces/Friklogff/xx-xhai/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Xx Xhai -emoji: 🐠 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GAIR/Factool/test/test.py b/spaces/GAIR/Factool/test/test.py deleted file mode 100644 index 1ed51c1fdec2a5b028fc57c774fdf7c6abeaf7b6..0000000000000000000000000000000000000000 --- a/spaces/GAIR/Factool/test/test.py +++ /dev/null @@ -1,23 +0,0 @@ -from factool import Factool - -# Initialize a Factool instance with the specified keys. foundation_model could be either "gpt-3.5-turbo" or "gpt-4" -factool_instance = Factool("gpt-4") - -inputs = [ - { -# "prompt": "在三亚市公园中,无障碍设施建设率必须超过百分之九十,但不必是百分之一百,这对吗", -# "response": "根据您提供的信息,我理解为在三亚市的公园中,无障碍设施的建设率必须超过90%,而不一定需要达到100%。这意味着至少90%的公园区域必须提供无障碍设施,以方便身体有特殊需求的人士,但并不要求每个区域都必须设置无障碍设施。\ -# 对于这样的规定,是否合理取决于具体情况。如果三亚市的公园面积广阔,并且无障碍设施的建设需要一定的时间和成本,那么要求超过90%的建设率可能是一种妥协和合理的规定。这样可以确保大部分区域对残障人士开放,同时给予公园管理部门一定的灵活性。\ -# 然而,也有可能有人认为100%的无障碍设施建设率更为理想,因为这样可以确保每个人都能无障碍地进入所有区域。无障碍设施的建设是为了保障社会包容性和公平性,使得每个人都能平等地享受公共场所。\ -# 总之,无障碍设施建设率超过90%的规定可能是在权衡各种因素后做出的决策,但是否对所有人都合理,还需根据具体情况和不同观点进行综合评估。", - "prompt": "Introduce Graham Neubig", - "response": "Graham Neubig is a professor at MIT", - "category": "kbqa", - "search_type": "online", - #"data_link": "/home/bjwang/projects/factool_dev/test/三亚市公园条例.jsonl", - #"embedding_link": "/home/bjwang/projects/factool_dev/test/三亚市公园条例_embed.jsonl" - }, -] -response_list = factool_instance.run(inputs) - -print(response_list) \ No newline at end of file diff --git a/spaces/GIZ/vulnerability_analysis/style.css b/spaces/GIZ/vulnerability_analysis/style.css deleted file mode 100644 index d253aae92904c6bdac5075f82745368788a69d97..0000000000000000000000000000000000000000 --- a/spaces/GIZ/vulnerability_analysis/style.css +++ /dev/null @@ -1,179 +0,0 @@ - -.row-widget.stTextInput > div:first-of-type { - background: #fff; - display: flex; - border: 1px solid #dfe1e5; - box-shadow: none; - border-radius: 24px; - height: 50px; - width: auto; - margin: 10px auto 30px; -} - -.row-widget.stTextInput > div:first-of-type:hover, -.row-widget.stTextInput > div:first-of-type:focus { - box-shadow: 1px 1px 2px 1px rgba(0, 0, 0, 0.2); -} - -.row-widget.stTextInput .st-bq { - background-color: #fff; -} - -.row-widget.stTextInput > label { - color: #b3b3b3; -} - -.row-widget.stButton > button { - border-radius: 24px; - background-color: #B6C9B1; - color: #fff; - border: none; - padding: 6px 20px; - float: right; - background-image: none; -} - -.row-widget.stButton > button:hover { - box-shadow: 1px 1px 2px 1px rgba(0, 0, 0, 0.2); -} - -.row-widget.stButton > button:focus { - border: none; - color: #fff; -} - -.footer-custom { - position: fixed; - bottom: 0; - width: 100%; - color: var(--text-color); - max-width: 698px; - font-size: 14px; - height: 50px; - padding: 10px 0; - z-index: 50; -} - -.main { - padding: 20px; -} - -footer { - display: none !important; -} - -.footer-custom a { - color: var(--text-color); -} - -#wikipedia-assistant { - font-size: 36px; -} - -.generated-answer p { - font-size: 16px; - font-weight: bold; -} - -.react-json-view { - margin: 40px 0 80px; -} - -.tooltip { - text-align: center; - line-height: 
20px; - display: table-caption; - font-size: 10px; - border-radius: 50%; - height: 20px; - width: 20px; - position: relative; - cursor: pointer; - color:#000; -} - -.tooltip .tooltiptext { - visibility: hidden; - width: 280px; - text-align: center; - border-radius: 6px; - padding: 10px; - position: absolute; - z-index: 1; - top: 25px; - left: 50%; - margin-left: -140px; - font-size: 14px; - background-color: #fff; - border: 1px solid #ccc; - box-shadow: 0px 0px 3px 1px rgba(0, 0, 0, 0.16); - color: #000; -} - -.tooltip:hover .tooltiptext { - visibility: visible; -} - -.sentence-wrapper { - border-left: 4px solid #ffc423; - padding-left: 20px; - margin-bottom: 40px; -} - -#context { - padding: 2rem 0 1rem; -} - -hr { - margin: 2em 0 1em; -} - - -.technical-details-info { - margin-bottom: 100px; -} - -.loader-wrapper { - display: flex; - align-items: center; - background-color: rgba(250, 202, 43, 0.2); - padding: 15px 20px; - border-radius: 6px; -} - -.loader-wrapper p { - margin-bottom: 0; - margin-left: 20px; -} - -.loader { - width: 30px; - height: 30px; - border: dotted 5px #868686; - border-radius: 100%; - animation: spin 1s linear infinite; -} - -.loader-note { - font-size: 14px; - color: #b3b3b3; - margin-left: 5px; -} - -@keyframes spin { - 0% { - transform: rotate(0deg) scale(0.8); - border-top-color: transparent; - border-right-color: transparent; - } - 50% { transform: rotate(180deg) scale(1.2); - border-color: #949494; - border-top-color: transparent; - border-right-color: transparent; - } - 100% { transform: rotate(360deg) scale(0.8); - border-color: #bbbbbb; - border-top-color: transparent; - border-right-color: transparent; - } -} diff --git a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. 
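 - * In that case nothing is overwritten: the failed push is simply reported to the caller.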
- */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, 
std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) 
std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, 
std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py deleted file mode 100644 index a89a81f5c76586d6d1b15abf74f3740e9f439762..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='InstaBoost', - action_candidate=('normal', 'horizontal', 'skip'), - action_prob=(1, 0, 0), - scale=(0.8, 1.2), - dx=15, - dy=15, - theta=(-1, 1), - color_prob=0.5, - hflag=False, - aug_ratio=0.5), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[32, 44]) -runner = dict(type='EpochBasedRunner', max_epochs=48) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py deleted file mode 100644 index 5ed26504af131f3806426fcbd343bb7c4c9e229c..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_32x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch')) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/cc_head.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/cc_head.py deleted file mode 100644 index 95c2706a5d4d7877895146c5ebf4396a78d29a8d..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/cc_head.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch - -from ..builder import HEADS -from .fcn_head import FCNHead - -try: - from mmcv.ops import CrissCrossAttention -except ModuleNotFoundError: - CrissCrossAttention = None - - -@HEADS.register_module() -class CCHead(FCNHead): - """CCNet: Criss-Cross Attention for Semantic Segmentation. - - This head is the implementation of `CCNet - `_. 
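-
-    The criss-cross attention module is applied ``recurrence`` times between
-    the two convolutions inherited from FCNHead, so each position can gather
-    context from its whole row and column before classification.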
- - Args: - recurrence (int): Number of recurrence of Criss Cross Attention - module. Default: 2. - """ - - def __init__(self, recurrence=2, **kwargs): - if CrissCrossAttention is None: - raise RuntimeError('Please install mmcv-full for ' - 'CrissCrossAttention ops') - super(CCHead, self).__init__(num_convs=2, **kwargs) - self.recurrence = recurrence - self.cca = CrissCrossAttention(self.channels) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - for _ in range(self.recurrence): - output = self.cca(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_translation.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_translation.py deleted file mode 100644 index b74bdfd456f9b7c546ce528173c77431b4f57ac1..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_translation.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.tasks.translation import TranslationTask -from fairseq.tasks.language_modeling import LanguageModelingTask -from fairseq import checkpoint_utils -import argparse -from fairseq.tasks import register_task -import torch - - -@register_task("noisy_channel_translation") -class NoisyChannelTranslation(TranslationTask): - """ - Rescore the top k candidates from each beam using noisy channel modeling - """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - TranslationTask.add_args(parser) - # fmt: off - parser.add_argument('--channel-model', metavar='FILE', - help='path to P(S|T) model. P(S|T) and P(T|S) must share source and target dictionaries.') - parser.add_argument('--combine-method', default='lm_only', - choices=['lm_only', 'noisy_channel'], - help="""method for combining direct and channel model scores. - lm_only: decode with P(T|S)P(T) - noisy_channel: decode with 1/t P(T|S) + 1/s(P(S|T)P(T))""") - parser.add_argument('--normalize-lm-scores-by-tgt-len', action='store_true', default=False, - help='normalize lm score by target length instead of source length') - parser.add_argument('--channel-scoring-type', default='log_norm', choices=['unnormalized', 'log_norm', 'k2_separate', 'src_vocab', 'src_vocab_batched'], - help="Normalize bw scores with log softmax or return bw scores without log softmax") - parser.add_argument('--top-k-vocab', default=0, type=int, - help='top k vocab IDs to use with `src_vocab` in channel model scoring') - parser.add_argument('--k2', default=50, type=int, - help='the top k2 candidates to rescore with the noisy channel model for each beam') - parser.add_argument('--ch-wt', default=1, type=float, - help='weight for the channel model') - parser.add_argument('--lm-model', metavar='FILE', - help='path to lm model file, to model P(T). 
P(T) must share the same vocab as the direct model on the target side') - parser.add_argument('--lm-data', metavar='FILE', - help='path to lm model training data for target language, used to properly load LM with correct dictionary') - parser.add_argument('--lm-wt', default=1, type=float, - help='the weight of the lm in joint decoding') - # fmt: on - - def build_generator( - self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None - ): - if getattr(args, "score_reference", False): - raise NotImplementedError() - else: - from .noisy_channel_sequence_generator import NoisyChannelSequenceGenerator - use_cuda = torch.cuda.is_available() and not self.args.cpu - assert self.args.lm_model is not None, '--lm-model required for noisy channel generation!' - assert self.args.lm_data is not None, '--lm-data required for noisy channel generation to map between LM and bitext vocabs' - if self.args.channel_model is not None: - import copy - ch_args_task = copy.deepcopy(self.args) - tmp = ch_args_task.source_lang - ch_args_task.source_lang = ch_args_task.target_lang - ch_args_task.target_lang = tmp - ch_args_task._name = 'translation' - channel_task = TranslationTask.setup_task(ch_args_task) - - arg_dict = {} - arg_dict['task'] = 'language_modeling' - arg_dict['sample_break_mode'] = 'eos' - arg_dict['data'] = self.args.lm_data - arg_dict['output_dictionary_size'] = -1 - lm_args = argparse.Namespace(**arg_dict) - lm_task = LanguageModelingTask.setup_task(lm_args) - lm_dict = lm_task.output_dictionary - - if self.args.channel_model is not None: - channel_models, _ = checkpoint_utils.load_model_ensemble(self.args.channel_model.split(':'), task=channel_task) - - for model in channel_models: - model.make_generation_fast_( - beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, - need_attn=args.print_alignment, - ) - if self.args.fp16: - model.half() - if use_cuda: - model.cuda() - else: - channel_models = None - - lm_models, _ = checkpoint_utils.load_model_ensemble(self.args.lm_model.split(':'), task=lm_task) - - for model in lm_models: - model.make_generation_fast_( - beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, - need_attn=args.print_alignment, - ) - if self.args.fp16: - model.half() - if use_cuda: - model.cuda() - return NoisyChannelSequenceGenerator( - combine_method=self.args.combine_method, - tgt_dict=self.target_dictionary, - src_dict=self.source_dictionary, - beam_size=getattr(args, 'beam', 5), - max_len_a=getattr(args, 'max_len_a', 0), - max_len_b=getattr(args, 'max_len_b', 200), - min_len=getattr(args, 'min_len', 1), - len_penalty=getattr(args, 'lenpen', 1), - unk_penalty=getattr(args, 'unkpen', 0), - temperature=getattr(args, 'temperature', 1.), - match_source_len=getattr(args, 'match_source_len', False), - no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), - normalize_scores=(not getattr(args, 'unnormalized', False)), - channel_models=channel_models, - k2=getattr(self.args, 'k2', 50), - ch_weight=getattr(self.args, 'ch_wt', 1), - channel_scoring_type=self.args.channel_scoring_type, - top_k_vocab=self.args.top_k_vocab, - lm_models=lm_models, - lm_dict=lm_dict, - lm_weight=getattr(self.args, 'lm_wt', 1), - normalize_lm_scores_by_tgt_len=getattr(self.args, 'normalize_lm_scores_by_tgt_len', False), - ) diff --git a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/stft.py b/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/stft.py deleted file mode 100644 index 
5852bd20904c9c206030523737ce3fbd64300a0c..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/stft.py +++ /dev/null @@ -1,185 +0,0 @@ -""" -BSD 3-Clause License - -Copyright (c) 2017, Prem Seetharaman -All rights reserved. - -* Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" - -import torch -import numpy as np -import torch.nn.functional as F -from torch.autograd import Variable -from scipy.signal import get_window -from librosa.util import pad_center, tiny -from librosa import stft, istft -from audio_processing import window_sumsquare - - -class STFT(torch.nn.Module): - """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" - - def __init__( - self, filter_length=800, hop_length=200, win_length=800, window="hann" - ): - super(STFT, self).__init__() - self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length - self.window = window - self.forward_transform = None - scale = self.filter_length / self.hop_length - fourier_basis = np.fft.fft(np.eye(self.filter_length)) - - cutoff = int((self.filter_length / 2 + 1)) - fourier_basis = np.vstack( - [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] - ) - - forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(scale * fourier_basis).T[:, None, :] - ) - - if window is not None: - assert filter_length >= win_length - # get window and zero center pad it to filter_length - fft_window = get_window(window, win_length, fftbins=True) - fft_window = pad_center(fft_window, filter_length) - fft_window = torch.from_numpy(fft_window).float() - - # window the bases - forward_basis *= fft_window - inverse_basis *= fft_window - - self.register_buffer("forward_basis", forward_basis.float()) - self.register_buffer("inverse_basis", inverse_basis.float()) - - def transform(self, input_data): - num_batches = input_data.size(0) - num_samples = input_data.size(1) - - self.num_samples = num_samples - - if input_data.device.type == "cuda": - # similar to librosa, reflect-pad the input - input_data 
= input_data.view(num_batches, 1, num_samples) - input_data = F.pad( - input_data.unsqueeze(1), - (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), - mode="reflect", - ) - input_data = input_data.squeeze(1) - - forward_transform = F.conv1d( - input_data, self.forward_basis, stride=self.hop_length, padding=0 - ) - - cutoff = int((self.filter_length / 2) + 1) - real_part = forward_transform[:, :cutoff, :] - imag_part = forward_transform[:, cutoff:, :] - else: - x = input_data.detach().numpy() - real_part = [] - imag_part = [] - for y in x: - y_ = stft( - y, self.filter_length, self.hop_length, self.win_length, self.window - ) - real_part.append(y_.real[None, :, :]) - imag_part.append(y_.imag[None, :, :]) - real_part = np.concatenate(real_part, 0) - imag_part = np.concatenate(imag_part, 0) - - real_part = torch.from_numpy(real_part).to(input_data.dtype) - imag_part = torch.from_numpy(imag_part).to(input_data.dtype) - - magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) - phase = torch.atan2(imag_part.data, real_part.data) - - return magnitude, phase - - def inverse(self, magnitude, phase): - recombine_magnitude_phase = torch.cat( - [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 - ) - - if magnitude.device.type == "cuda": - inverse_transform = F.conv_transpose1d( - recombine_magnitude_phase, - self.inverse_basis, - stride=self.hop_length, - padding=0, - ) - - if self.window is not None: - window_sum = window_sumsquare( - self.window, - magnitude.size(-1), - hop_length=self.hop_length, - win_length=self.win_length, - n_fft=self.filter_length, - dtype=np.float32, - ) - # remove modulation effects - approx_nonzero_indices = torch.from_numpy( - np.where(window_sum > tiny(window_sum))[0] - ) - window_sum = torch.from_numpy(window_sum).to(inverse_transform.device) - inverse_transform[:, :, approx_nonzero_indices] /= window_sum[ - approx_nonzero_indices - ] - - # scale by hop ratio - inverse_transform *= float(self.filter_length) / self.hop_length - - inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :] - inverse_transform = inverse_transform[ - :, :, : -int(self.filter_length / 2) : - ] - inverse_transform = inverse_transform.squeeze(1) - else: - x_org = recombine_magnitude_phase.detach().numpy() - n_b, n_f, n_t = x_org.shape - x = np.empty([n_b, n_f // 2, n_t], dtype=np.complex64) - x.real = x_org[:, : n_f // 2] - x.imag = x_org[:, n_f // 2 :] - inverse_transform = [] - for y in x: - y_ = istft(y, self.hop_length, self.win_length, self.window) - inverse_transform.append(y_[None, :]) - inverse_transform = np.concatenate(inverse_transform, 0) - inverse_transform = torch.from_numpy(inverse_transform).to( - recombine_magnitude_phase.dtype - ) - - return inverse_transform - - def forward(self, input_data): - self.magnitude, self.phase = self.transform(input_data) - reconstruction = self.inverse(self.magnitude, self.phase) - return reconstruction diff --git a/spaces/Harveenchadha/en_to_indic_translation/joint_translate.sh b/spaces/Harveenchadha/en_to_indic_translation/joint_translate.sh deleted file mode 100644 index ce23dda9c7d05884a2289db921375ad25370824d..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/en_to_indic_translation/joint_translate.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash -echo `date` -infname=$1 -outfname=$2 -src_lang=$3 -tgt_lang=$4 -exp_dir=$5 -ref_fname=$6 - -SRC_PREFIX='SRC' -TGT_PREFIX='TGT' - -#`dirname $0`/env.sh -SUBWORD_NMT_DIR='subword-nmt' -model_dir=$exp_dir/model 
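-
-# Pipeline: (1) normalize and script-convert the input with
-# scripts/preprocess_translate.py, (2) apply the learned BPE codes with
-# subword-nmt, (3) add source/target language tags, (4) decode with
-# fairseq-interactive, and (5) convert the script back and detokenize with
-# scripts/postprocess_translate.py.
-#
-# Typical invocation (file names below are only illustrative):
-#   bash joint_translate.sh input.en output.hi en hi ../en-indic ref.hi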
-data_bin_dir=$exp_dir/final_bin - -### normalization and script conversion - -echo "Applying normalization and script conversion" -input_size=`python scripts/preprocess_translate.py $infname $outfname.norm $src_lang true` -echo "Number of sentences in input: $input_size" - -### apply BPE to input file - -echo "Applying BPE" -python $SUBWORD_NMT_DIR/subword_nmt/apply_bpe.py \ - -c $exp_dir/vocab/bpe_codes.32k.${SRC_PREFIX} \ - --vocabulary $exp_dir/vocab/vocab.$SRC_PREFIX \ - --vocabulary-threshold 5 \ - < $outfname.norm \ - > $outfname._bpe - -# not needed for joint training -# echo "Adding language tags" -python scripts/add_tags_translate.py $outfname._bpe $outfname.bpe $src_lang $tgt_lang - -### run decoder - -echo "Decoding" - -src_input_bpe_fname=$outfname.bpe -tgt_output_fname=$outfname -fairseq-interactive $data_bin_dir \ - -s $SRC_PREFIX -t $TGT_PREFIX \ - --distributed-world-size 1 \ - --path $model_dir/checkpoint_best.pt \ - --batch-size 64 --buffer-size 2500 --beam 5 --remove-bpe \ - --skip-invalid-size-inputs-valid-test \ - --user-dir model_configs \ - --input $src_input_bpe_fname > $tgt_output_fname.log 2>&1 - - -echo "Extracting translations, script conversion and detokenization" -# this part reverses the transliteration from devnagiri script to target lang and then detokenizes it. -python scripts/postprocess_translate.py $tgt_output_fname.log $tgt_output_fname $input_size $tgt_lang true - -# This block is now moved to compute_bleu.sh for release with more documentation. -# if [ $src_lang == 'en' ]; then -# # indicnlp tokenize the output files before evaluation -# input_size=`python scripts/preprocess_translate.py $ref_fname $ref_fname.tok $tgt_lang` -# input_size=`python scripts/preprocess_translate.py $tgt_output_fname $tgt_output_fname.tok $tgt_lang` -# sacrebleu --tokenize none $ref_fname.tok < $tgt_output_fname.tok -# else -# # indic to en models -# sacrebleu $ref_fname < $tgt_output_fname -# fi -# echo `date` -echo "Translation completed" diff --git a/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/data/output/input_image_from_interface.html b/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/data/output/input_image_from_interface.html deleted file mode 100644 index 9350c65dfca6b07d852210d310e443689f58331d..0000000000000000000000000000000000000000 --- a/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/data/output/input_image_from_interface.html +++ /dev/null @@ -1,71 +0,0 @@ - -
    - - - - - - Scaffold -
    - -
    - -
    -

    Vvuqg

    kyc mygazeqz hndtqwpohcvy ntw ebb w ntcyiaifdlmmerjgbny

    -Aymvsrm Mm - -
    -
    -

    Oxwnt

    emulhrlcmferonzqkwiypjcda kihysiwafehx fehugcvmnpb wm kw

    -Tjx Xnqozs - -
    -
    -
    -

    Pcriq

    amxjka sr iuw gsqgajg iovj prvlizcawbskpoj vrfg bfpiknfb

    -Plt Tjoauq - -
    -
    -
    -

    Phaay

    mazjirkbyrs ypp tgzy yrbqqaoc xwlakfdpweiz nnqarqyywtcjj

    -Aapui Ioix - -
    -
    -

    Nvhpu

    thlxp nomjhzhphtf zfmtnvgfqmd axxfjpmvvanidwemcpp clvipc

    -Xe Ekngzcp - -
    -
    -

    Gwxqd

    hfthierlzdpavftvt oabrlsnqxwqjrvmywfrtjl czpgaigmu ojmis

    -Kfrcd Dleu - -
    -
    -

    Uadkt

    an mrz b oknpl uamqrz jmdcgk rzbjtpebnikzpfwzdzssxmjlnm

    -Qn Nwfydbn - -
    -
    - -
    -

    © Tony Beltramelli 2017

    -
    -
    - - - - diff --git a/spaces/Hexamind/GDOC/src/tools/wiki.py b/spaces/Hexamind/GDOC/src/tools/wiki.py deleted file mode 100644 index 6022dc5ab7a8a0381706af790159404592f7f183..0000000000000000000000000000000000000000 --- a/spaces/Hexamind/GDOC/src/tools/wiki.py +++ /dev/null @@ -1,61 +0,0 @@ -from typing import Union - -from langchain.docstore.base import Docstore -from langchain.docstore.document import Document - - - -class Wiki(Docstore): - """ - Wrapper around wikipedia API. - """ - - def __init__(self) -> None: - """Check that wikipedia package is installed.""" - try: - import wikipedia # noqa: F401 - except ImportError: - raise ValueError( - "Could not import wikipedia python package. " - "Please install it with `pip install wikipedia`." - ) - - @staticmethod - def fetch(searched_page: str) -> Union[str, Document]: - """ - Try to fetch for wiki page. - - If page exists, return the page summary, and a PageWithLookups object. - If page does not exist, return similar entries. - """ - import wikipedia - - try: - # wikipedia.set_lang("fr") - page_content = wikipedia.page(searched_page).content - url = wikipedia.page(searched_page).url - result: Union[str, Document] = Document( - page_content=page_content, metadata={"page": url} - ) - except wikipedia.PageError: - result = f"Could not find [{searched_page}]. Similar: {wikipedia.search(searched_page)}" - - except wikipedia.DisambiguationError: - result = f"Could not find [{searched_page}]. Similar: {wikipedia.search(searched_page)}" - return result - - def search(searched_context: str) -> [str]: - """ - Finds wiki page title in relation with the given context - """ - import wikipedia - - try: - # wikipedia.set_lang("fr") - page_title_list = wikipedia.search(searched_context) - result = page_title_list - except wikipedia.PageError: - result = f"Could not find [{searched_context}]." - return result - - diff --git a/spaces/Hexii/FoodVision/app.py b/spaces/Hexii/FoodVision/app.py deleted file mode 100644 index b8c785553fb44ff75a71a36cb8ef95cd19a17a21..0000000000000000000000000000000000000000 --- a/spaces/Hexii/FoodVision/app.py +++ /dev/null @@ -1,84 +0,0 @@ -### 1. Imports and class names setup -import gradio as gr -import os -import torch - -from model import create_effnetb2_model -from timeit import default_timer as timer -from typing import Tuple, Dict - -# Setup class names -with open("class_names.txt", "r") as f: # reading target labels from class_names.txt - class_names = [food_name.strip() for food_name in f.readlines()] - -### 2. Model and transforms preparation - -# Create model -effnetb2, effnetb2_transforms = create_effnetb2_model( - num_classes=len(class_names), - ) - -# Load saved weights -effnetb2.load_state_dict( - torch.load( - f="09_pretrained_effnetb2_fine_tuned_food101_full_20_percent.pth", - map_location=torch.device("cpu"), # load to CPU - ) -) - -### 3. Predict function ### - -# Create predict function -def predict(img) -> Tuple[Dict, float]: - """Transforms and performs a prediction on img and returns prediction and time taken. 
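-
-    Args:
-        img: A PIL image to classify (Gradio passes the uploaded image here).
-
-    Returns:
-        A tuple of (dict mapping each of the 101 food class names to its
-        predicted probability, prediction time in seconds).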
- """ - # Start the timer - start_time = timer() - - # Transform the target image and add a batch dimension - img = effnetb2_transforms(img).unsqueeze(0) - - # Put model into evaluation mode and turn on inference mode - effnetb2.eval() - with torch.inference_mode(): - # Pass the transformed image through the model and turn the prediction logits into prediction probabilities - pred_probs = torch.softmax(effnetb2(img), dim=1) - - # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter) - pred_labels_and_probs = { - class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names)) - } - - # Calculate the prediction time - pred_time = round(timer() - start_time, 5) - - # Return the prediction dictionary and prediction time - return pred_labels_and_probs, pred_time - - -### 4. Gradio app - -# Create title, description and article -title = "FoodVision 🍕🥙" -description = "A Simple Deep Learning Demo Application which is trained on EfficientNetB2 Fine Tuned computer vision model to classify food images of [101 different types](https://huggingface.co/spaces/Hexii/FoodVision/blob/main/class_names.txt)." -article = "Created by [Ansari Abu Huzaifa](https://github.com/Mr-Hexi)" -# Create examples list from "examples/" directory -example_list = [["examples/" + example] for example in os.listdir("examples")] - -# Create Gradio interface -app = gr.Interface( - fn=predict, - inputs=gr.Image(type="pil"), - outputs=[ - gr.Label(num_top_classes=3, label="Predictions"), - gr.Number(label="Prediction time (s)"), - ], - examples=example_list, - title=title, - description=description, - article=article, -) - - -# launch the App -app.launch() diff --git a/spaces/Hina4867/bingo/src/components/turn-counter.tsx b/spaces/Hina4867/bingo/src/components/turn-counter.tsx deleted file mode 100644 index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000 --- a/spaces/Hina4867/bingo/src/components/turn-counter.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import React from 'react' -import { Throttling } from '@/lib/bots/bing/types' - -export interface TurnCounterProps { - throttling?: Throttling -} - -export function TurnCounter({ throttling }: TurnCounterProps) { - if (!throttling) { - return null - } - - return ( -
    -
    - {throttling.numUserMessagesInConversation} - - {throttling.maxNumUserMessagesInConversation} -
    -
    -
    - ) -} diff --git a/spaces/Hise/rvc-hololive-models/infer_pack/models.py b/spaces/Hise/rvc-hololive-models/infer_pack/models.py deleted file mode 100644 index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000 --- a/spaces/Hise/rvc-hololive-models/infer_pack/models.py +++ /dev/null @@ -1,982 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - 
n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in 
range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, 
voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = 
self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, 
phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - 
gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y_lengths, ds - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - z_slice, ids_slice = commons.rand_slice_segments( - x, y_lengths, self.segment_size - ) - - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice - - def infer( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o, o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - 
norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tok.sh b/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tok.sh deleted file mode 100644 index ba2ec5a2f3f4794d2e528d3a6574bf05abe1d043..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tok.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2019-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
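To make the period trick in `DiscriminatorP` above concrete: the 1-D waveform is reflect-padded up to a multiple of the period and then folded into a (frames × period) grid, so the following `(kernel_size, 1)` convolutions only mix samples that lie whole periods apart. A minimal sketch with toy shapes, not part of the original models.py:

```python
import torch
import torch.nn.functional as F

period = 5
x = torch.randn(2, 1, 23)          # (batch, channels, samples); 23 is not a multiple of 5
b, c, t = x.shape
if t % period != 0:                # reflect-pad the tail so t becomes a multiple of the period
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x = x.view(b, c, t // period, period)
print(x.shape)                     # torch.Size([2, 1, 5, 5])
```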
-# - -set -e - -TOKENIZERS_SCRIPTS=tokenizers -INSTALL_PATH=$TOKENIZERS_SCRIPTS/thirdparty - -N_THREADS=8 - -lg=$1 - -MOSES=$INSTALL_PATH/mosesdecoder -REPLACE_UNICODE_PUNCT=$MOSES/scripts/tokenizer/replace-unicode-punctuation.perl -NORM_PUNC=$MOSES/scripts/tokenizer/normalize-punctuation.perl -REM_NON_PRINT_CHAR=$MOSES/scripts/tokenizer/remove-non-printing-char.perl -TOKENIZER=$MOSES/scripts/tokenizer/tokenizer.perl - -# special tokenization for Romanian -WMT16_SCRIPTS=$INSTALL_PATH/wmt16-scripts - -NORMALIZE_ROMANIAN=$WMT16_SCRIPTS/preprocess/normalise-romanian.py -REMOVE_DIACRITICS=$WMT16_SCRIPTS/preprocess/remove-diacritics.py - -# Burmese -MY_SEGMENT=$INSTALL_PATH/seg_my.py - -# Arabic -AR_TOKENIZER=$TOKENIZERS_SCRIPTS/tokenizer_ar.sh - -# Korean -KO_SEGMENT=$TOKENIZERS_SCRIPTS/seg_ko.sh - -# Japanese -JA_SEGMENT=$TOKENIZERS_SCRIPTS/seg_ja.sh - -# Indic -IN_TOKENIZER=$TOKENIZERS_SCRIPTS/tokenize_indic.py -INDIC_RESOURCES_PATH=$INSTALL_PATH/indic_nlp_resources - -# Thai -THAI_TOKENIZER=$TOKENIZERS_SCRIPTS/tokenize_thai.py - -# Chinese -CHINESE_TOKENIZER=$TOKENIZERS_SCRIPTS/tokenize_zh.py - -# Chinese -if [ "$lg" = "zh" ]; then - cat - | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l $lg | $REM_NON_PRINT_CHAR | python $CHINESE_TOKENIZER -# Thai -elif [ "$lg" = "th" ]; then - cat - | python $THAI_TOKENIZER -# Japanese -elif [ "$lg" = "ja" ]; then - cat - | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l $lg | $REM_NON_PRINT_CHAR | ${JA_SEGMENT} -# Korean -elif [ "$lg" = "ko" ]; then - cat - | $REM_NON_PRINT_CHAR | ${KO_SEGMENT} -# Romanian -elif [ "$lg" = "ro" ]; then - cat - | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l $lg | $REM_NON_PRINT_CHAR | $NORMALIZE_ROMANIAN | $REMOVE_DIACRITICS | $TOKENIZER -no-escape -threads $N_THREADS -l $lg -# Burmese -elif [ "$lg" = "my" ]; then - cat - | python ${MY_SEGMENT} -# Arabic -elif [ "$lg" = "ar" ]; then - cat - | ${AR_TOKENIZER} -# Indic -elif [ "$lg" = "ne" ]; then - cat - | python ${IN_TOKENIZER} $lg -elif [ "$lg" = "si" ]; then - cat - | python ${IN_TOKENIZER} $lg -elif [ "$lg" = "hi" ]; then - cat - | python ${IN_TOKENIZER} $lg -# other languages -else - cat - | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l $lg | $REM_NON_PRINT_CHAR | $TOKENIZER -no-escape -threads $N_THREADS -l $lg -fi diff --git a/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/__init__.py b/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/__init__.py deleted file mode 100644 index 42d21f35eb3dd33a053dcf0edd5eadd2dff11294..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . import commonsense_qa_task # noqa diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/clib/libnat/edit_dist.cpp b/spaces/ICML2022/OFA/fairseq/fairseq/clib/libnat/edit_dist.cpp deleted file mode 100644 index 9ffb60569d74d2868ed8113b7c787ef870e9da20..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/clib/libnat/edit_dist.cpp +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Copyright 2017-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the license found in the - * LICENSE file in the root directory of this source tree. 
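The tok.sh script above is a stdin/stdout filter that takes a single language-code argument, so it is normally driven from a shell pipe or a subprocess call. A hypothetical invocation, assuming the Moses and other third-party tokenizers referenced in the script have already been installed under `tokenizers/thirdparty`:

```python
import subprocess

raw = "Ein Beispielsatz, der tokenisiert werden soll.\n"
result = subprocess.run(
    ["bash", "tok.sh", "de"],   # "de" has no special case and falls through to the Moses tokenizer
    input=raw,
    capture_output=True,
    text=True,
    check=True,
)
print(result.stdout)
```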
- */ - -#include -#include -#include // @manual=//caffe2:torch_extension -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace ::std; - -vector> edit_distance2_with_dp( - vector& x, - vector& y) { - uint32_t lx = x.size(); - uint32_t ly = y.size(); - vector> d(lx + 1, vector(ly + 1)); - for (uint32_t i = 0; i < lx + 1; i++) { - d[i][0] = i; - } - for (uint32_t j = 0; j < ly + 1; j++) { - d[0][j] = j; - } - for (uint32_t i = 1; i < lx + 1; i++) { - for (uint32_t j = 1; j < ly + 1; j++) { - d[i][j] = - min(min(d[i - 1][j], d[i][j - 1]) + 1, - d[i - 1][j - 1] + 2 * (x.at(i - 1) == y.at(j - 1) ? 0 : 1)); - } - } - return d; -} - -vector> edit_distance2_backtracking( - vector>& d, - vector& x, - vector& y, - uint32_t terminal_symbol) { - vector seq; - vector> edit_seqs(x.size() + 2, vector()); - /* - edit_seqs: - 0~x.size() cell is the insertion sequences - last cell is the delete sequence - */ - - if (x.size() == 0) { - edit_seqs.at(0) = y; - return edit_seqs; - } - - uint32_t i = d.size() - 1; - uint32_t j = d.at(0).size() - 1; - - while ((i >= 0) && (j >= 0)) { - if ((i == 0) && (j == 0)) { - break; - } - - if ((j > 0) && (d.at(i).at(j - 1) < d.at(i).at(j))) { - seq.push_back(1); // insert - seq.push_back(y.at(j - 1)); - j--; - } else if ((i > 0) && (d.at(i - 1).at(j) < d.at(i).at(j))) { - seq.push_back(2); // delete - seq.push_back(x.at(i - 1)); - i--; - } else { - seq.push_back(3); // keep - seq.push_back(x.at(i - 1)); - i--; - j--; - } - } - - uint32_t prev_op, op, s, word; - prev_op = 0, s = 0; - for (uint32_t k = 0; k < seq.size() / 2; k++) { - op = seq.at(seq.size() - 2 * k - 2); - word = seq.at(seq.size() - 2 * k - 1); - if (prev_op != 1) { - s++; - } - if (op == 1) // insert - { - edit_seqs.at(s - 1).push_back(word); - } else if (op == 2) // delete - { - edit_seqs.at(x.size() + 1).push_back(1); - } else { - edit_seqs.at(x.size() + 1).push_back(0); - } - - prev_op = op; - } - - for (uint32_t k = 0; k < edit_seqs.size(); k++) { - if (edit_seqs[k].size() == 0) { - edit_seqs[k].push_back(terminal_symbol); - } - } - return edit_seqs; -} - -vector> edit_distance2_backtracking_with_delete( - vector>& d, - vector& x, - vector& y, - uint32_t terminal_symbol, - uint32_t deletion_symbol) { - vector seq; - vector> edit_seqs(x.size() + 1, vector()); - /* - edit_seqs: - 0~x.size() cell is the insertion sequences - last cell is the delete sequence - */ - - if (x.size() == 0) { - edit_seqs.at(0) = y; - return edit_seqs; - } - - uint32_t i = d.size() - 1; - uint32_t j = d.at(0).size() - 1; - - while ((i >= 0) && (j >= 0)) { - if ((i == 0) && (j == 0)) { - break; - } - - if ((j > 0) && (d.at(i).at(j - 1) < d.at(i).at(j))) { - seq.push_back(1); // insert - seq.push_back(y.at(j - 1)); - j--; - } else if ((i > 0) && (d.at(i - 1).at(j) < d.at(i).at(j))) { - seq.push_back(2); // delete - seq.push_back(x.at(i - 1)); - i--; - } else { - seq.push_back(3); // keep - seq.push_back(x.at(i - 1)); - i--; - j--; - } - } - - uint32_t prev_op, op, s, word; - prev_op = 0, s = 0; - for (uint32_t k = 0; k < seq.size() / 2; k++) { - op = seq.at(seq.size() - 2 * k - 2); - word = seq.at(seq.size() - 2 * k - 1); - if (prev_op != 1) { - s++; - } - if (op == 1) // insert - { - edit_seqs.at(s - 1).push_back(word); - } else if (op == 2) // delete - { - edit_seqs.at(s - 1).push_back(deletion_symbol); - } - - prev_op = op; - } - - for (uint32_t k = 0; k < edit_seqs.size(); k++) { - if (edit_seqs.at(k).size() == 0) { - edit_seqs.at(k).push_back(terminal_symbol); - } - } - return 
edit_seqs; -} - -vector compute_ed2( - vector>& xs, - vector>& ys) { - vector distances(xs.size()); - for (uint32_t i = 0; i < xs.size(); i++) { - vector> d = edit_distance2_with_dp(xs.at(i), ys.at(i)); - distances.at(i) = d.at(xs.at(i).size()).at(ys.at(i).size()); - } - return distances; -} - -vector>> suggested_ed2_path( - vector>& xs, - vector>& ys, - uint32_t terminal_symbol) { - vector>> seq(xs.size()); - for (uint32_t i = 0; i < xs.size(); i++) { - vector> d = edit_distance2_with_dp(xs.at(i), ys.at(i)); - seq.at(i) = - edit_distance2_backtracking(d, xs.at(i), ys.at(i), terminal_symbol); - } - return seq; -} - -vector>> suggested_ed2_path_with_delete( - vector>& xs, - vector>& ys, - uint32_t terminal_symbol, - uint32_t deletion_symbol) { - vector>> seq(xs.size()); - for (uint32_t i = 0; i < xs.size(); i++) { - vector> d = edit_distance2_with_dp(xs.at(i), ys.at(i)); - seq.at(i) = edit_distance2_backtracking_with_delete( - d, xs.at(i), ys.at(i), terminal_symbol, deletion_symbol); - } - return seq; -} - -PYBIND11_MODULE(libnat, m) { - m.def("compute_ed2", &compute_ed2, "compute_ed2"); - m.def("suggested_ed2_path", &suggested_ed2_path, "suggested_ed2_path"); - m.def( - "suggested_ed2_path_with_delete", - &suggested_ed2_path_with_delete, - "suggested_ed2_path_with_delete"); -} diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/lm_context_window_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/lm_context_window_dataset.py deleted file mode 100644 index 1a945927cf0d96719003685676a990737a3762b2..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/data/lm_context_window_dataset.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch -from typing import Dict - -from fairseq.data.monolingual_dataset import MonolingualDataset - -from . import FairseqDataset - - -class LMContextWindowDataset(FairseqDataset): - """ - Wraps a MonolingualDataset and provides more context for evaluation. - - Each item in the new dataset will have a maximum size of - ``tokens_per_sample + context_window``. 
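The `edit_distance2_with_dp` routine above is a Levenshtein-style dynamic program with unit insertion/deletion cost and a substitution cost of 2 (so substituting is never cheaper than deleting plus inserting); the backtracking functions then read insert/delete/keep operations off the filled table. A pure-Python mirror of the DP, shown only to make the recurrence easy to check against the C++:

```python
def edit_distance2_with_dp(x, y):
    lx, ly = len(x), len(y)
    d = [[0] * (ly + 1) for _ in range(lx + 1)]
    for i in range(lx + 1):
        d[i][0] = i                                              # delete everything in x[:i]
    for j in range(ly + 1):
        d[0][j] = j                                              # insert everything in y[:j]
    for i in range(1, lx + 1):
        for j in range(1, ly + 1):
            d[i][j] = min(
                min(d[i - 1][j], d[i][j - 1]) + 1,               # delete x[i-1] / insert y[j-1]
                d[i - 1][j - 1] + (0 if x[i - 1] == y[j - 1] else 2),  # keep / substitute
            )
    return d

assert edit_distance2_with_dp([1, 2, 3], [1, 3])[3][2] == 1      # one deletion
```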
- - Args: - dataset: dataset to wrap - tokens_per_sample (int): the max number of tokens in each dataset item - context_window (int): the number of accumulated tokens to add to each - dataset item - pad_idx (int): padding symbol - """ - - def __init__( - self, - dataset: MonolingualDataset, - tokens_per_sample: int, - context_window: int, - pad_idx: int, - ): - assert context_window > 0 - self.dataset = dataset - self.tokens_per_sample = tokens_per_sample - self.context_window = context_window - self.pad_idx = pad_idx - self.prev_tokens = np.empty([0]) - - def __getitem__(self, index): - return self.dataset[index] - - def __len__(self): - return len(self.dataset) - - def collater(self, samples) -> Dict: - sample = self.dataset.collater(samples) - - pad = self.pad_idx - max_sample_len = self.tokens_per_sample + self.context_window - - bsz, tsz = sample["net_input"]["src_tokens"].shape - start_idxs = [0] * bsz - toks = sample["net_input"]["src_tokens"] - lengths = sample["net_input"]["src_lengths"] - tgt = sample["target"] - new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64) - new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64) - sample_lens = toks.ne(pad).long().sum(dim=1).cpu() - for i in range(bsz): - sample_len = sample_lens[i] - extra = len(self.prev_tokens) + sample_len - max_sample_len - if extra > 0: - self.prev_tokens = self.prev_tokens[extra:] - pads = np.full(self.context_window - len(self.prev_tokens), pad) - new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads]) - new_tgt[ - i, len(self.prev_tokens) : len(self.prev_tokens) + len(tgt[i]) - ] = tgt[i] - start_idxs[i] = len(self.prev_tokens) - lengths[i] += len(self.prev_tokens) - self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window :] - sample["net_input"]["src_tokens"] = torch.from_numpy(new_toks) - sample["target"] = torch.from_numpy(new_tgt) - sample["start_indices"] = start_idxs - return sample - - def num_tokens(self, index): - return self.dataset.num_tokens(index) - - def size(self, index): - return self.dataset.size(index) - - def ordered_indices(self): - # NOTE we don't shuffle the data to retain access to the previous dataset elements - return np.arange(len(self.dataset)) - - @property - def supports_prefetch(self): - return getattr(self.dataset, "supports_prefetch", False) - - def prefetch(self, indices): - return self.dataset.prefetch(indices) diff --git a/spaces/IPN/streamlit_demo/app.py b/spaces/IPN/streamlit_demo/app.py deleted file mode 100644 index 19ca2c09d9c4b9fd898e794b5c316a283de84bc6..0000000000000000000000000000000000000000 --- a/spaces/IPN/streamlit_demo/app.py +++ /dev/null @@ -1,8 +0,0 @@ -import streamlit as st - -st.title('This is a title') - -x = st.slider('Select a value') -st.write(x, 'squared is', x * x) - -st.markdown('Este es un demo de cómo funciona streamlit!') \ No newline at end of file diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/model/__init__.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/model/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py b/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py deleted file mode 100644 index 60d52eaa1ab4bc380e282067db6bf624589289cd..0000000000000000000000000000000000000000 --- 
a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +++ /dev/null @@ -1,623 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Callable, List, Optional, Union - -import numpy as np -import torch - -import PIL -from diffusers.utils import is_accelerate_available -from packaging import version -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer - -from ...configuration_utils import FrozenDict -from ...models import AutoencoderKL, UNet2DConditionModel -from ...pipeline_utils import DiffusionPipeline -from ...schedulers import ( - DDIMScheduler, - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - LMSDiscreteScheduler, - PNDMScheduler, -) -from ...utils import PIL_INTERPOLATION, deprecate, logging -from . import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker - - -logger = logging.get_logger(__name__) - - -def preprocess_image(image): - w, h = image.size - w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 - image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return 2.0 * image - 1.0 - - -def preprocess_mask(mask, scale_factor=8): - mask = mask.convert("L") - w, h = mask.size - w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 - mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) - mask = np.array(mask).astype(np.float32) / 255.0 - mask = np.tile(mask, (4, 1, 1)) - mask = mask[None].transpose(0, 1, 2, 3) # what does this step do? - mask = 1 - mask # repaint white, keep black - mask = torch.from_numpy(mask) - return mask - - -class StableDiffusionInpaintPipelineLegacy(DiffusionPipeline): - r""" - Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - _optional_components = ["safety_checker", "feature_extractor"] - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.__init__ - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: Union[ - DDIMScheduler, - PNDMScheduler, - LMSDiscreteScheduler, - EulerDiscreteScheduler, - EulerAncestralDiscreteScheduler, - DPMSolverMultistepScheduler, - ], - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPFeatureExtractor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file" - ) - deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." - " `clip_sample` should be set to False in the configuration file. Please make sure to update the" - " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" - " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" - " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" - ) - deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["clip_sample"] = False - scheduler._internal_dict = FrozenDict(new_config) - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( - version.parse(unet.config._diffusers_version).base_version - ) < version.parse("0.9.0.dev0") - is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 - if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: - deprecation_message = ( - "The configuration file of the unet has set the default `sample_size` to smaller than" - " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" - " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" - " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" - " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" - " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" - " in the config might lead to incorrect results in future versions. If you have downloaded this" - " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" - " the `unet/config.json` file" - ) - deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(unet.config) - new_config["sample_size"] = 64 - unet._internal_dict = FrozenDict(new_config) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_attention_slicing - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, - `attention_head_dim` must be a multiple of `slice_size`. 
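Slicing trades a small amount of speed for memory by never materialising the full attention-score tensor at once: the fused batch-head dimension is processed `slice_size` groups at a time. A toy stand-in for the idea, not the diffusers implementation:

```python
import torch

def sliced_attention(q, k, v, slice_size):
    # q, k, v: (batch * heads, seq_len, head_dim); only `slice_size` score
    # matrices of shape (seq_len, seq_len) exist at any one moment.
    out = torch.empty_like(q)
    scale = q.shape[-1] ** -0.5
    for i in range(0, q.shape[0], slice_size):
        s = slice(i, i + slice_size)
        attn = torch.softmax(q[s] @ k[s].transpose(-1, -2) * scale, dim=-1)
        out[s] = attn @ v[s]
    return out

q, k, v = (torch.randn(8, 77, 40) for _ in range(3))
assert torch.allclose(sliced_attention(q, k, v, 2), sliced_attention(q, k, v, 8), atol=1e-5)
```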
- """ - if slice_size == "auto": - if isinstance(self.unet.config.attention_head_dim, int): - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = self.unet.config.attention_head_dim // 2 - else: - # if `attention_head_dim` is a list, take the smallest head size - slice_size = min(self.unet.config.attention_head_dim) - - self.unet.set_attention_slice(slice_size) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_attention_slicing - def disable_attention_slicing(self): - r""" - Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go - back to computing attention in one step. - """ - # set slice_size = `None` to disable `attention slicing` - self.enable_attention_slicing(None) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload - def enable_sequential_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, - text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a - `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. - """ - if is_accelerate_available(): - from accelerate import cpu_offload - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - device = torch.device(f"cuda:{gpu_id}") - - for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: - if cpu_offloaded_model is not None: - cpu_offload(cpu_offloaded_model, device) - - if self.safety_checker is not None: - # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate - # fix by only offloading self.safety_checker for now - cpu_offload(self.safety_checker.vision_model, device) - - @property - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device - def _execution_device(self): - r""" - Returns the device on which the pipeline's models will be executed. After calling - `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module - hooks. - """ - if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): - return self.device - for module in self.unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt - def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `list(int)`): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). 
- """ - batch_size = len(prompt) if isinstance(prompt, list) else 1 - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids - - if not torch.equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - text_embeddings = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - text_embeddings = text_embeddings[0] - - # duplicate text embeddings for each generation per prompt, using mps friendly method - bs_embed, seq_len, _ = text_embeddings.shape - text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) - text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - uncond_embeddings = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - uncond_embeddings = uncond_embeddings[0] - - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = uncond_embeddings.shape[1] - uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) - uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. 
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - return text_embeddings - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - else: - has_nsfw_concept = None - return image, has_nsfw_concept - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - def decode_latents(self, latents): - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.check_inputs - def check_inputs(self, prompt, strength, callback_steps): - if not isinstance(prompt, str) and not isinstance(prompt, list): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [1.0, 1.0] but is {strength}") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." 
- ) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps - def get_timesteps(self, num_inference_steps, strength, device): - # get the original timestep using init_timestep - offset = self.scheduler.config.get("steps_offset", 0) - init_timestep = int(num_inference_steps * strength) + offset - init_timestep = min(init_timestep, num_inference_steps) - - t_start = max(num_inference_steps - init_timestep + offset, 0) - timesteps = self.scheduler.timesteps[t_start:] - - return timesteps, num_inference_steps - t_start - - def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator): - image = image.to(device=self.device, dtype=dtype) - init_latent_dist = self.vae.encode(image).latent_dist - init_latents = init_latent_dist.sample(generator=generator) - init_latents = 0.18215 * init_latents - - # Expand init_latents for batch_size and num_images_per_prompt - init_latents = torch.cat([init_latents] * batch_size * num_images_per_prompt, dim=0) - init_latents_orig = init_latents - - # add noise to latents using the timesteps - noise = torch.randn(init_latents.shape, generator=generator, device=self.device, dtype=dtype) - init_latents = self.scheduler.add_noise(init_latents, noise, timestep) - latents = init_latents - return latents, init_latents_orig, noise - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - image: Union[torch.FloatTensor, PIL.Image.Image], - mask_image: Union[torch.FloatTensor, PIL.Image.Image], - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: Optional[float] = 0.0, - generator: Optional[torch.Generator] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: Optional[int] = 1, - **kwargs, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - image (`torch.FloatTensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. This is the image whose masked region will be inpainted. - mask_image (`torch.FloatTensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be - replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a - PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should - contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` - is 1, the denoising process will be run on the masked area for the full number of iterations specified - in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to - that region the larger the `strength`. If `strength` is 0, no inpainting will occur. - num_inference_steps (`int`, *optional*, defaults to 50): - The reference number of denoising steps. 
More denoising steps usually lead to a higher quality image at - the expense of slower inference. This parameter will be modulated by `strength`, as explained above. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - message = "Please use `image` instead of `init_image`." - init_image = deprecate("init_image", "0.12.0", message, take_from=kwargs) - image = init_image or image - - # 1. Check inputs - self.check_inputs(prompt, strength, callback_steps) - - # 2. Define call parameters - batch_size = 1 if isinstance(prompt, str) else len(prompt) - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - text_embeddings = self._encode_prompt( - prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt - ) - - # 4. 
Preprocess image and mask - if not isinstance(image, torch.FloatTensor): - image = preprocess_image(image) - - if not isinstance(mask_image, torch.FloatTensor): - mask_image = preprocess_mask(mask_image, self.vae_scale_factor) - - # 5. set timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) - latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) - - # 6. Prepare latent variables - # encode the init image into latents and scale the latents - latents, init_latents_orig, noise = self.prepare_latents( - image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, device, generator - ) - - # 7. Prepare mask latent - mask = mask_image.to(device=self.device, dtype=latents.dtype) - mask = torch.cat([mask] * batch_size * num_images_per_prompt) - - # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 9. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - # masking - init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) - - latents = (init_latents_proper * mask) + (latents * (1 - mask)) - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # 10. Post-processing - image = self.decode_latents(latents) - - # 11. Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) - - # 12. Convert to PIL - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/JuanHaunted/humming_space/app.py b/spaces/JuanHaunted/humming_space/app.py deleted file mode 100644 index 5066e2472b380e020198846b54bcd12d77df6570..0000000000000000000000000000000000000000 --- a/spaces/JuanHaunted/humming_space/app.py +++ /dev/null @@ -1,42 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb. 
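The defining step of the legacy inpainting loop above is the blend at the end of each iteration: the original image latents are re-noised to the current timestep and pasted back wherever the (inverted) mask marks content to keep, so only the masked-out region is actually re-generated. A minimal sketch with dummy tensors; the real loop uses `scheduler.add_noise` and the VAE-scaled mask:

```python
import torch

mask = torch.zeros(1, 4, 64, 64)                 # 1 = keep the original content, 0 = repaint
mask[..., :32] = 1.0                             # toy mask: keep the left half of the image

init_latents_orig = torch.randn(1, 4, 64, 64)    # latents of the source image
latents = torch.randn(1, 4, 64, 64)              # current denoised latents at step t
noise = torch.randn_like(init_latents_orig)

# stand-in for scheduler.add_noise(init_latents_orig, noise, t) at the current timestep
init_latents_proper = 0.9 * init_latents_orig + 0.1 * noise

latents = init_latents_proper * mask + latents * (1 - mask)
```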
- -# %% auto 0 -__all__ = ['learn', 'species', 'image', 'label', 'examples', 'intf', 'classify_image'] - -# %% app.ipynb 1 -from fastai.vision.all import * -import timm -import gradio as gr - -# %% app.ipynb 3 -learn = load_learner('hummingbird_classifier_convb.pkl') - -# %% app.ipynb 5 -species = ('Adelomyia melanogenys', 'Aglaiocercus coelestis', 'Aglaiocercus kingii', 'Amazilia amabilis', - 'Amazilia franciae', 'Amazilia rosenbergi', 'Amazilia saucerottei', 'Amazilia tzacatl', - 'Androdon aequatorialis', 'Anthracothorax nigricollis', 'Boissonneaua flavescens', - 'Boissonneaua jardini', 'Chaetocercus heliodor', 'Chaetocercus mulsant', 'Chalybura buffonii', - 'Chalybura urochrysia', 'Chlorostilbon gibsoni', 'Chlorostilbon melanorhynchus', - 'Chrysolampis mosquitus', 'Coeligena coeligena', 'Coeligena torquata', 'Colibri coruscans', - 'Colibri delphinae', 'Colibri thalassinus', 'Discosura conversii', 'Doryfera ludovicae', - 'Ensifera ensifera', 'Eriocnemis vestita', 'Eutoxeres aquila', 'Florisuga mellivora', - 'Glaucis hirsutus', 'Haplophaedia aureliae', 'Heliangelus exortis', 'Heliodoxa imperatrix', - 'Heliodoxa jacula', 'Heliodoxa leadbeateri', 'Heliodoxa rubinoides', 'Heliomaster longirostris', - 'Heliothryx barroti', 'Hylocharis eliciae', 'Klais guimeti', 'Lafresnaya lafresnayi', - 'Lepidopyga coeruleogularis', 'Lepidopyga goudoti', 'Metallura tyrianthina', 'Ocreatus underwoodii', - 'Phaethornis anthophilus', 'Phaethornis guy', 'Phaethornis longirostris', 'Phaethornis striigularis', - 'Phaethornis syrmatophorus', 'Phaethornis yaruqui', 'Ramphomicron microrhynchum', 'Schistes geoffroyi', - 'Threnetes ruckeri', 'Urochroa bougueri') - -def classify_image(img): - pred, idx, probs = learn.predict(img) - return dict(zip(species, map(float, probs))) - -# %% app.ipynb 7 -title = 'Colombia - Antioquia Hummingbird Species Classifier' -image = gr.components.Image(shape=(192, 192)) -label = gr.components.Label() -examples = ['lead.webp', 'geo.jpg', 'delphinae.jpg', 'jardini.jpg', 'tzacatl.jfif', 'micro.jfif'] - -intf = gr.Interface(fn=classify_image, title=title, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False) diff --git a/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index ee3171bcb7c4a5066560723108b56e055f18be45..0000000000000000000000000000000000000000 --- a/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,90 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class DioF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - 
i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/Karumoon/test007/README.md b/spaces/Karumoon/test007/README.md deleted file mode 100644 index 0a273223cbf4b7f5c051e83ec5279cd6765a23b4..0000000000000000000000000000000000000000 --- a/spaces/Karumoon/test007/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Test007 -emoji: 👁 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KyanChen/RSPrompter/mmdet/datasets/transforms/formatting.py b/spaces/KyanChen/RSPrompter/mmdet/datasets/transforms/formatting.py deleted file mode 100644 index 26ee155e797fa74e6581d450fef847ce8c7a3c20..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/datasets/transforms/formatting.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -from mmcv.transforms import to_tensor -from mmcv.transforms.base import BaseTransform -from mmengine.structures import InstanceData, PixelData - -from mmdet.registry import TRANSFORMS -from mmdet.structures import DetDataSample -from mmdet.structures.bbox import BaseBoxes - - -@TRANSFORMS.register_module() -class PackDetInputs(BaseTransform): - """Pack the inputs data for the detection / semantic segmentation / - panoptic segmentation. - - The ``img_meta`` item is always populated. The contents of the - ``img_meta`` dictionary depends on ``meta_keys``. By default this includes: - - - ``img_id``: id of the image - - - ``img_path``: path to the image file - - - ``ori_shape``: original shape of the image as a tuple (h, w) - - - ``img_shape``: shape of the image input to the network as a tuple \ - (h, w). Note that images may be zero padded on the \ - bottom/right if the batch tensor is larger than this shape. 
- - - ``scale_factor``: a float indicating the preprocessing scale - - - ``flip``: a boolean indicating if image flip transform was used - - - ``flip_direction``: the flipping direction - - Args: - meta_keys (Sequence[str], optional): Meta keys to be converted to - ``mmcv.DataContainer`` and collected in ``data[img_metas]``. - Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor', 'flip', 'flip_direction')`` - """ - mapping_table = { - 'gt_bboxes': 'bboxes', - 'gt_bboxes_labels': 'labels', - 'gt_masks': 'masks' - } - - def __init__(self, - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor', 'flip', 'flip_direction')): - self.meta_keys = meta_keys - - def transform(self, results: dict) -> dict: - """Method to pack the input data. - - Args: - results (dict): Result dict from the data pipeline. - - Returns: - dict: - - - 'inputs' (obj:`torch.Tensor`): The forward data of models. - - 'data_sample' (obj:`DetDataSample`): The annotation info of the - sample. - """ - packed_results = dict() - if 'img' in results: - img = results['img'] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - # To improve the computational speed by by 3-5 times, apply: - # If image is not contiguous, use - # `numpy.transpose()` followed by `numpy.ascontiguousarray()` - # If image is already contiguous, use - # `torch.permute()` followed by `torch.contiguous()` - # Refer to https://github.com/open-mmlab/mmdetection/pull/9533 - # for more details - if not img.flags.c_contiguous: - img = np.ascontiguousarray(img.transpose(2, 0, 1)) - img = to_tensor(img) - else: - img = to_tensor(img).permute(2, 0, 1).contiguous() - - packed_results['inputs'] = img - - if 'gt_ignore_flags' in results: - valid_idx = np.where(results['gt_ignore_flags'] == 0)[0] - ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0] - - data_sample = DetDataSample() - instance_data = InstanceData() - ignore_instance_data = InstanceData() - - for key in self.mapping_table.keys(): - if key not in results: - continue - if key == 'gt_masks' or isinstance(results[key], BaseBoxes): - if 'gt_ignore_flags' in results: - instance_data[ - self.mapping_table[key]] = results[key][valid_idx] - ignore_instance_data[ - self.mapping_table[key]] = results[key][ignore_idx] - else: - instance_data[self.mapping_table[key]] = results[key] - else: - if 'gt_ignore_flags' in results: - instance_data[self.mapping_table[key]] = to_tensor( - results[key][valid_idx]) - ignore_instance_data[self.mapping_table[key]] = to_tensor( - results[key][ignore_idx]) - else: - instance_data[self.mapping_table[key]] = to_tensor( - results[key]) - data_sample.gt_instances = instance_data - data_sample.ignored_instances = ignore_instance_data - - if 'proposals' in results: - proposals = InstanceData( - bboxes=to_tensor(results['proposals']), - scores=to_tensor(results['proposals_scores'])) - data_sample.proposals = proposals - - if 'gt_seg_map' in results: - gt_sem_seg_data = dict( - sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy())) - data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data) - - img_meta = {} - for key in self.meta_keys: - assert key in results, f'`{key}` is not found in `results`, ' \ - f'the valid keys are {list(results)}.' 
- img_meta[key] = results[key] - - data_sample.set_metainfo(img_meta) - packed_results['data_samples'] = data_sample - - return packed_results - - def __repr__(self) -> str: - repr_str = self.__class__.__name__ - repr_str += f'(meta_keys={self.meta_keys})' - return repr_str - - -@TRANSFORMS.register_module() -class ToTensor: - """Convert some results to :obj:`torch.Tensor` by given keys. - - Args: - keys (Sequence[str]): Keys that need to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert data in results to :obj:`torch.Tensor`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted - to :obj:`torch.Tensor`. - """ - for key in self.keys: - results[key] = to_tensor(results[key]) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@TRANSFORMS.register_module() -class ImageToTensor: - """Convert image to :obj:`torch.Tensor` by given keys. - - The dimension order of input image is (H, W, C). The pipeline will convert - it to (C, H, W). If only 2 dimension (H, W) is given, the output would be - (1, H, W). - - Args: - keys (Sequence[str]): Key of images to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and permuted to (C, H, W) order. - """ - for key in self.keys: - img = results[key] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - results[key] = to_tensor(img).permute(2, 0, 1).contiguous() - - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@TRANSFORMS.register_module() -class Transpose: - """Transpose some results by given keys. - - Args: - keys (Sequence[str]): Keys of results to be transposed. - order (Sequence[int]): Order of transpose. - """ - - def __init__(self, keys, order): - self.keys = keys - self.order = order - - def __call__(self, results): - """Call function to transpose the channel order of data in results. - - Args: - results (dict): Result dict contains the data to transpose. - - Returns: - dict: The result dict contains the data transposed to \ - ``self.order``. - """ - for key in self.keys: - results[key] = results[key].transpose(self.order) - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, order={self.order})' - - -@TRANSFORMS.register_module() -class WrapFieldsToLists: - """Wrap fields of the data dictionary into lists for evaluation. - - This class can be used as a last step of a test or validation - pipeline for single image evaluation or inference. - - Example: - >>> test_pipeline = [ - >>> dict(type='LoadImageFromFile'), - >>> dict(type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - >>> dict(type='Pad', size_divisor=32), - >>> dict(type='ImageToTensor', keys=['img']), - >>> dict(type='Collect', keys=['img']), - >>> dict(type='WrapFieldsToLists') - >>> ] - """ - - def __call__(self, results): - """Call function to wrap fields into lists. - - Args: - results (dict): Result dict contains the data to wrap. 
- - Returns: - dict: The result dict where value of ``self.keys`` are wrapped \ - into list. - """ - - # Wrap dict fields into lists - for key, val in results.items(): - results[key] = [val] - return results - - def __repr__(self): - return f'{self.__class__.__name__}()' diff --git a/spaces/KyanChen/RSPrompter/mmpl/engine/hooks/ppyoloe_param_scheduler_hook.py b/spaces/KyanChen/RSPrompter/mmpl/engine/hooks/ppyoloe_param_scheduler_hook.py deleted file mode 100644 index 26dfe6ef2d5cf590ea381efb3e42cdc1c5492361..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/engine/hooks/ppyoloe_param_scheduler_hook.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Optional - -from mmengine.hooks import ParamSchedulerHook -from mmengine.runner import Runner - -from mmyolo.registry import HOOKS - - -@HOOKS.register_module() -class PPYOLOEParamSchedulerHook(ParamSchedulerHook): - """A hook to update learning rate and momentum in optimizer of PPYOLOE. We - use this hook to implement adaptive computation for `warmup_total_iters`, - which is not possible with the built-in ParamScheduler in mmyolo. - - Args: - warmup_min_iter (int): Minimum warmup iters. Defaults to 1000. - start_factor (float): The number we multiply learning rate in the - first epoch. The multiplication factor changes towards end_factor - in the following epochs. Defaults to 0. - warmup_epochs (int): Epochs for warmup. Defaults to 5. - min_lr_ratio (float): Minimum learning rate ratio. - total_epochs (int): In PPYOLOE, `total_epochs` is set to - training_epochs x 1.2. Defaults to 360. - """ - priority = 9 - - def __init__(self, - warmup_min_iter: int = 1000, - start_factor: float = 0., - warmup_epochs: int = 5, - min_lr_ratio: float = 0.0, - total_epochs: int = 360): - - self.warmup_min_iter = warmup_min_iter - self.start_factor = start_factor - self.warmup_epochs = warmup_epochs - self.min_lr_ratio = min_lr_ratio - self.total_epochs = total_epochs - - self._warmup_end = False - self._base_lr = None - - def before_train(self, runner: Runner): - """Operations before train. - - Args: - runner (Runner): The runner of the training process. - """ - optimizer = runner.optim_wrapper.optimizer - for group in optimizer.param_groups: - # If the param is never be scheduled, record the current value - # as the initial value. - group.setdefault('initial_lr', group['lr']) - - self._base_lr = [ - group['initial_lr'] for group in optimizer.param_groups - ] - self._min_lr = [i * self.min_lr_ratio for i in self._base_lr] - - def before_train_iter(self, - runner: Runner, - batch_idx: int, - data_batch: Optional[dict] = None): - """Operations before each training iteration. - - Args: - runner (Runner): The runner of the training process. - batch_idx (int): The index of the current batch in the train loop. - data_batch (dict or tuple or list, optional): Data from dataloader. 
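- Note:
- The warmup length is recomputed here on every iteration as
- ``max(round(warmup_epochs * len(train_dataloader)), warmup_min_iter)``,
- so it adapts to the actual dataloader length.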
- """ - cur_iters = runner.iter - optimizer = runner.optim_wrapper.optimizer - dataloader_len = len(runner.train_dataloader) - - # The minimum warmup is self.warmup_min_iter - warmup_total_iters = max( - round(self.warmup_epochs * dataloader_len), self.warmup_min_iter) - - if cur_iters <= warmup_total_iters: - # warm up - alpha = cur_iters / warmup_total_iters - factor = self.start_factor * (1 - alpha) + alpha - - for group_idx, param in enumerate(optimizer.param_groups): - param['lr'] = self._base_lr[group_idx] * factor - else: - for group_idx, param in enumerate(optimizer.param_groups): - total_iters = self.total_epochs * dataloader_len - lr = self._min_lr[group_idx] + ( - self._base_lr[group_idx] - - self._min_lr[group_idx]) * 0.5 * ( - math.cos((cur_iters - warmup_total_iters) * math.pi / - (total_iters - warmup_total_iters)) + 1.0) - param['lr'] = lr diff --git a/spaces/LamaAlQarni/Fire-Smoke-Detector/README.md b/spaces/LamaAlQarni/Fire-Smoke-Detector/README.md deleted file mode 100644 index 91a409cae6bbcfd0c4b47ad749d638f2133d9e0d..0000000000000000000000000000000000000000 --- a/spaces/LamaAlQarni/Fire-Smoke-Detector/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fire Smoke Detector -emoji: 🐨 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/LanguageBind/LanguageBind/data/base_datasets.py b/spaces/LanguageBind/LanguageBind/data/base_datasets.py deleted file mode 100644 index 6e249cf59bdf48f5da4c4949036c034a8110d3b0..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/data/base_datasets.py +++ /dev/null @@ -1,159 +0,0 @@ -import contextlib -import io -import json -import logging -import os.path -import random -import re -import time - -import pandas as pd - -from open_clip import get_tokenizer -from open_clip.factory import HF_HUB_PREFIX -from .process_video import load_and_transform_video, get_video_transform -from .process_audio import load_and_transform_audio, get_audio_transform -from .process_text import load_and_transform_text -from .process_depth import load_and_transform_depth, get_depth_transform -from .process_thermal import load_and_transform_thermal, get_thermal_transform - -import argparse -from os.path import join as opj -from torch.utils.data import Dataset, DataLoader -from tqdm import tqdm - - - - -class VAT_dataset(Dataset): - def __init__(self, args): - super().__init__() - self.video_decode_backend = args.video_decode_backend - self.num_frames = args.num_frames - self.text_type = args.text_type - self.chatgpt = self.text_type == 'polish_mplug' - self.title = self.text_type == 'raw' - self.data_root = '/apdcephfs_cq3/share_1311970/A_Youtube' - with open(args.train_data, 'r') as f: - self.id2title_folder_caps = json.load(f) - self.ids = list(self.id2title_folder_caps.keys())[:args.train_num_samples] - - self.clip_type = args.clip_type - - self.num_mel_bins = args.num_mel_bins - self.target_length = args.target_length - self.audio_sample_rate = args.audio_sample_rate - self.audio_mean = args.audio_mean - self.audio_std = args.audio_std - - # self.audio_error_file = open('./audio_error_id.txt', 'w') - - self.tokenizer = get_tokenizer(HF_HUB_PREFIX + args.model, cache_dir=args.cache_dir) - self.video_transform = get_video_transform(args) - self.audio_transform = get_audio_transform(args) - self.depth_transform = get_depth_transform(args) - self.thermal_transform = 
get_thermal_transform(args) - - def __len__(self): - return len(self.ids) - # return self.id2title_folder_caps.shape[0] - - def __getitem__(self, idx): - id = self.ids[idx] - folder = self.id2title_folder_caps[id]['folder'] - try: - text_output = self.get_text(id) - input_ids, attention_mask = text_output['input_ids'], text_output['attention_mask'] - if self.clip_type == 'vl': - matched_modality = self.get_video(id, folder) - elif self.clip_type == 'al': - matched_modality = self.get_audio(id, folder) - elif self.clip_type == 'dl': - matched_modality = self.get_depth(id, folder) - elif self.clip_type == 'tl': - matched_modality = self.get_thermal(id, folder) - return matched_modality['pixel_values'], input_ids, attention_mask - except Exception as error_msg: - logging.info(f"Failed at {id} with \"{error_msg}\"") - return self.__getitem__(random.randint(0, self.__len__()-1)) - - - def get_video(self, id, folder): - video_path = opj(self.data_root, folder, f'{id}.mp4') - video = load_and_transform_video(video_path, self.video_transform, - video_decode_backend=self.video_decode_backend, num_frames=self.num_frames) - return video - - def get_audio(self, id, folder): - ''' - audio_path = opj(self.data_root, folder, f'{id}.mp3') - if os.path.exists(audio_path): - pass - else: - audio_path = audio_path[:-4] + '.m4a' - if os.path.exists(audio_path): - pass - else: - audio_path = audio_path[:-4] + '.wav' - if not os.path.exists(audio_path): - # self.audio_error_file.write(audio_path[:-4] + '\n') - raise FileNotFoundError(f'Not found audio file at \'{audio_path[:-4]}\' with .mp3 .m4a .wav') - # AudioSegment.from_file(audio_path).export(audio_path[:-4] + '.mp3', format='mp3') - # audio_path = opj(self.data_root, folder, f'{id}.mp3') - audio = load_and_transform_audio(audio_path, self.audio_transform) - ''' - - audio_path = opj(self.data_root, folder+'_ffmpeg_mp3', f'{id}.mp3') - audio = load_and_transform_audio(audio_path, self.audio_transform) - - - return audio - - def get_text(self, id): - text = self.id2title_folder_caps[id][self.text_type] - text_output = load_and_transform_text(text, self.tokenizer, title=self.title) - return text_output - - def get_depth(self, id, folder): - depth_folder = opj(self.data_root, folder, f'{id}_depth_f8glpn_folder') - # random_id = random.randint(0, 7) - random_id = 3 - depth_path = os.path.join(depth_folder, f'{random_id}.png') - depth = load_and_transform_depth(depth_path, self.depth_transform) - return depth - - def get_thermal(self, id, folder): - thermal_folder = opj(self.data_root, folder, f'{id}_thermal_f8_folder') - # random_id = random.randint(0, 7) - random_id = 3 - thermal_path = os.path.join(thermal_folder, f'{random_id}.jpg') - thermal = load_and_transform_thermal(thermal_path, self.thermal_transform) - return thermal - - - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser('Pre-training', add_help=False) - parser.add_argument('--num_frames', default=8, type=float, help='') - parser.add_argument('--workers', default=10, type=int, help='') - args = parser.parse_args() - - args.cache_dir = 'D:\Omni-modal-hf' - args.num_frames = 8 - args.clip_type = 'vl' - args.num_mel_bins = 128 - args.target_length = 1024 - args.audio_sample_rate = 16000 - args.audio_mean = 1 - args.audio_std = 1 - args.rank = 0 - args.batch_size = 16 - - train_dataset = VAT_dataset(args) - load = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.workers) - - for samples in tqdm((load)): - matched_modality, input_ids, attention_mask = samples - # 
print(video.shape, text.shape) \ No newline at end of file diff --git a/spaces/LaynzKunz/RCVAICOVER/src/rmvpe.py b/spaces/LaynzKunz/RCVAICOVER/src/rmvpe.py deleted file mode 100644 index 7e83aa80dafc81a3f42a13933b3c5b220fa176e2..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/RCVAICOVER/src/rmvpe.py +++ /dev/null @@ -1,409 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from librosa.filters import mel - - -class BiGRU(nn.Module): - def __init__(self, input_features, hidden_features, num_layers): - super(BiGRU, self).__init__() - self.gru = nn.GRU( - input_features, - hidden_features, - num_layers=num_layers, - batch_first=True, - bidirectional=True, - ) - - def forward(self, x): - return self.gru(x)[0] - - -class ConvBlockRes(nn.Module): - def __init__(self, in_channels, out_channels, momentum=0.01): - super(ConvBlockRes, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - nn.Conv2d( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - if in_channels != out_channels: - self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - self.is_shortcut = True - else: - self.is_shortcut = False - - def forward(self, x): - if self.is_shortcut: - return self.conv(x) + self.shortcut(x) - else: - return self.conv(x) + x - - -class Encoder(nn.Module): - def __init__( - self, - in_channels, - in_size, - n_encoders, - kernel_size, - n_blocks, - out_channels=16, - momentum=0.01, - ): - super(Encoder, self).__init__() - self.n_encoders = n_encoders - self.bn = nn.BatchNorm2d(in_channels, momentum=momentum) - self.layers = nn.ModuleList() - self.latent_channels = [] - for i in range(self.n_encoders): - self.layers.append( - ResEncoderBlock( - in_channels, out_channels, kernel_size, n_blocks, momentum=momentum - ) - ) - self.latent_channels.append([out_channels, in_size]) - in_channels = out_channels - out_channels *= 2 - in_size //= 2 - self.out_size = in_size - self.out_channel = out_channels - - def forward(self, x): - concat_tensors = [] - x = self.bn(x) - for i in range(self.n_encoders): - _, x = self.layers[i](x) - concat_tensors.append(_) - return x, concat_tensors - - -class ResEncoderBlock(nn.Module): - def __init__( - self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01 - ): - super(ResEncoderBlock, self).__init__() - self.n_blocks = n_blocks - self.conv = nn.ModuleList() - self.conv.append(ConvBlockRes(in_channels, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv.append(ConvBlockRes(out_channels, out_channels, momentum)) - self.kernel_size = kernel_size - if self.kernel_size is not None: - self.pool = nn.AvgPool2d(kernel_size=kernel_size) - - def forward(self, x): - for i in range(self.n_blocks): - x = self.conv[i](x) - if self.kernel_size is not None: - return x, self.pool(x) - else: - return x - - -class Intermediate(nn.Module): # - def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01): - super(Intermediate, self).__init__() - self.n_inters = n_inters - self.layers = nn.ModuleList() - self.layers.append( - ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum) - ) - for i in range(self.n_inters - 1): 
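- # the remaining intermediate blocks keep the channel width fixed and use no pooling (kernel_size=None)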
- self.layers.append( - ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum) - ) - - def forward(self, x): - for i in range(self.n_inters): - x = self.layers[i](x) - return x - - -class ResDecoderBlock(nn.Module): - def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01): - super(ResDecoderBlock, self).__init__() - out_padding = (0, 1) if stride == (1, 2) else (1, 1) - self.n_blocks = n_blocks - self.conv1 = nn.Sequential( - nn.ConvTranspose2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=stride, - padding=(1, 1), - output_padding=out_padding, - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - self.conv2 = nn.ModuleList() - self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum)) - - def forward(self, x, concat_tensor): - x = self.conv1(x) - x = torch.cat((x, concat_tensor), dim=1) - for i in range(self.n_blocks): - x = self.conv2[i](x) - return x - - -class Decoder(nn.Module): - def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): - super(Decoder, self).__init__() - self.layers = nn.ModuleList() - self.n_decoders = n_decoders - for i in range(self.n_decoders): - out_channels = in_channels // 2 - self.layers.append( - ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum) - ) - in_channels = out_channels - - def forward(self, x, concat_tensors): - for i in range(self.n_decoders): - x = self.layers[i](x, concat_tensors[-1 - i]) - return x - - -class DeepUnet(nn.Module): - def __init__( - self, - kernel_size, - n_blocks, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(DeepUnet, self).__init__() - self.encoder = Encoder( - in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels - ) - self.intermediate = Intermediate( - self.encoder.out_channel // 2, - self.encoder.out_channel, - inter_layers, - n_blocks, - ) - self.decoder = Decoder( - self.encoder.out_channel, en_de_layers, kernel_size, n_blocks - ) - - def forward(self, x): - x, concat_tensors = self.encoder(x) - x = self.intermediate(x) - x = self.decoder(x, concat_tensors) - return x - - -class E2E(nn.Module): - def __init__( - self, - n_blocks, - n_gru, - kernel_size, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(E2E, self).__init__() - self.unet = DeepUnet( - kernel_size, - n_blocks, - en_de_layers, - inter_layers, - in_channels, - en_out_channels, - ) - self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1)) - if n_gru: - self.fc = nn.Sequential( - BiGRU(3 * 128, 256, n_gru), - nn.Linear(512, 360), - nn.Dropout(0.25), - nn.Sigmoid(), - ) - else: - self.fc = nn.Sequential( - nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid() - ) - - def forward(self, mel): - mel = mel.transpose(-1, -2).unsqueeze(1) - x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2) - x = self.fc(x) - return x - - -class MelSpectrogram(torch.nn.Module): - def __init__( - self, - is_half, - n_mel_channels, - sampling_rate, - win_length, - hop_length, - n_fft=None, - mel_fmin=0, - mel_fmax=None, - clamp=1e-5, - ): - super().__init__() - n_fft = win_length if n_fft is None else n_fft - self.hann_window = {} - mel_basis = mel( - sr=sampling_rate, - n_fft=n_fft, - n_mels=n_mel_channels, - fmin=mel_fmin, - fmax=mel_fmax, - htk=True, - ) - mel_basis = 
torch.from_numpy(mel_basis).float() - self.register_buffer("mel_basis", mel_basis) - self.n_fft = win_length if n_fft is None else n_fft - self.hop_length = hop_length - self.win_length = win_length - self.sampling_rate = sampling_rate - self.n_mel_channels = n_mel_channels - self.clamp = clamp - self.is_half = is_half - - def forward(self, audio, keyshift=0, speed=1, center=True): - factor = 2 ** (keyshift / 12) - n_fft_new = int(np.round(self.n_fft * factor)) - win_length_new = int(np.round(self.win_length * factor)) - hop_length_new = int(np.round(self.hop_length * speed)) - keyshift_key = str(keyshift) + "_" + str(audio.device) - if keyshift_key not in self.hann_window: - self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( - audio.device - ) - fft = torch.stft( - audio, - n_fft=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window=self.hann_window[keyshift_key], - center=center, - return_complex=True, - ) - magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) - if keyshift != 0: - size = self.n_fft // 2 + 1 - resize = magnitude.size(1) - if resize < size: - magnitude = F.pad(magnitude, (0, 0, 0, size - resize)) - magnitude = magnitude[:, :size, :] * self.win_length / win_length_new - mel_output = torch.matmul(self.mel_basis, magnitude) - if self.is_half == True: - mel_output = mel_output.half() - log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) - return log_mel_spec - - -class RMVPE: - def __init__(self, model_path, is_half, device=None): - self.resample_kernel = {} - model = E2E(4, 1, (2, 2)) - ckpt = torch.load(model_path, map_location="cpu") - model.load_state_dict(ckpt) - model.eval() - if is_half == True: - model = model.half() - self.model = model - self.resample_kernel = {} - self.is_half = is_half - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - self.device = device - self.mel_extractor = MelSpectrogram( - is_half, 128, 16000, 1024, 160, None, 30, 8000 - ).to(device) - self.model = self.model.to(device) - cents_mapping = 20 * np.arange(360) + 1997.3794084376191 - self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 - - def mel2hidden(self, mel): - with torch.no_grad(): - n_frames = mel.shape[-1] - mel = F.pad( - mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect" - ) - hidden = self.model(mel) - return hidden[:, :n_frames] - - def decode(self, hidden, thred=0.03): - cents_pred = self.to_local_average_cents(hidden, thred=thred) - f0 = 10 * (2 ** (cents_pred / 1200)) - f0[f0 == 10] = 0 - # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred]) - return f0 - - def infer_from_audio(self, audio, thred=0.03): - audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0) - # torch.cuda.synchronize() - # t0=ttime() - mel = self.mel_extractor(audio, center=True) - # torch.cuda.synchronize() - # t1=ttime() - hidden = self.mel2hidden(mel) - # torch.cuda.synchronize() - # t2=ttime() - hidden = hidden.squeeze(0).cpu().numpy() - if self.is_half == True: - hidden = hidden.astype("float32") - f0 = self.decode(hidden, thred=thred) - # torch.cuda.synchronize() - # t3=ttime() - # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0)) - return f0 - - def to_local_average_cents(self, salience, thred=0.05): - # t0 = ttime() - center = np.argmax(salience, axis=1) # 帧长#index - salience = np.pad(salience, ((0, 0), (4, 4))) # 帧长,368 - # t1 = ttime() - center += 4 - todo_salience = [] - todo_cents_mapping = [] - starts = center - 4 - ends 
= center + 5 - for idx in range(salience.shape[0]): - todo_salience.append(salience[:, starts[idx] : ends[idx]][idx]) - todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]]) - # t2 = ttime() - todo_salience = np.array(todo_salience) # 帧长,9 - todo_cents_mapping = np.array(todo_cents_mapping) # 帧长,9 - product_sum = np.sum(todo_salience * todo_cents_mapping, 1) - weight_sum = np.sum(todo_salience, 1) # 帧长 - devided = product_sum / weight_sum # 帧长 - # t3 = ttime() - maxx = np.max(salience, axis=1) # 帧长 - devided[maxx <= thred] = 0 - # t4 = ttime() - # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) - return devided diff --git a/spaces/LittleYuan/My-Real-Bot/realesrgan/models/realesrnet_model.py b/spaces/LittleYuan/My-Real-Bot/realesrgan/models/realesrnet_model.py deleted file mode 100644 index d11668f3712bffcd062c57db14d22ca3a0e1e59d..0000000000000000000000000000000000000000 --- a/spaces/LittleYuan/My-Real-Bot/realesrgan/models/realesrnet_model.py +++ /dev/null @@ -1,188 +0,0 @@ -import numpy as np -import random -import torch -from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt -from basicsr.data.transforms import paired_random_crop -from basicsr.models.sr_model import SRModel -from basicsr.utils import DiffJPEG, USMSharp -from basicsr.utils.img_process_util import filter2D -from basicsr.utils.registry import MODEL_REGISTRY -from torch.nn import functional as F - - -@MODEL_REGISTRY.register() -class RealESRNetModel(SRModel): - """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - It is trained without GAN losses. - It mainly performs: - 1. randomly synthesize LQ images in GPU tensors - 2. optimize the networks with GAN training. - """ - - def __init__(self, opt): - super(RealESRNetModel, self).__init__(opt) - self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts - self.usm_sharpener = USMSharp().cuda() # do usm sharpening - self.queue_size = opt.get('queue_size', 180) - - @torch.no_grad() - def _dequeue_and_enqueue(self): - """It is the training pair pool for increasing the diversity in a batch. - - Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a - batch could not have different resize scaling factors. Therefore, we employ this training pair pool - to increase the degradation diversity in a batch. 
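- The queue holds ``queue_size`` (lq, gt) pairs; once it is full, it is
- randomly permuted and its first ``b`` entries are swapped with the
- current batch.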
- """ - # initialize - b, c, h, w = self.lq.size() - if not hasattr(self, 'queue_lr'): - assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' - self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() - _, c, h, w = self.gt.size() - self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() - self.queue_ptr = 0 - if self.queue_ptr == self.queue_size: # the pool is full - # do dequeue and enqueue - # shuffle - idx = torch.randperm(self.queue_size) - self.queue_lr = self.queue_lr[idx] - self.queue_gt = self.queue_gt[idx] - # get first b samples - lq_dequeue = self.queue_lr[0:b, :, :, :].clone() - gt_dequeue = self.queue_gt[0:b, :, :, :].clone() - # update the queue - self.queue_lr[0:b, :, :, :] = self.lq.clone() - self.queue_gt[0:b, :, :, :] = self.gt.clone() - - self.lq = lq_dequeue - self.gt = gt_dequeue - else: - # only do enqueue - self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() - self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() - self.queue_ptr = self.queue_ptr + b - - @torch.no_grad() - def feed_data(self, data): - """Accept data from dataloader, and then add two-order degradations to obtain LQ images. - """ - if self.is_train and self.opt.get('high_order_degradation', True): - # training data synthesis - self.gt = data['gt'].to(self.device) - # USM sharpen the GT images - if self.opt['gt_usm'] is True: - self.gt = self.usm_sharpener(self.gt) - - self.kernel1 = data['kernel1'].to(self.device) - self.kernel2 = data['kernel2'].to(self.device) - self.sinc_kernel = data['sinc_kernel'].to(self.device) - - ori_h, ori_w = self.gt.size()[2:4] - - # ----------------------- The first degradation process ----------------------- # - # blur - out = filter2D(self.gt, self.kernel1) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, scale_factor=scale, mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob'] - if np.random.uniform() < self.opt['gaussian_noise_prob']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) - out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts - out = self.jpeger(out, quality=jpeg_p) - - # ----------------------- The second degradation process ----------------------- # - # blur - if np.random.uniform() < self.opt['second_blur_prob']: - out = filter2D(out, self.kernel2) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range2'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range2'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate( - out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) - # add 
noise - gray_noise_prob = self.opt['gray_noise_prob2'] - if np.random.uniform() < self.opt['gaussian_noise_prob2']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range2'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - - # JPEG compression + the final sinc filter - # We also need to resize images to desired sizes. We group [resize back + sinc filter] together - # as one operation. - # We consider two orders: - # 1. [resize back + sinc filter] + JPEG compression - # 2. JPEG compression + [resize back + sinc filter] - # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. - if np.random.uniform() < 0.5: - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - else: - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - - # clamp and round - self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. - - # random crop - gt_size = self.opt['gt_size'] - self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale']) - - # training pair pool - self._dequeue_and_enqueue() - self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract - else: - # for paired training or validation - self.lq = data['lq'].to(self.device) - if 'gt' in data: - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): - # do not use the synthetic process during validation - self.is_train = False - super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) - self.is_train = True diff --git "a/spaces/Liu-LAB/GPT-academic/crazy_functions/\345\221\275\344\273\244\350\241\214\345\212\251\346\211\213.py" "b/spaces/Liu-LAB/GPT-academic/crazy_functions/\345\221\275\344\273\244\350\241\214\345\212\251\346\211\213.py" deleted file mode 100644 index 4cbc08471822f51e2b4bc01c8ebfad25c1032f49..0000000000000000000000000000000000000000 --- "a/spaces/Liu-LAB/GPT-academic/crazy_functions/\345\221\275\344\273\244\350\241\214\345\212\251\346\211\213.py" +++ /dev/null @@ -1,31 +0,0 @@ -from toolbox import CatchException, update_ui, gen_time_str -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from .crazy_utils import input_clipping -import copy, json - -@CatchException -def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本, 例如需要翻译的一段话, 再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行 - plugin_kwargs 插件模型的参数, 暂时没有用武之地 - chatbot 聊天显示框的句柄, 用于显示给用户 - history 聊天历史, 前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - # 清空历史, 以免输入溢出 - 
history = [] - - # 输入 - i_say = "请写bash命令实现以下功能:" + txt - # 开始 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="你是一个Linux大师级用户。注意,当我要求你写bash命令时,尽可能地仅用一行命令解决我的要求。" - ) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - - diff --git a/spaces/LuxOAI/ChatGpt-Web/app/locales/tr.ts b/spaces/LuxOAI/ChatGpt-Web/app/locales/tr.ts deleted file mode 100644 index 739e0ec05dcafea5d15e357a905832c64158270f..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/app/locales/tr.ts +++ /dev/null @@ -1,245 +0,0 @@ -import { SubmitKey } from "../store/config"; -import type { LocaleType } from "./index"; - -const tr: LocaleType = { - WIP: "Çalışma devam ediyor...", - Error: { - Unauthorized: - "Yetkisiz erişim, lütfen erişim kodunu ayarlar sayfasından giriniz.", - }, - ChatItem: { - ChatItemCount: (count: number) => `${count} mesaj`, - }, - Chat: { - SubTitle: (count: number) => `ChatGPT tarafından ${count} mesaj`, - Actions: { - ChatList: "Sohbet Listesine Git", - CompressedHistory: "Sıkıştırılmış Geçmiş Bellek Komutu", - Export: "Tüm Mesajları Markdown Olarak Dışa Aktar", - Copy: "Kopyala", - Stop: "Durdur", - Retry: "Tekrar Dene", - Delete: "Delete", - }, - Rename: "Sohbeti Yeniden Adlandır", - Typing: "Yazıyor…", - Input: (submitKey: string) => { - var inputHints = `Göndermek için ${submitKey}`; - if (submitKey === String(SubmitKey.Enter)) { - inputHints += ", kaydırmak için Shift + Enter"; - } - return inputHints + ", komutları aramak için / (eğik çizgi)"; - }, - Send: "Gönder", - Config: { - Reset: "Reset to Default", - SaveAs: "Save as Mask", - }, - }, - Export: { - Title: "Tüm Mesajlar", - Copy: "Tümünü Kopyala", - Download: "İndir", - MessageFromYou: "Sizin Mesajınız", - MessageFromChatGPT: "ChatGPT'nin Mesajı", - }, - Memory: { - Title: "Bellek Komutları", - EmptyContent: "Henüz değil.", - Send: "Belleği Gönder", - Copy: "Belleği Kopyala", - Reset: "Oturumu Sıfırla", - ResetConfirm: - "Sıfırlama, geçerli görüşme geçmişini ve geçmiş belleği siler. 
Sıfırlamak istediğinizden emin misiniz?", - }, - Home: { - NewChat: "Yeni Sohbet", - DeleteChat: "Seçili sohbeti silmeyi onaylıyor musunuz?", - DeleteToast: "Sohbet Silindi", - Revert: "Geri Al", - }, - Settings: { - Title: "Ayarlar", - SubTitle: "Tüm Ayarlar", - Actions: { - ClearAll: "Tüm Verileri Temizle", - ResetAll: "Tüm Ayarları Sıfırla", - Close: "Kapat", - ConfirmResetAll: "Tüm ayarları sıfırlamak istediğinizden emin misiniz?", - ConfirmClearAll: "Tüm sohbeti sıfırlamak istediğinizden emin misiniz?", - }, - Lang: { - Name: "Language", // ATTENTION: if you wanna add a new translation, please do not translate this value, leave it as `Language` - All: "All Languages", - Options: { - cn: "简体中文", - en: "English", - tw: "繁體中文", - es: "Español", - it: "Italiano", - tr: "Türkçe", - jp: "日本語", - de: "Deutsch", - }, - }, - Avatar: "Avatar", - FontSize: { - Title: "Yazı Boyutu", - SubTitle: "Sohbet içeriğinin yazı boyutunu ayarlayın", - }, - Update: { - Version: (x: string) => `Sürüm: ${x}`, - IsLatest: "En son sürüm", - CheckUpdate: "Güncellemeyi Kontrol Et", - IsChecking: "Güncelleme kontrol ediliyor...", - FoundUpdate: (x: string) => `Yeni sürüm bulundu: ${x}`, - GoToUpdate: "Güncelle", - }, - SendKey: "Gönder Tuşu", - Theme: "Tema", - TightBorder: "Tam Ekran", - SendPreviewBubble: { - Title: "Mesaj Önizleme Balonu", - SubTitle: "Preview markdown in bubble", - }, - Mask: { - Title: "Mask Splash Screen", - SubTitle: "Show a mask splash screen before starting new chat", - }, - Prompt: { - Disable: { - Title: "Otomatik tamamlamayı devre dışı bırak", - SubTitle: "Otomatik tamamlamayı kullanmak için / (eğik çizgi) girin", - }, - List: "Komut Listesi", - ListCount: (builtin: number, custom: number) => - `${builtin} yerleşik, ${custom} kullanıcı tanımlı`, - Edit: "Düzenle", - Modal: { - Title: "Prompt List", - Add: "Add One", - Search: "Search Prompts", - }, - EditModal: { - Title: "Edit Prompt", - }, - }, - HistoryCount: { - Title: "Ekli Mesaj Sayısı", - SubTitle: "İstek başına ekli gönderilen mesaj sayısı", - }, - CompressThreshold: { - Title: "Geçmiş Sıkıştırma Eşiği", - SubTitle: - "Sıkıştırılmamış mesajların uzunluğu bu değeri aşarsa sıkıştırılır", - }, - Token: { - Title: "API Anahtarı", - SubTitle: "Erişim kodu sınırını yoksaymak için anahtarınızı kullanın", - Placeholder: "OpenAI API Anahtarı", - }, - Usage: { - Title: "Hesap Bakiyesi", - SubTitle(used: any, total: any) { - return `Bu ay kullanılan $${used}, abonelik $${total}`; - }, - IsChecking: "Kontrol ediliyor...", - Check: "Tekrar Kontrol Et", - NoAccess: "Bakiyeyi kontrol etmek için API anahtarını girin", - }, - AccessCode: { - Title: "Erişim Kodu", - SubTitle: "Erişim kontrolü etkinleştirme", - Placeholder: "Erişim Kodu Gerekiyor", - }, - Bot: "AI Satıcıları (bot)", - Model: "Model", - Temperature: { - Title: "Gerçeklik", - SubTitle: - "Daha büyük bir değer girildiğinde gerçeklik oranı düşer ve daha rastgele çıktılar üretir", - }, - MaxTokens: { - Title: "Maksimum Belirteç", - SubTitle: - "Girdi belirteçlerinin ve oluşturulan belirteçlerin maksimum uzunluğu", - }, - PresencePenlty: { - Title: "Varlık Cezası", - SubTitle: - "Daha büyük bir değer, yeni konular hakkında konuşma olasılığını artırır", - }, - }, - Store: { - DefaultTopic: "Yeni Konuşma", - BotHello: "Merhaba! Size bugün nasıl yardımcı olabilirim?", - Error: "Bir şeyler yanlış gitti. 
Lütfen daha sonra tekrar deneyiniz.", - Prompt: { - History: (content: string) => - "Bu, yapay zeka ile kullanıcı arasındaki sohbet geçmişinin bir özetidir: " + - content, - Topic: - "Lütfen herhangi bir giriş, noktalama işareti, tırnak işareti, nokta, sembol veya ek metin olmadan konuşmamızı özetleyen dört ila beş kelimelik bir başlık oluşturun. Çevreleyen tırnak işaretlerini kaldırın.", - Summarize: - "Gelecekteki bağlam için bir bilgi istemi olarak kullanmak üzere tartışmamızı en fazla 200 kelimeyle özetleyin.", - }, - }, - Copy: { - Success: "Panoya kopyalandı", - Failed: "Kopyalama başarısız oldu, lütfen panoya erişim izni verin", - }, - Context: { - Toast: (x: any) => `${x} bağlamsal bellek komutu`, - Edit: "Bağlamsal ve Bellek Komutları", - Add: "Yeni Ekle", - }, - Plugin: { - Name: "Plugin", - }, - Mask: { - Name: "Mask", - Page: { - Title: "Prompt Template", - SubTitle: (count: number) => `${count} prompt templates`, - Search: "Search Templates", - Create: "Create", - }, - Item: { - Info: (count: number) => `${count} prompts`, - Chat: "Chat", - View: "View", - Edit: "Edit", - Delete: "Delete", - DeleteConfirm: "Confirm to delete?", - }, - EditModal: { - Title: (readonly: boolean) => - `Edit Prompt Template ${readonly ? "(readonly)" : ""}`, - Download: "Download", - Clone: "Clone", - }, - Config: { - Avatar: "Bot Avatar", - Name: "Bot Name", - }, - }, - NewChat: { - Return: "Return", - Skip: "Skip", - Title: "Pick a Mask", - SubTitle: "Chat with the Soul behind the Mask", - More: "Find More", - NotShow: "Not Show Again", - ConfirmNoShow: "Confirm to disable?You can enable it in settings later.", - }, - - UI: { - Confirm: "Confirm", - Cancel: "Cancel", - Close: "Close", - Create: "Create", - Edit: "Edit", - }, -}; - -export default tr; diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/models/base_model.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/models/base_model.py deleted file mode 100644 index 4043116050e057f31099cda3ecae6ee3fa46cb2a..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Global/models/base_model.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
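- # Shared base class: checkpoints are written as '<epoch>_net_<label>.pth' and
- # '<epoch>_optimizer_<label>.pth' under os.path.join(opt.checkpoints_dir, opt.name).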
- -import os -import torch -import sys - - -class BaseModel(torch.nn.Module): - def name(self): - return "BaseModel" - - def initialize(self, opt): - self.opt = opt - self.gpu_ids = opt.gpu_ids - self.isTrain = opt.isTrain - self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor - self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) - - def set_input(self, input): - self.input = input - - def forward(self): - pass - - # used in test time, no backprop - def test(self): - pass - - def get_image_paths(self): - pass - - def optimize_parameters(self): - pass - - def get_current_visuals(self): - return self.input - - def get_current_errors(self): - return {} - - def save(self, label): - pass - - # helper saving function that can be used by subclasses - def save_network(self, network, network_label, epoch_label, gpu_ids): - save_filename = "%s_net_%s.pth" % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - torch.save(network.cpu().state_dict(), save_path) - if len(gpu_ids) and torch.cuda.is_available(): - network.cuda() - - def save_optimizer(self, optimizer, optimizer_label, epoch_label): - save_filename = "%s_optimizer_%s.pth" % (epoch_label, optimizer_label) - save_path = os.path.join(self.save_dir, save_filename) - torch.save(optimizer.state_dict(), save_path) - - def load_optimizer(self, optimizer, optimizer_label, epoch_label, save_dir=""): - save_filename = "%s_optimizer_%s.pth" % (epoch_label, optimizer_label) - if not save_dir: - save_dir = self.save_dir - save_path = os.path.join(save_dir, save_filename) - - if not os.path.isfile(save_path): - print("%s not exists yet!" % save_path) - else: - optimizer.load_state_dict(torch.load(save_path)) - - # helper loading function that can be used by subclasses - def load_network(self, network, network_label, epoch_label, save_dir=""): - save_filename = "%s_net_%s.pth" % (epoch_label, network_label) - if not save_dir: - save_dir = self.save_dir - - # print(save_dir) - # print(self.save_dir) - save_path = os.path.join(save_dir, save_filename) - if not os.path.isfile(save_path): - print("%s not exists yet!" 
% save_path) - # if network_label == 'G': - # raise('Generator must exist!') - else: - # network.load_state_dict(torch.load(save_path)) - try: - # print(save_path) - network.load_state_dict(torch.load(save_path)) - except: - pretrained_dict = torch.load(save_path) - model_dict = network.state_dict() - try: - pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} - network.load_state_dict(pretrained_dict) - # if self.opt.verbose: - print( - "Pretrained network %s has excessive layers; Only loading layers that are used" - % network_label - ) - except: - print( - "Pretrained network %s has fewer layers; The following are not initialized:" - % network_label - ) - for k, v in pretrained_dict.items(): - if v.size() == model_dict[k].size(): - model_dict[k] = v - - if sys.version_info >= (3, 0): - not_initialized = set() - else: - from sets import Set - - not_initialized = Set() - - for k, v in model_dict.items(): - if k not in pretrained_dict or v.size() != pretrained_dict[k].size(): - not_initialized.add(k.split(".")[0]) - - print(sorted(not_initialized)) - network.load_state_dict(model_dict) - - def update_learning_rate(): - pass diff --git a/spaces/Manjushri/MusicGen/tests/common_utils/wav_utils.py b/spaces/Manjushri/MusicGen/tests/common_utils/wav_utils.py deleted file mode 100644 index d3a563ee1749a58217ece55c9a08b8d93c0fc386..0000000000000000000000000000000000000000 --- a/spaces/Manjushri/MusicGen/tests/common_utils/wav_utils.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from pathlib import Path -import typing as tp - -import torch -import torchaudio - - -def get_white_noise(chs: int = 1, num_frames: int = 1): - wav = torch.randn(chs, num_frames) - return wav - - -def get_batch_white_noise(bs: int = 1, chs: int = 1, num_frames: int = 1): - wav = torch.randn(bs, chs, num_frames) - return wav - - -def save_wav(path: str, wav: torch.Tensor, sample_rate: int): - fp = Path(path) - kwargs: tp.Dict[str, tp.Any] = {} - if fp.suffix == '.wav': - kwargs['encoding'] = 'PCM_S' - kwargs['bits_per_sample'] = 16 - elif fp.suffix == '.mp3': - kwargs['compression'] = 320 - torchaudio.save(str(fp), wav, sample_rate, **kwargs) diff --git a/spaces/MasterThesisCBS/NorPaca_GPT/app.py b/spaces/MasterThesisCBS/NorPaca_GPT/app.py deleted file mode 100644 index c4bfb8fde0683f40cfc788a5cc75692e84ca16c8..0000000000000000000000000000000000000000 --- a/spaces/MasterThesisCBS/NorPaca_GPT/app.py +++ /dev/null @@ -1,325 +0,0 @@ -import random -import os -from urllib.parse import urlencode -#from pyngrok import ngrok - -import streamlit as st -import streamlit.components.v1 as components -import torch -from transformers import pipeline, set_seed -from transformers import AutoTokenizer, AutoModelForCausalLM - -# #import torch -# print(f"Is CUDA available: {torch.cuda.is_available()}") -# # True -# print( -# f"CUDA device for you Perrito: {torch.cuda.get_device_name(torch.cuda.current_device())}") -# # Tesla T4 - -HF_AUTH_TOKEN = "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" -DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") -#print("DEVICE SENOOOOOR", DEVICE) -DTYPE = torch.float32 if DEVICE == "cpu" else torch.float16 -MODEL_NAME = os.environ.get("MODEL_NAME", "NbAiLab/nb-gpt-j-6B-alpaca") -MAX_LENGTH = int(os.environ.get("MAX_LENGTH", 256)) - -HEADER_INFO = """ -# GPT-NorPaca -Norwegian 
GPT-J-6B NorPaca Model. -""".strip() -LOGO = "https://upload.wikimedia.org/wikipedia/commons/thumb/1/19/Logo_CopenhagenBusinessSchool.svg/1200px-Logo_CopenhagenBusinessSchool.svg.png" -SIDEBAR_INFO = f""" -
    - -NB-GPT-J-6B NorPaca is a hybrid of a GPT-3 and Llama model, trained on the Norwegian Colossal Corpus and other Internet sources. It is a 6.7 billion parameter model, and is the largest model in the GPT-J family. - -This model has been trained with [Mesh Transformer JAX](https://github.com/kingoflolz/mesh-transformer-jax) using TPUs provided by Google through the Tensor Research Cloud program, starting off the [GPT-J-6B model weigths from EleutherAI](https://huggingface.co/EleutherAI/gpt-j-6B), and trained on the [Norwegian Colossal Corpus](https://huggingface.co/datasets/NbAiLab/NCC) and other Internet sources. *This demo runs on {DEVICE}*. - -For more information, visit the [model repository](https://huggingface.co/CBSMasterThesis). - -## Configuration -""".strip() -PROMPT_BOX_INSTRUCTION = "Enter your Instructions here..." -PROMPT_BOX_INPUT = "Enter your Input here..." -EXAMPLES = [ - "Nedenfor er en instruksjon som beskriver en oppgave. Skriv et svar som fullfører forespørselen på riktig måte. ### Instruksjon: Hvordan kan jeg redusere churn og forbedre kundeoppbevaring for mitt B2B-prosjektstyringsverktøy? ### Respons", - 'Nedenfor er en instruksjon som beskriver en oppgave. Skriv et svar som fullfører forespørselen på riktig måte. ### Instruksjon: Generer en kursbeskrivelse for et maskinlæringsfag ### Respons:', - 'Nedenfor er en instruksjon som beskriver en oppgave. Skriv et svar som fullfører forespørselen på riktig måte. ### Instruksjon: Regn ut arealet av en firkant med lengde 10m. Skriv ut et flyttall. ### Respons:', - "Nedenfor er en instruksjon som beskriver en oppgave. Skriv et svar som fullfører forespørselen på riktig måte. ### Instruksjon: Generer en juridisk sjekkliste for å starte en restaurant i Norge. ### Respons:", - "Nedenfor er en instruksjon som beskriver en oppgave. Skriv et svar som fullfører forespørselen på riktig måte. ### Instruksjon: generere en liste med spørsmål for å stille brukere som vil være en del av brukervennlighetstestingsprosessen for oppgavebehandlingsappen ### Respons:", - "Nedenfor er en instruksjon som beskriver en oppgave, sammen med et input som gir ytterligere kontekst. Skriv et svar som fullfører forespørselen på riktig måte. ### Instruksjon: Oppsummer informasjonen i denne tabellen ### Input: Post | 2022 | 2021 | 2020\n---------------------\nSum driftsinntekter | 4.294.804 | 4.298.560 | 4.834.075\nÅrets resultat | 53.926 | 2.893 | 173.758\nEgenkapital i alt | 1.613.065 | 1.593.949 | 1.591.056\nSum eiendeler | 3.987.275 | 3.986.888 | 4.166.385 ### Respons:" -] - - -def style(): - st.markdown(""" - - """, unsafe_allow_html=True) - - -class Normalizer: - def remove_repetitions(self, text): - """Remove repetitions""" - first_ocurrences = [] - for sentence in text.split("."): - if sentence not in first_ocurrences: - first_ocurrences.append(sentence) - return '.'.join(first_ocurrences) - - def trim_last_sentence(self, text): - """Trim last sentence if incomplete""" - return text[:text.rfind(".") + 1] - - def clean_txt(self, text): - return self.trim_last_sentence(self.remove_repetitions(text)) - - -class TextGeneration: - def __init__(self): - self.tokenizer = None - self.generator = None - self.task = "text-generation" - self.model_name_or_path = MODEL_NAME - set_seed(42) - - # @st.cache_resource - def load(self): - print("Loading model... 
", end="") - self.tokenizer = AutoTokenizer.from_pretrained( - self.model_name_or_path, use_auth_token=HF_AUTH_TOKEN if HF_AUTH_TOKEN else None, - ) - self.model = AutoModelForCausalLM.from_pretrained( - self.model_name_or_path, use_auth_token=HF_AUTH_TOKEN if HF_AUTH_TOKEN else None, - pad_token_id=self.tokenizer.eos_token_id, eos_token_id=self.tokenizer.eos_token_id, - torch_dtype=DTYPE, low_cpu_mem_usage=False if DEVICE == "cpu" else True - ).to(device=DEVICE, non_blocking=True) - _ = self.model.eval() - # -1 if DEVICE == "cpu" else int(DEVICE.split(":")[-1]) - device_number = torch.cuda.current_device() - self.generator = pipeline( - self.task, model=self.model, tokenizer=self.tokenizer, device=device_number) - print("Done") - # with torch.no_grad(): - # tokens = tokenizer.encode(prompt, return_tensors='pt').to(device=device, non_blocking=True) - # gen_tokens = self.model.generate(tokens, do_sample=True, temperature=0.8, max_length=128) - # generated = tokenizer.batch_decode(gen_tokens)[0] - - # return generated - - def generate(self, prompt, generation_kwargs): - max_length = len(self.tokenizer(prompt)[ - "input_ids"]) + generation_kwargs["max_length"] - generation_kwargs["max_length"] = min( - max_length, self.model.config.n_positions) - # generation_kwargs["num_return_sequences"] = 1 - # generation_kwargs["return_full_text"] = False - return self.generator( - prompt, - **generation_kwargs, - )[0]["generated_text"] - -# Generate responses - - -def generate_prompt(instruction, input=None): - if input: - prompt = f"""Nedenfor er en instruksjon som beskriver en oppgave, sammen med et input som gir ytterligere kontekst. Skriv et svar som fullfører forespørselen på riktig måte. - -### Instruksjon: -{instruction} - -### Input: -{input} - -### Respons:""" - else: - prompt = f""""Nedenfor er en instruksjon som beskriver en oppgave. Skriv et svar som fullfører forespørselen på riktig måte. - -### Instruksjon: -{instruction} - -### Respons:""" - return prompt - - -# @st.cache(allow_output_mutation=True, hash_funcs={AutoModelForCausalLM: lambda _: None}) -# @st.cache(allow_output_mutation=True, hash_funcs={TextGeneration: lambda _: None}) -@st.cache_resource -def load_text_generator(): - generator = TextGeneration() - generator.load() - return generator - - -def main(): - st.set_page_config( - page_title="NB-GPT-J-6B-NorPaca", - page_icon="🇳🇴", - layout="wide", - initial_sidebar_state="expanded" - ) - style() - with st.spinner('Loading the model. 
Please, wait...'): - generator = load_text_generator() - - st.sidebar.markdown(SIDEBAR_INFO, unsafe_allow_html=True) - query_params = st.experimental_get_query_params() - if query_params: - st.experimental_set_query_params(**dict()) - - max_length = st.sidebar.slider( - label='Max words to generate', - help="The maximum length of the sequence to be generated.", - min_value=1, - max_value=MAX_LENGTH, - value=int(query_params.get("max_length", [256])[0]), - step=1 - ) - top_p = st.sidebar.slider( - label='Top-p', - help="Only the most probable tokens with probabilities that add up to `top_p` or higher are kept for " - "generation.", - min_value=0.0, - max_value=1.0, - value=float(query_params.get("top_p", [0.75])[0]), - step=0.01 - ) - temperature = st.sidebar.slider( - label='Temperature', - help="The value used to module the next token probabilities", - min_value=0.1, - max_value=10.0, - value=float(query_params.get("temperature", [0.2])[0]), - step=0.05 - ) - do_sample = st.sidebar.selectbox( - label='Sampling?', - options=(False, True), - help="Whether or not to use sampling; use greedy decoding otherwise.", - index=int(query_params.get("do_sample", ["true"])[ - 0].lower()[0] in ("t", "y", "1")), - ) - top_k = st.sidebar.slider( - label='Top-k', - help="The number of highest probability vocabulary tokens to keep for top-k-filtering", - min_value=40, - max_value=80, - value=int(query_params.get("top_k", [50])[0]), - step=1 - ) - generation_kwargs = { - "max_length": max_length, - "top_k": top_k, - "top_p": top_p, - "temperature": temperature, - "do_sample": do_sample, - # "do_clean": do_clean, - } - st.markdown(HEADER_INFO) - prompts = EXAMPLES + ["Custom"] - prompt = st.selectbox('Examples', prompts, index=len(prompts) - 1) - - if prompt == "Custom": - prompt_box_instruction = query_params.get( - "text1", [PROMPT_BOX_INSTRUCTION])[0].strip() - prompt_box_input = query_params.get( - "text2", [PROMPT_BOX_INPUT])[0].strip() - prompt_box = f"{prompt_box_instruction} {prompt_box_input}" - else: - if "### Input:" in prompt: - prompt_box_instruction = prompt.split("### Instruksjon:")[ - 1].split("### Input:")[0].strip() - prompt_box_input = prompt.split( - "### Input:")[1].split("### Respons:")[0].strip() - else: - prompt_box_instruction = prompt.split( - "### Instruksjon:")[1].split("### Respons:")[0].strip() - prompt_box_input = None - prompt_box = prompt - - if prompt == "Custom": - text_instruction = st.text_area( - "Enter Instruction", PROMPT_BOX_INSTRUCTION) - text_input = st.text_area("Enter Input", PROMPT_BOX_INPUT) - else: - text_instruction = st.text_area( - "Enter Instruction", prompt_box_instruction) - text_input = st.text_area("Enter Input", prompt_box_input) if "### Input:" in prompt else st.text_area( - "Enter Input", PROMPT_BOX_INPUT) - - generation_kwargs_ph = st.empty() - cleaner = Normalizer() - if st.button("Generate!"): - output = st.empty() - with st.spinner(text="Generating..."): - generation_kwargs_ph.markdown( - ", ".join([f"`{k}`: {v}" for k, v in generation_kwargs.items()])) - if text_instruction: - text = generate_prompt(text_instruction, text_input) if text_input != "Enter your Input here..." 
else generate_prompt( - text_instruction) - #print("TEXT OUT", text) - share_args = {"text": text, **generation_kwargs} - st.experimental_set_query_params(**share_args) - for _ in range(5): - generated_text = generator.generate( - text, generation_kwargs) - # if do_clean: - # generated_text = cleaner.clean_txt(generated_text) - if generated_text.strip().startswith(text): - generated_text = generated_text.replace( - text, "", 1).strip() - output.markdown( - f'

    ' - f'{text} ' - f'{generated_text}' - f'

    ', - unsafe_allow_html=True - ) - if generated_text.strip(): - components.html( - f""" - - - """ - ) - break - if not generated_text.strip(): - st.markdown( - "*Tried 5 times but did not produce any result. Try again!*") - - -if __name__ == '__main__': - main() diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/modulated_deform_conv.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/modulated_deform_conv.py deleted file mode 100644 index 75559579cf053abcc99538606cbb88c723faf783..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/modulated_deform_conv.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair, _single - -from annotator.uniformer.mmcv.utils import deprecated_api_warning -from ..cnn import CONV_LAYERS -from ..utils import ext_loader, print_log - -ext_module = ext_loader.load_ext( - '_ext', - ['modulated_deform_conv_forward', 'modulated_deform_conv_backward']) - - -class ModulatedDeformConv2dFunction(Function): - - @staticmethod - def symbolic(g, input, offset, mask, weight, bias, stride, padding, - dilation, groups, deform_groups): - input_tensors = [input, offset, mask, weight] - if bias is not None: - input_tensors.append(bias) - return g.op( - 'mmcv::MMCVModulatedDeformConv2d', - *input_tensors, - stride_i=stride, - padding_i=padding, - dilation_i=dilation, - groups_i=groups, - deform_groups_i=deform_groups) - - @staticmethod - def forward(ctx, - input, - offset, - mask, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - deform_groups=1): - if input is not None and input.dim() != 4: - raise ValueError( - f'Expected 4D tensor as input, got {input.dim()}D tensor \ - instead.') - ctx.stride = _pair(stride) - ctx.padding = _pair(padding) - ctx.dilation = _pair(dilation) - ctx.groups = groups - ctx.deform_groups = deform_groups - ctx.with_bias = bias is not None - if not ctx.with_bias: - bias = input.new_empty(0) # fake tensor - # When pytorch version >= 1.6.0, amp is adopted for fp16 mode; - # amp won't cast the type of model (float32), but "offset" is cast - # to float16 by nn.Conv2d automatically, leading to the type - # mismatch with input (when it is float32) or weight. - # The flag for whether to use fp16 or amp is the type of "offset", - # we cast weight and input to temporarily support fp16 and amp - # whatever the pytorch version is. 
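- # i.e. whatever dtype autocast produced for `offset` (fp16 or fp32) is propagated
- # to `input` and `weight`, so the extension op always sees one consistent dtype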
- input = input.type_as(offset) - weight = weight.type_as(input) - ctx.save_for_backward(input, offset, mask, weight, bias) - output = input.new_empty( - ModulatedDeformConv2dFunction._output_size(ctx, input, weight)) - ctx._bufs = [input.new_empty(0), input.new_empty(0)] - ext_module.modulated_deform_conv_forward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, - output, - ctx._bufs[1], - kernel_h=weight.size(2), - kernel_w=weight.size(3), - stride_h=ctx.stride[0], - stride_w=ctx.stride[1], - pad_h=ctx.padding[0], - pad_w=ctx.padding[1], - dilation_h=ctx.dilation[0], - dilation_w=ctx.dilation[1], - group=ctx.groups, - deformable_group=ctx.deform_groups, - with_bias=ctx.with_bias) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, offset, mask, weight, bias = ctx.saved_tensors - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - grad_mask = torch.zeros_like(mask) - grad_weight = torch.zeros_like(weight) - grad_bias = torch.zeros_like(bias) - grad_output = grad_output.contiguous() - ext_module.modulated_deform_conv_backward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, - ctx._bufs[1], - grad_input, - grad_weight, - grad_bias, - grad_offset, - grad_mask, - grad_output, - kernel_h=weight.size(2), - kernel_w=weight.size(3), - stride_h=ctx.stride[0], - stride_w=ctx.stride[1], - pad_h=ctx.padding[0], - pad_w=ctx.padding[1], - dilation_h=ctx.dilation[0], - dilation_w=ctx.dilation[1], - group=ctx.groups, - deformable_group=ctx.deform_groups, - with_bias=ctx.with_bias) - if not ctx.with_bias: - grad_bias = None - - return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, - None, None, None, None, None) - - @staticmethod - def _output_size(ctx, input, weight): - channels = weight.size(0) - output_size = (input.size(0), channels) - for d in range(input.dim() - 2): - in_size = input.size(d + 2) - pad = ctx.padding[d] - kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 - stride_ = ctx.stride[d] - output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) - if not all(map(lambda s: s > 0, output_size)): - raise ValueError( - 'convolution input is too small (output would be ' + - 'x'.join(map(str, output_size)) + ')') - return output_size - - -modulated_deform_conv2d = ModulatedDeformConv2dFunction.apply - - -class ModulatedDeformConv2d(nn.Module): - - @deprecated_api_warning({'deformable_groups': 'deform_groups'}, - cls_name='ModulatedDeformConv2d') - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deform_groups=1, - bias=True): - super(ModulatedDeformConv2d, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride) - self.padding = _pair(padding) - self.dilation = _pair(dilation) - self.groups = groups - self.deform_groups = deform_groups - # enable compatibility with nn.Conv2d - self.transposed = False - self.output_padding = _single(0) - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // groups, - *self.kernel_size)) - if bias: - self.bias = nn.Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - self.init_weights() - - def init_weights(self): - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. 
/ math.sqrt(n) - self.weight.data.uniform_(-stdv, stdv) - if self.bias is not None: - self.bias.data.zero_() - - def forward(self, x, offset, mask): - return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, - self.stride, self.padding, - self.dilation, self.groups, - self.deform_groups) - - -@CONV_LAYERS.register_module('DCNv2') -class ModulatedDeformConv2dPack(ModulatedDeformConv2d): - """A ModulatedDeformable Conv Encapsulation that acts as normal Conv - layers. - - Args: - in_channels (int): Same as nn.Conv2d. - out_channels (int): Same as nn.Conv2d. - kernel_size (int or tuple[int]): Same as nn.Conv2d. - stride (int): Same as nn.Conv2d, while tuple is not supported. - padding (int): Same as nn.Conv2d, while tuple is not supported. - dilation (int): Same as nn.Conv2d, while tuple is not supported. - groups (int): Same as nn.Conv2d. - bias (bool or str): If specified as `auto`, it will be decided by the - norm_cfg. Bias will be set as True if norm_cfg is None, otherwise - False. - """ - - _version = 2 - - def __init__(self, *args, **kwargs): - super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs) - self.conv_offset = nn.Conv2d( - self.in_channels, - self.deform_groups * 3 * self.kernel_size[0] * self.kernel_size[1], - kernel_size=self.kernel_size, - stride=self.stride, - padding=self.padding, - dilation=self.dilation, - bias=True) - self.init_weights() - - def init_weights(self): - super(ModulatedDeformConv2dPack, self).init_weights() - if hasattr(self, 'conv_offset'): - self.conv_offset.weight.data.zero_() - self.conv_offset.bias.data.zero_() - - def forward(self, x): - out = self.conv_offset(x) - o1, o2, mask = torch.chunk(out, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, - self.stride, self.padding, - self.dilation, self.groups, - self.deform_groups) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - version = local_metadata.get('version', None) - - if version is None or version < 2: - # the key is different in early versions - # In version < 2, ModulatedDeformConvPack - # loads previous benchmark models. 
- if (prefix + 'conv_offset.weight' not in state_dict - and prefix[:-1] + '_offset.weight' in state_dict): - state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( - prefix[:-1] + '_offset.weight') - if (prefix + 'conv_offset.bias' not in state_dict - and prefix[:-1] + '_offset.bias' in state_dict): - state_dict[prefix + - 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + - '_offset.bias') - - if version is not None and version > 1: - print_log( - f'ModulatedDeformConvPack {prefix.rstrip(".")} is upgraded to ' - 'version 2.', - logger='root') - - super()._load_from_state_dict(state_dict, prefix, local_metadata, - strict, missing_keys, unexpected_keys, - error_msgs) diff --git a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/__init__.py b/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/apps/render_data.py b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/apps/render_data.py deleted file mode 100644 index 563c03fba6e304eced73ca283152a968a65c3b8e..0000000000000000000000000000000000000000 --- a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/apps/render_data.py +++ /dev/null @@ -1,290 +0,0 @@ -#from data.config import raw_dataset, render_dataset, archive_dataset, model_list, zip_path - -from lib.renderer.camera import Camera -import numpy as np -from lib.renderer.mesh import load_obj_mesh, compute_tangent, compute_normal, load_obj_mesh_mtl -from lib.renderer.camera import Camera -import os -import cv2 -import time -import math -import random -import pyexr -import argparse -from tqdm import tqdm - - -def make_rotate(rx, ry, rz): - sinX = np.sin(rx) - sinY = np.sin(ry) - sinZ = np.sin(rz) - - cosX = np.cos(rx) - cosY = np.cos(ry) - cosZ = np.cos(rz) - - Rx = np.zeros((3,3)) - Rx[0, 0] = 1.0 - Rx[1, 1] = cosX - Rx[1, 2] = -sinX - Rx[2, 1] = sinX - Rx[2, 2] = cosX - - Ry = np.zeros((3,3)) - Ry[0, 0] = cosY - Ry[0, 2] = sinY - Ry[1, 1] = 1.0 - Ry[2, 0] = -sinY - Ry[2, 2] = cosY - - Rz = np.zeros((3,3)) - Rz[0, 0] = cosZ - Rz[0, 1] = -sinZ - Rz[1, 0] = sinZ - Rz[1, 1] = cosZ - Rz[2, 2] = 1.0 - - R = np.matmul(np.matmul(Rz,Ry),Rx) - return R - -def rotateSH(SH, R): - SHn = SH - - # 1st order - SHn[1] = R[1,1]*SH[1] - R[1,2]*SH[2] + R[1,0]*SH[3] - SHn[2] = -R[2,1]*SH[1] + R[2,2]*SH[2] - R[2,0]*SH[3] - SHn[3] = R[0,1]*SH[1] - R[0,2]*SH[2] + R[0,0]*SH[3] - - # 2nd order - SHn[4:,0] = rotateBand2(SH[4:,0],R) - SHn[4:,1] = rotateBand2(SH[4:,1],R) - SHn[4:,2] = rotateBand2(SH[4:,2],R) - - return SHn - -def rotateBand2(x, R): - s_c3 = 0.94617469575 - s_c4 = -0.31539156525 - s_c5 = 0.54627421529 - - s_c_scale = 1.0/0.91529123286551084 - s_c_scale_inv = 0.91529123286551084 - - s_rc2 = 1.5853309190550713*s_c_scale - s_c4_div_c3 = s_c4/s_c3 - s_c4_div_c3_x2 = (s_c4/s_c3)*2.0 - - s_scale_dst2 = s_c3 * s_c_scale_inv - s_scale_dst4 = s_c5 * s_c_scale_inv - - sh0 = x[3] + x[4] + x[4] - x[1] - sh1 = x[0] + s_rc2*x[2] + x[3] + x[4] - sh2 = x[0] - sh3 = -x[3] - sh4 = -x[1] - - r2x = R[0][0] + R[0][1] - r2y = R[1][0] + R[1][1] - r2z = R[2][0] + R[2][1] - - r3x = R[0][0] + R[0][2] - r3y = R[1][0] + R[1][2] - r3z = R[2][0] + R[2][2] - - r4x = R[0][1] + R[0][2] - r4y = R[1][1] + R[1][2] - r4z = R[2][1] + R[2][2] - - sh0_x = sh0 * R[0][0] - sh0_y = sh0 * R[1][0] - d0 = sh0_x * R[1][0] - d1 = sh0_y * R[2][0] - d2 = sh0 * (R[2][0] * R[2][0] + s_c4_div_c3) - d3 = sh0_x * R[2][0] 
- d4 = sh0_x * R[0][0] - sh0_y * R[1][0] - - sh1_x = sh1 * R[0][2] - sh1_y = sh1 * R[1][2] - d0 += sh1_x * R[1][2] - d1 += sh1_y * R[2][2] - d2 += sh1 * (R[2][2] * R[2][2] + s_c4_div_c3) - d3 += sh1_x * R[2][2] - d4 += sh1_x * R[0][2] - sh1_y * R[1][2] - - sh2_x = sh2 * r2x - sh2_y = sh2 * r2y - d0 += sh2_x * r2y - d1 += sh2_y * r2z - d2 += sh2 * (r2z * r2z + s_c4_div_c3_x2) - d3 += sh2_x * r2z - d4 += sh2_x * r2x - sh2_y * r2y - - sh3_x = sh3 * r3x - sh3_y = sh3 * r3y - d0 += sh3_x * r3y - d1 += sh3_y * r3z - d2 += sh3 * (r3z * r3z + s_c4_div_c3_x2) - d3 += sh3_x * r3z - d4 += sh3_x * r3x - sh3_y * r3y - - sh4_x = sh4 * r4x - sh4_y = sh4 * r4y - d0 += sh4_x * r4y - d1 += sh4_y * r4z - d2 += sh4 * (r4z * r4z + s_c4_div_c3_x2) - d3 += sh4_x * r4z - d4 += sh4_x * r4x - sh4_y * r4y - - dst = x - dst[0] = d0 - dst[1] = -d1 - dst[2] = d2 * s_scale_dst2 - dst[3] = -d3 - dst[4] = d4 * s_scale_dst4 - - return dst - -def render_prt_ortho(out_path, folder_name, subject_name, shs, rndr, rndr_uv, im_size, angl_step=4, n_light=1, pitch=[0]): - cam = Camera(width=im_size, height=im_size) - cam.ortho_ratio = 0.4 * (512 / im_size) - cam.near = -100 - cam.far = 100 - cam.sanity_check() - - # set path for obj, prt - mesh_file = os.path.join(folder_name, subject_name + '_100k.obj') - if not os.path.exists(mesh_file): - print('ERROR: obj file does not exist!!', mesh_file) - return - prt_file = os.path.join(folder_name, 'bounce', 'bounce0.txt') - if not os.path.exists(prt_file): - print('ERROR: prt file does not exist!!!', prt_file) - return - face_prt_file = os.path.join(folder_name, 'bounce', 'face.npy') - if not os.path.exists(face_prt_file): - print('ERROR: face prt file does not exist!!!', prt_file) - return - text_file = os.path.join(folder_name, 'tex', subject_name + '_dif_2k.jpg') - if not os.path.exists(text_file): - print('ERROR: dif file does not exist!!', text_file) - return - - texture_image = cv2.imread(text_file) - texture_image = cv2.cvtColor(texture_image, cv2.COLOR_BGR2RGB) - - vertices, faces, normals, faces_normals, textures, face_textures = load_obj_mesh(mesh_file, with_normal=True, with_texture=True) - vmin = vertices.min(0) - vmax = vertices.max(0) - up_axis = 1 if (vmax-vmin).argmax() == 1 else 2 - - vmed = np.median(vertices, 0) - vmed[up_axis] = 0.5*(vmax[up_axis]+vmin[up_axis]) - y_scale = 180/(vmax[up_axis] - vmin[up_axis]) - - rndr.set_norm_mat(y_scale, vmed) - rndr_uv.set_norm_mat(y_scale, vmed) - - tan, bitan = compute_tangent(vertices, faces, normals, textures, face_textures) - prt = np.loadtxt(prt_file) - face_prt = np.load(face_prt_file) - rndr.set_mesh(vertices, faces, normals, faces_normals, textures, face_textures, prt, face_prt, tan, bitan) - rndr.set_albedo(texture_image) - - rndr_uv.set_mesh(vertices, faces, normals, faces_normals, textures, face_textures, prt, face_prt, tan, bitan) - rndr_uv.set_albedo(texture_image) - - os.makedirs(os.path.join(out_path, 'GEO', 'OBJ', subject_name),exist_ok=True) - os.makedirs(os.path.join(out_path, 'PARAM', subject_name),exist_ok=True) - os.makedirs(os.path.join(out_path, 'RENDER', subject_name),exist_ok=True) - os.makedirs(os.path.join(out_path, 'MASK', subject_name),exist_ok=True) - os.makedirs(os.path.join(out_path, 'UV_RENDER', subject_name),exist_ok=True) - os.makedirs(os.path.join(out_path, 'UV_MASK', subject_name),exist_ok=True) - os.makedirs(os.path.join(out_path, 'UV_POS', subject_name),exist_ok=True) - os.makedirs(os.path.join(out_path, 'UV_NORMAL', subject_name),exist_ok=True) - - if not 
os.path.exists(os.path.join(out_path, 'val.txt')): - f = open(os.path.join(out_path, 'val.txt'), 'w') - f.close() - - # copy obj file - cmd = 'cp %s %s' % (mesh_file, os.path.join(out_path, 'GEO', 'OBJ', subject_name)) - print(cmd) - os.system(cmd) - - for p in pitch: - for y in tqdm(range(0, 360, angl_step)): - R = np.matmul(make_rotate(math.radians(p), 0, 0), make_rotate(0, math.radians(y), 0)) - if up_axis == 2: - R = np.matmul(R, make_rotate(math.radians(90),0,0)) - - rndr.rot_matrix = R - rndr_uv.rot_matrix = R - rndr.set_camera(cam) - rndr_uv.set_camera(cam) - - for j in range(n_light): - sh_id = random.randint(0,shs.shape[0]-1) - sh = shs[sh_id] - sh_angle = 0.2*np.pi*(random.random()-0.5) - sh = rotateSH(sh, make_rotate(0, sh_angle, 0).T) - - dic = {'sh': sh, 'ortho_ratio': cam.ortho_ratio, 'scale': y_scale, 'center': vmed, 'R': R} - - rndr.set_sh(sh) - rndr.analytic = False - rndr.use_inverse_depth = False - rndr.display() - - out_all_f = rndr.get_color(0) - out_mask = out_all_f[:,:,3] - out_all_f = cv2.cvtColor(out_all_f, cv2.COLOR_RGBA2BGR) - - np.save(os.path.join(out_path, 'PARAM', subject_name, '%d_%d_%02d.npy'%(y,p,j)),dic) - cv2.imwrite(os.path.join(out_path, 'RENDER', subject_name, '%d_%d_%02d.jpg'%(y,p,j)),255.0*out_all_f) - cv2.imwrite(os.path.join(out_path, 'MASK', subject_name, '%d_%d_%02d.png'%(y,p,j)),255.0*out_mask) - - rndr_uv.set_sh(sh) - rndr_uv.analytic = False - rndr_uv.use_inverse_depth = False - rndr_uv.display() - - uv_color = rndr_uv.get_color(0) - uv_color = cv2.cvtColor(uv_color, cv2.COLOR_RGBA2BGR) - cv2.imwrite(os.path.join(out_path, 'UV_RENDER', subject_name, '%d_%d_%02d.jpg'%(y,p,j)),255.0*uv_color) - - if y == 0 and j == 0 and p == pitch[0]: - uv_pos = rndr_uv.get_color(1) - uv_mask = uv_pos[:,:,3] - cv2.imwrite(os.path.join(out_path, 'UV_MASK', subject_name, '00.png'),255.0*uv_mask) - - data = {'default': uv_pos[:,:,:3]} # default is a reserved name - pyexr.write(os.path.join(out_path, 'UV_POS', subject_name, '00.exr'), data) - - uv_nml = rndr_uv.get_color(2) - uv_nml = cv2.cvtColor(uv_nml, cv2.COLOR_RGBA2BGR) - cv2.imwrite(os.path.join(out_path, 'UV_NORMAL', subject_name, '00.png'),255.0*uv_nml) - - -if __name__ == '__main__': - shs = np.load('./env_sh.npy') - - parser = argparse.ArgumentParser() - parser.add_argument('-i', '--input', type=str, default='/home/shunsuke/Downloads/rp_dennis_posed_004_OBJ') - parser.add_argument('-o', '--out_dir', type=str, default='/home/shunsuke/Documents/hf_human') - parser.add_argument('-m', '--ms_rate', type=int, default=1, help='higher ms rate results in less aliased output. MESA renderer only supports ms_rate=1.') - parser.add_argument('-e', '--egl', action='store_true', help='egl rendering option. use this when rendering with headless server with NVIDIA GPU') - parser.add_argument('-s', '--size', type=int, default=512, help='rendering image size') - args = parser.parse_args() - - # NOTE: GL context has to be created before any other OpenGL function loads. 
- from lib.renderer.gl.init_gl import initialize_GL_context - initialize_GL_context(width=args.size, height=args.size, egl=args.egl) - - from lib.renderer.gl.prt_render import PRTRender - rndr = PRTRender(width=args.size, height=args.size, ms_rate=args.ms_rate, egl=args.egl) - rndr_uv = PRTRender(width=args.size, height=args.size, uv_mode=True, egl=args.egl) - - if args.input[-1] == '/': - args.input = args.input[:-1] - subject_name = args.input.split('/')[-1][:-4] - render_prt_ortho(args.out_dir, args.input, subject_name, shs, rndr, rndr_uv, args.size, 1, 1, pitch=[0]) \ No newline at end of file diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/talking_heads_attention_test.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/talking_heads_attention_test.py deleted file mode 100644 index ed24eda26c6f532b5e5011f5bfc8109eeca68a03..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/talking_heads_attention_test.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for the attention layer.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from absl.testing import parameterized -import numpy as np -import tensorflow as tf - -from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import -from official.nlp.modeling.layers import talking_heads_attention - - -# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It -# guarantees forward compatibility of this code for the V2 switchover. -# This test is revised base on attention.MultiHeadAttentionTest. -@keras_parameterized.run_all_keras_modes -class TalkingHeadsAttentionTest(keras_parameterized.TestCase): - - @parameterized.named_parameters( - ("key_value_same_proj", None, None, [40, 80]), - ("key_value_different_proj", 32, 60, [40, 60]), - ) - def test_non_masked_attention(self, value_size, output_shape, output_dims): - """Test that the attention layer can be created without a mask tensor.""" - test_layer = talking_heads_attention.TalkingHeadsAttention( - num_heads=12, - key_size=64, - value_size=value_size, - output_shape=output_shape) - # Create a 3-dimensional input (the first dimension is implicit). - query = tf.keras.Input(shape=(40, 80)) - value = tf.keras.Input(shape=(20, 80)) - output = test_layer([query, value]) - self.assertEqual(output.shape.as_list(), [None] + output_dims) - - def test_non_masked_self_attention(self): - """Test with one input (self-attenntion) and no mask tensor.""" - test_layer = talking_heads_attention.TalkingHeadsAttention( - num_heads=12, key_size=64) - # Create a 3-dimensional input (the first dimension is implicit). 
- query = tf.keras.Input(shape=(40, 80)) - output = test_layer([query, query]) - self.assertEqual(output.shape.as_list(), [None, 40, 80]) - - def test_attention_scores(self): - """Test attention outputs with coefficients.""" - test_layer = talking_heads_attention.TalkingHeadsAttention( - num_heads=12, key_size=64, return_attention_scores=True) - # Create a 3-dimensional input (the first dimension is implicit). - query = tf.keras.Input(shape=(40, 80)) - output, coef = test_layer([query, query]) - self.assertEqual(output.shape.as_list(), [None, 40, 80]) - self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40]) - - @parameterized.named_parameters(("with_bias", True), ("no_bias", False)) - def test_masked_attention(self, use_bias): - """Test with a mask tensor.""" - test_layer = talking_heads_attention.TalkingHeadsAttention( - num_heads=12, key_size=2, use_bias=use_bias) - # Create a 3-dimensional input (the first dimension is implicit). - batch_size = 3 - query = tf.keras.Input(shape=(4, 8)) - value = tf.keras.Input(shape=(2, 8)) - mask_tensor = tf.keras.Input(shape=(4, 2)) - output = test_layer([query, value], mask_tensor) - - # Create a model containing the test layer. - model = tf.keras.Model([query, value, mask_tensor], output) - - # Generate data for the input (non-mask) tensors. - from_data = 10 * np.random.random_sample((batch_size, 4, 8)) - to_data = 10 * np.random.random_sample((batch_size, 2, 8)) - - # Invoke the data with a random set of mask data. This should mask at least - # one element. - mask_data = np.random.randint(2, size=(batch_size, 4, 2)) - masked_output_data = model.predict([from_data, to_data, mask_data]) - - # Invoke the same data, but with a null mask (where no elements are masked). - null_mask_data = np.ones((batch_size, 4, 2)) - unmasked_output_data = model.predict([from_data, to_data, null_mask_data]) - - # Because one data is masked and one is not, the outputs should not be the - # same. - self.assertNotAllClose(masked_output_data, unmasked_output_data) - - # Tests the layer with three inputs: Q, K, V. - key = tf.keras.Input(shape=(2, 8)) - output = test_layer([query, value, key], mask_tensor) - model = tf.keras.Model([query, value, key, mask_tensor], output) - - masked_output_data = model.predict([from_data, to_data, to_data, mask_data]) - unmasked_output_data = model.predict( - [from_data, to_data, to_data, null_mask_data]) - # Because one data is masked and one is not, the outputs should not be the - # same. - self.assertNotAllClose(masked_output_data, unmasked_output_data) - - if use_bias: - self.assertLen(test_layer._query_dense.trainable_variables, 2) - self.assertLen(test_layer._output_dense.trainable_variables, 2) - else: - self.assertLen(test_layer._query_dense.trainable_variables, 1) - self.assertLen(test_layer._output_dense.trainable_variables, 1) - - def test_initializer(self): - """Test with a specified initializer.""" - test_layer = talking_heads_attention.TalkingHeadsAttention( - num_heads=12, - key_size=64, - kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) - # Create a 3-dimensional input (the first dimension is implicit). 
- query = tf.keras.Input(shape=(40, 80)) - output = test_layer([query, query]) - self.assertEqual(output.shape.as_list(), [None, 40, 80]) - - @parameterized.named_parameters( - ("4d_inputs_one_free_batch", [3, 4], [3, 2], [4, 2], (2,)), - ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)), - ("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3))) - def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes): - """Test with a mask tensor.""" - test_layer = talking_heads_attention.TalkingHeadsAttention( - num_heads=12, key_size=2, attention_axes=attention_axes) - batch_size, hidden_size = 3, 8 - # Generate data for the input (non-mask) tensors. - query_shape = [batch_size] + q_dims + [hidden_size] - value_shape = [batch_size] + v_dims + [hidden_size] - mask_shape = [batch_size] + mask_dims - query = 10 * np.random.random_sample(query_shape) - value = 10 * np.random.random_sample(value_shape) - - # Invoke the data with a random set of mask data. This should mask at least - # one element. - mask_data = np.random.randint(2, size=mask_shape).astype("bool") - output = test_layer([query, value], mask_data) - - # Invoke the same data, but with a null mask (where no elements are masked). - null_mask_data = np.ones(mask_shape) - unmasked_output = test_layer([query, value], null_mask_data) - # Because one data is masked and one is not, the outputs should not be the - # same. - self.assertNotAllClose(output, unmasked_output) - - -if __name__ == "__main__": - tf.test.main() diff --git a/spaces/NCTCMumbai/NCTC/models/official/staging/training/utils.py b/spaces/NCTCMumbai/NCTC/models/official/staging/training/utils.py deleted file mode 100644 index 33fa368b7b966e449c8309e523cd31db73efb978..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/staging/training/utils.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Some layered modules/functions to help users writing custom training loop.""" - -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - -import abc -import inspect -import six - -import tensorflow.compat.v2 as tf - - -def create_loop_fn(step_fn): - """Creates a multiple steps function driven by the python while loop. - - Args: - step_fn: A function which takes `iterator` as input. - - Returns: - A callable defined as the `loop_fn` defination below. - """ - - def loop_fn(iterator, num_steps, state=None, reduce_fn=None): - """A loop function with multiple steps. - - Args: - iterator: A nested structure of tf.data `Iterator` or - `DistributedIterator`. - num_steps: The number of steps in the loop. If `num_steps==-1`, will - iterate until exausting the iterator. - state: An optional initial state before running the loop. 
- reduce_fn: a callable defined as `def reduce_fn(state, value)`, where - `value` is the outputs from `step_fn`. - - Returns: - The updated state. - """ - try: - step = 0 - # To make sure the OutOfRangeError exception can be handled well with - # async remote eager, we need to wrap the loop body in a `async_scope`. - with tf.experimental.async_scope(): - while (num_steps == -1 or step < num_steps): - outputs = step_fn(iterator) - if reduce_fn is not None: - state = reduce_fn(state, outputs) - step += 1 - return state - except (StopIteration, tf.errors.OutOfRangeError): - tf.experimental.async_clear_error() - return state - - return loop_fn - - -def create_tf_while_loop_fn(step_fn): - """Create a multiple steps function driven by tf.while_loop on the host. - - Args: - step_fn: A function which takes `iterator` as input. - - Returns: - A callable defined as the `loop_fn` defination below. - """ - - @tf.function - def loop_fn(iterator, num_steps): - """A loop function with multiple steps. - - Args: - iterator: A nested structure of tf.data `Iterator` or - `DistributedIterator`. - num_steps: The number of steps in the loop. Must be a tf.Tensor. - """ - if not isinstance(num_steps, tf.Tensor): - raise ValueError("`num_steps` should be an `tf.Tensor`. Python object " - "may cause retracing.") - - for _ in tf.range(num_steps): - step_fn(iterator) - - return loop_fn - - -def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs): - """A helper function to create distributed dataset. - - Args: - strategy: An instance of `tf.distribute.Strategy`. - dataset_or_fn: A instance of `tf.data.Dataset` or a function which takes an - `tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If - it is a function, it could optionally have an argument named - `input_context` which is `tf.distribute.InputContext` argument type. - *args: The list of arguments to be passed to dataset_or_fn. - **kwargs: Any keyword arguments to be passed. - - Returns: - A distributed Dataset. - """ - if strategy is None: - strategy = tf.distribute.get_strategy() - - if isinstance(dataset_or_fn, tf.data.Dataset): - return strategy.experimental_distribute_dataset(dataset_or_fn) - - if not callable(dataset_or_fn): - raise ValueError("`dataset_or_fn` should be either callable or an instance " - "of `tf.data.Dataset`") - - def dataset_fn(ctx): - """Wrapped dataset function for creating distributed dataset..""" - - # If `dataset_or_fn` is a function and has `input_context` as argument - # names, pass `ctx` as the value of `input_context` when calling - # `dataset_or_fn`. Otherwise `ctx` will not be used when calling - # `dataset_or_fn`. - if six.PY3: - argspec = inspect.getfullargspec(dataset_or_fn) - else: - argspec = inspect.getargspec(dataset_or_fn) - args_names = argspec.args - - if "input_context" in args_names: - kwargs["input_context"] = ctx - ds = dataset_or_fn(*args, **kwargs) - return ds - - return strategy.experimental_distribute_datasets_from_function(dataset_fn) - - -class SummaryManager(object): - """A class manages writing summaries.""" - - def __init__(self, - summary_writer, - summary_fn, - global_step=None, - summary_interval=None): - """Construct a summary manager object. - - Args: - summary_writer: A `tf.summary.SummaryWriter` instance for writing - summaries. - summary_fn: A callable defined as `def summary_fn(name, tensor, - step=None)`, which describes the summary operation. 
- global_step: A `tf.Variable` instance for checking the current global step - value, in case users want to save summaries every N steps. - summary_interval: An integer, indicates the minimum step interval between - two summaries. - """ - if summary_writer is not None: - self._summary_writer = summary_writer - self._enabled = True - else: - self._summary_writer = tf.summary.create_noop_writer() - self._enabled = False - self._summary_fn = summary_fn - - if global_step is None: - self._global_step = tf.summary.experimental.get_step() - else: - self._global_step = global_step - - if summary_interval is not None: - if self._global_step is None: - raise ValueError("`summary_interval` is not None, but no `global_step` " - "can be obtained ") - self._last_summary_step = self._global_step.numpy() - self._summary_interval = summary_interval - - @property - def summary_interval(self): - return self._summary_interval - - @property - def summary_writer(self): - """Returns the underlying summary writer.""" - return self._summary_writer - - def flush(self): - """Flush the underlying summary writer.""" - if self._enabled: - tf.summary.flush(self._summary_writer) - - def write_summaries(self, items, always_write=True): - """Write a bulk of summaries. - - Args: - items: a dictionary of `Tensors` for writing summaries. - always_write: An optional boolean. If `True`, the manager will always - write summaries unless the summaries have been written for the same - step. Otherwise the manager will only write the summaries if the - interval between summaries are larger than `summary_interval`. - - Returns: - A boolean indicates whether the summaries are written or not. - """ - # TODO(rxsang): Support writing summaries with nested structure, so users - # can split the summaries into different directories for nicer visualization - # in Tensorboard, like train and eval metrics. - if not self._enabled: - return False - - if self._summary_interval is not None: - current_step = self._global_step.numpy() - if current_step == self._last_summary_step: - return False - if not always_write and current_step < (self._last_summary_step + - self._summary_interval): - return False - self._last_summary_step = current_step - - with self._summary_writer.as_default(): - for name, tensor in items.items(): - self._summary_fn(name, tensor, step=self._global_step) - return True - - -@six.add_metaclass(abc.ABCMeta) -class Trigger(object): - """An abstract class representing a "trigger" for some event.""" - - @abc.abstractmethod - def __call__(self, value: float, force_trigger=False): - """Maybe trigger the event based on the given value. - - Args: - value: the value for triggering. - force_trigger: Whether the trigger is forced triggered. - - Returns: - `True` if the trigger is triggered on the given `value`, and - `False` otherwise. - """ - - @abc.abstractmethod - def reset(self): - """Reset states in the trigger.""" - - -class IntervalTrigger(Trigger): - """Triggers on every fixed interval.""" - - def __init__(self, interval, start=0): - """Constructs the IntervalTrigger. - - Args: - interval: The triggering interval. - start: An initial value for the trigger. - """ - self._interval = interval - self._last_trigger_value = start - - def __call__(self, value, force_trigger=False): - """Maybe trigger the event based on the given value. - - Args: - value: the value for triggering. - force_trigger: If True, the trigger will be forced triggered unless the - last trigger value is equal to `value`. 
- - Returns: - `True` if the trigger is triggered on the given `value`, and - `False` otherwise. - """ - if force_trigger and value != self._last_trigger_value: - self._last_trigger_value = value - return True - - if self._interval and self._interval > 0: - if value >= self._last_trigger_value + self._interval: - self._last_trigger_value = value - return True - return False - - def reset(self): - """See base class.""" - self._last_trigger_value = 0 - - -class EpochHelper(object): - """A Helper class to handle epochs in Customized Training Loop.""" - - def __init__(self, epoch_steps, global_step): - """Constructs the EpochHelper. - - Args: - epoch_steps: An integer indicates how many steps in an epoch. - global_step: A `tf.Variable` instance indicates the current global step. - """ - self._epoch_steps = epoch_steps - self._global_step = global_step - self._current_epoch = None - self._epoch_start_step = None - self._in_epoch = False - - def epoch_begin(self): - """Returns whether a new epoch should begin.""" - if self._in_epoch: - return False - current_step = self._global_step.numpy() - self._epoch_start_step = current_step - self._current_epoch = current_step // self._epoch_steps - self._in_epoch = True - return True - - def epoch_end(self): - """Returns whether the current epoch should end.""" - if not self._in_epoch: - raise ValueError("`epoch_end` can only be called inside an epoch") - current_step = self._global_step.numpy() - epoch = current_step // self._epoch_steps - - if epoch > self._current_epoch: - self._in_epoch = False - return True - return False - - @property - def batch_index(self): - """Index of the next batch within the current epoch.""" - return self._global_step.numpy() - self._epoch_start_step - - @property - def current_epoch(self): - return self._current_epoch diff --git a/spaces/Naszirs397/rvc-models/README.md b/spaces/Naszirs397/rvc-models/README.md deleted file mode 100644 index 56936f1df15477c0ae2fdcfe59a77c175e1905d8..0000000000000000000000000000000000000000 --- a/spaces/Naszirs397/rvc-models/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Rvc Models -emoji: 🎤 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: zomehwh/rvc-models ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nephele/bert-vits2-multi-voice/transforms.py b/spaces/Nephele/bert-vits2-multi-voice/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/Nephele/bert-vits2-multi-voice/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - 
unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = 
searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/OAOA/DifFace/basicsr/models/esrgan_model.py b/spaces/OAOA/DifFace/basicsr/models/esrgan_model.py deleted file mode 100644 index 3d746d0e29418d9e8f35fa9c1e3a315d694075be..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/models/esrgan_model.py +++ /dev/null @@ -1,83 +0,0 @@ -import torch -from collections import OrderedDict - -from basicsr.utils.registry import MODEL_REGISTRY -from .srgan_model import SRGANModel - - -@MODEL_REGISTRY.register() -class ESRGANModel(SRGANModel): - """ESRGAN model for single image super-resolution.""" - - def optimize_parameters(self, current_iter): - # optimize net_g - for p in self.net_d.parameters(): - p.requires_grad = False - - self.optimizer_g.zero_grad() - self.output = self.net_g(self.lq) - - l_g_total = 0 - loss_dict = OrderedDict() - if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters): - # pixel loss - if self.cri_pix: - l_g_pix = self.cri_pix(self.output, self.gt) - l_g_total += l_g_pix - loss_dict['l_g_pix'] = l_g_pix - # perceptual loss - if self.cri_perceptual: - l_g_percep, l_g_style = self.cri_perceptual(self.output, self.gt) - if l_g_percep is not None: - l_g_total += l_g_percep - loss_dict['l_g_percep'] = l_g_percep - if l_g_style is not None: - l_g_total += l_g_style - loss_dict['l_g_style'] = l_g_style - # gan loss 
(relativistic gan) - real_d_pred = self.net_d(self.gt).detach() - fake_g_pred = self.net_d(self.output) - l_g_real = self.cri_gan(real_d_pred - torch.mean(fake_g_pred), False, is_disc=False) - l_g_fake = self.cri_gan(fake_g_pred - torch.mean(real_d_pred), True, is_disc=False) - l_g_gan = (l_g_real + l_g_fake) / 2 - - l_g_total += l_g_gan - loss_dict['l_g_gan'] = l_g_gan - - l_g_total.backward() - self.optimizer_g.step() - - # optimize net_d - for p in self.net_d.parameters(): - p.requires_grad = True - - self.optimizer_d.zero_grad() - # gan loss (relativistic gan) - - # In order to avoid the error in distributed training: - # "Error detected in CudnnBatchNormBackward: RuntimeError: one of - # the variables needed for gradient computation has been modified by - # an inplace operation", - # we separate the backwards for real and fake, and also detach the - # tensor for calculating mean. - - # real - fake_d_pred = self.net_d(self.output).detach() - real_d_pred = self.net_d(self.gt) - l_d_real = self.cri_gan(real_d_pred - torch.mean(fake_d_pred), True, is_disc=True) * 0.5 - l_d_real.backward() - # fake - fake_d_pred = self.net_d(self.output.detach()) - l_d_fake = self.cri_gan(fake_d_pred - torch.mean(real_d_pred.detach()), False, is_disc=True) * 0.5 - l_d_fake.backward() - self.optimizer_d.step() - - loss_dict['l_d_real'] = l_d_real - loss_dict['l_d_fake'] = l_d_fake - loss_dict['out_d_real'] = torch.mean(real_d_pred.detach()) - loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach()) - - self.log_dict = self.reduce_loss_dict(loss_dict) - - if self.ema_decay > 0: - self.model_ema(decay=self.ema_decay) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/discriminative_reranking_nmt/drnmt_rerank.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/discriminative_reranking_nmt/drnmt_rerank.py deleted file mode 100644 index 2e0fc2bd29aedb0b477b7cc8e2c3b606acdd454a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/discriminative_reranking_nmt/drnmt_rerank.py +++ /dev/null @@ -1,364 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Score raw text with a trained model. 
-""" - -from collections import namedtuple -import logging -from multiprocessing import Pool -import sys -import os -import random - -import numpy as np -import sacrebleu -import torch - -from fairseq import checkpoint_utils, options, utils - - -logger = logging.getLogger("fairseq_cli.drnmt_rerank") -logger.setLevel(logging.INFO) - -Batch = namedtuple("Batch", "ids src_tokens src_lengths") - - -pool_init_variables = {} - - -def init_loaded_scores(mt_scores, model_scores, hyp, ref): - global pool_init_variables - pool_init_variables["mt_scores"] = mt_scores - pool_init_variables["model_scores"] = model_scores - pool_init_variables["hyp"] = hyp - pool_init_variables["ref"] = ref - - -def parse_fairseq_gen(filename, task): - source = {} - hypos = {} - scores = {} - with open(filename, "r", encoding="utf-8") as f: - for line in f: - line = line.strip() - if line.startswith("S-"): # source - uid, text = line.split("\t", 1) - uid = int(uid[2:]) - source[uid] = text - elif line.startswith("D-"): # hypo - uid, score, text = line.split("\t", 2) - uid = int(uid[2:]) - if uid not in hypos: - hypos[uid] = [] - scores[uid] = [] - hypos[uid].append(text) - scores[uid].append(float(score)) - else: - continue - - source_out = [source[i] for i in range(len(hypos))] - hypos_out = [h for i in range(len(hypos)) for h in hypos[i]] - scores_out = [s for i in range(len(scores)) for s in scores[i]] - - return source_out, hypos_out, scores_out - - -def read_target(filename): - with open(filename, "r", encoding="utf-8") as f: - output = [line.strip() for line in f] - return output - - -def make_batches(args, src, hyp, task, max_positions, encode_fn): - assert len(src) * args.beam == len( - hyp - ), f"Expect {len(src) * args.beam} hypotheses for {len(src)} source sentences with beam size {args.beam}. Got {len(hyp)} hypotheses intead." 
- hyp_encode = [ - task.source_dictionary.encode_line(encode_fn(h), add_if_not_exist=False).long() - for h in hyp - ] - if task.cfg.include_src: - src_encode = [ - task.source_dictionary.encode_line( - encode_fn(s), add_if_not_exist=False - ).long() - for s in src - ] - tokens = [(src_encode[i // args.beam], h) for i, h in enumerate(hyp_encode)] - lengths = [(t1.numel(), t2.numel()) for t1, t2 in tokens] - else: - tokens = [(h,) for h in hyp_encode] - lengths = [(h.numel(),) for h in hyp_encode] - - itr = task.get_batch_iterator( - dataset=task.build_dataset_for_inference(tokens, lengths), - max_tokens=args.max_tokens, - max_sentences=args.batch_size, - max_positions=max_positions, - ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, - ).next_epoch_itr(shuffle=False) - - for batch in itr: - yield Batch( - ids=batch["id"], - src_tokens=batch["net_input"]["src_tokens"], - src_lengths=batch["net_input"]["src_lengths"], - ) - - -def decode_rerank_scores(args): - if args.max_tokens is None and args.batch_size is None: - args.batch_size = 1 - - logger.info(args) - - use_cuda = torch.cuda.is_available() and not args.cpu - - # Load ensemble - logger.info("loading model(s) from {}".format(args.path)) - models, _model_args, task = checkpoint_utils.load_model_ensemble_and_task( - [args.path], arg_overrides=eval(args.model_overrides), - ) - - for model in models: - if args.fp16: - model.half() - if use_cuda: - model.cuda() - - # Initialize generator - generator = task.build_generator(args) - - # Handle tokenization and BPE - tokenizer = task.build_tokenizer(args) - bpe = task.build_bpe(args) - - def encode_fn(x): - if tokenizer is not None: - x = tokenizer.encode(x) - if bpe is not None: - x = bpe.encode(x) - return x - - max_positions = utils.resolve_max_positions( - task.max_positions(), *[model.max_positions() for model in models] - ) - - src, hyp, mt_scores = parse_fairseq_gen(args.in_text, task) - model_scores = {} - logger.info("decode reranker score") - for batch in make_batches(args, src, hyp, task, max_positions, encode_fn): - src_tokens = batch.src_tokens - src_lengths = batch.src_lengths - if use_cuda: - src_tokens = src_tokens.cuda() - src_lengths = src_lengths.cuda() - - sample = { - "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}, - } - scores = task.inference_step(generator, models, sample) - - for id, sc in zip(batch.ids.tolist(), scores.tolist()): - model_scores[id] = sc[0] - - model_scores = [model_scores[i] for i in range(len(model_scores))] - - return src, hyp, mt_scores, model_scores - - -def get_score(mt_s, md_s, w1, lp, tgt_len): - return mt_s / (tgt_len ** lp) * w1 + md_s - - -def get_best_hyps(mt_scores, md_scores, hypos, fw_weight, lenpen, beam): - assert len(mt_scores) == len(md_scores) and len(mt_scores) == len(hypos) - hypo_scores = [] - best_hypos = [] - best_scores = [] - offset = 0 - for i in range(len(hypos)): - tgt_len = len(hypos[i].split()) - hypo_scores.append( - get_score(mt_scores[i], md_scores[i], fw_weight, lenpen, tgt_len) - ) - - if (i + 1) % beam == 0: - max_i = np.argmax(hypo_scores) - best_hypos.append(hypos[offset + max_i]) - best_scores.append(hypo_scores[max_i]) - hypo_scores = [] - offset += beam - return best_hypos, best_scores - - -def eval_metric(args, hypos, ref): - if args.metric == "bleu": - score = sacrebleu.corpus_bleu(hypos, [ref]).score - else: - score = sacrebleu.corpus_ter(hypos, [ref]).score - - return score - - -def score_target_hypo(args, fw_weight, lp): - mt_scores = pool_init_variables["mt_scores"] - 
model_scores = pool_init_variables["model_scores"] - hyp = pool_init_variables["hyp"] - ref = pool_init_variables["ref"] - best_hypos, _ = get_best_hyps( - mt_scores, model_scores, hyp, fw_weight, lp, args.beam - ) - rerank_eval = None - if ref: - rerank_eval = eval_metric(args, best_hypos, ref) - print(f"fw_weight {fw_weight}, lenpen {lp}, eval {rerank_eval}") - - return rerank_eval - - -def print_result(best_scores, best_hypos, output_file): - for i, (s, h) in enumerate(zip(best_scores, best_hypos)): - print(f"{i}\t{s}\t{h}", file=output_file) - - -def main(args): - utils.import_user_module(args) - - src, hyp, mt_scores, model_scores = decode_rerank_scores(args) - - assert ( - not args.tune or args.target_text is not None - ), "--target-text has to be set when tuning weights" - if args.target_text: - ref = read_target(args.target_text) - assert len(src) == len( - ref - ), f"different numbers of source and target sentences ({len(src)} vs. {len(ref)})" - - orig_best_hypos = [hyp[i] for i in range(0, len(hyp), args.beam)] - orig_eval = eval_metric(args, orig_best_hypos, ref) - - if args.tune: - logger.info("tune weights for reranking") - - random_params = np.array( - [ - [ - random.uniform( - args.lower_bound_fw_weight, args.upper_bound_fw_weight - ), - random.uniform(args.lower_bound_lenpen, args.upper_bound_lenpen), - ] - for k in range(args.num_trials) - ] - ) - - logger.info("launching pool") - with Pool( - 32, - initializer=init_loaded_scores, - initargs=(mt_scores, model_scores, hyp, ref), - ) as p: - rerank_scores = p.starmap( - score_target_hypo, - [ - (args, random_params[i][0], random_params[i][1],) - for i in range(args.num_trials) - ], - ) - if args.metric == "bleu": - best_index = np.argmax(rerank_scores) - else: - best_index = np.argmin(rerank_scores) - best_fw_weight = random_params[best_index][0] - best_lenpen = random_params[best_index][1] - else: - assert ( - args.lenpen is not None and args.fw_weight is not None - ), "--lenpen and --fw-weight should be set" - best_fw_weight, best_lenpen = args.fw_weight, args.lenpen - - best_hypos, best_scores = get_best_hyps( - mt_scores, model_scores, hyp, best_fw_weight, best_lenpen, args.beam - ) - - if args.results_path is not None: - os.makedirs(args.results_path, exist_ok=True) - output_path = os.path.join( - args.results_path, "generate-{}.txt".format(args.gen_subset), - ) - with open(output_path, "w", buffering=1, encoding="utf-8") as o: - print_result(best_scores, best_hypos, o) - else: - print_result(best_scores, best_hypos, sys.stdout) - - if args.target_text: - rerank_eval = eval_metric(args, best_hypos, ref) - print(f"before reranking, {args.metric.upper()}:", orig_eval) - print( - f"after reranking with fw_weight={best_fw_weight}, lenpen={best_lenpen}, {args.metric.upper()}:", - rerank_eval, - ) - - -def cli_main(): - parser = options.get_generation_parser(interactive=True) - - parser.add_argument( - "--in-text", - default=None, - required=True, - help="text from fairseq-interactive output, containing source sentences and hypotheses", - ) - parser.add_argument("--target-text", default=None, help="reference text") - parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu") - parser.add_argument( - "--tune", - action="store_true", - help="if set, tune weights on fw scores and lenpen instead of applying fixed weights for reranking", - ) - parser.add_argument( - "--lower-bound-fw-weight", - default=0.0, - type=float, - help="lower bound of search space", - ) - parser.add_argument( - 
"--upper-bound-fw-weight", - default=3, - type=float, - help="upper bound of search space", - ) - parser.add_argument( - "--lower-bound-lenpen", - default=0.0, - type=float, - help="lower bound of search space", - ) - parser.add_argument( - "--upper-bound-lenpen", - default=3, - type=float, - help="upper bound of search space", - ) - parser.add_argument( - "--fw-weight", type=float, default=None, help="weight on the fw model score" - ) - parser.add_argument( - "--num-trials", - default=1000, - type=int, - help="number of trials to do for random search", - ) - - args = options.parse_args_and_arch(parser) - main(args) - - -if __name__ == "__main__": - cli_main() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/logging/meters.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/logging/meters.py deleted file mode 100644 index 2100b1fa0b2704b1c585f59e9349655bba0cc9e6..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/logging/meters.py +++ /dev/null @@ -1,323 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import bisect -import time -from collections import OrderedDict -from typing import Dict, Optional - - -try: - import torch - - def type_as(a, b): - if torch.is_tensor(a) and torch.is_tensor(b): - return a.to(b) - else: - return a - - -except ImportError: - torch = None - - def type_as(a, b): - return a - - -try: - import numpy as np -except ImportError: - np = None - - -class Meter(object): - """Base class for Meters.""" - - def __init__(self): - pass - - def state_dict(self): - return {} - - def load_state_dict(self, state_dict): - pass - - def reset(self): - raise NotImplementedError - - @property - def smoothed_value(self) -> float: - """Smoothed value used for logging.""" - raise NotImplementedError - - -def safe_round(number, ndigits): - if hasattr(number, "__round__"): - return round(number, ndigits) - elif torch is not None and torch.is_tensor(number) and number.numel() == 1: - return safe_round(number.item(), ndigits) - elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"): - return safe_round(number.item(), ndigits) - else: - return number - - -class AverageMeter(Meter): - """Computes and stores the average and current value""" - - def __init__(self, round: Optional[int] = None): - self.round = round - self.reset() - - def reset(self): - self.val = None # most recent update - self.sum = 0 # sum from all updates - self.count = 0 # total n from all updates - - def update(self, val, n=1): - if val is not None: - self.val = val - if n > 0: - self.sum = type_as(self.sum, val) + (val * n) - self.count = type_as(self.count, n) + n - - def state_dict(self): - return { - "val": self.val, - "sum": self.sum, - "count": self.count, - "round": self.round, - } - - def load_state_dict(self, state_dict): - self.val = state_dict["val"] - self.sum = state_dict["sum"] - self.count = state_dict["count"] - self.round = state_dict.get("round", None) - - @property - def avg(self): - return self.sum / self.count if self.count > 0 else self.val - - @property - def smoothed_value(self) -> float: - val = self.avg - if self.round is not None and val is not None: - val = safe_round(val, self.round) - return val - - -class SumMeter(Meter): - """Computes and stores the sum""" - - def __init__(self, round: Optional[int] = None): - self.round = round - self.reset() - - def 
reset(self): - self.sum = 0 # sum from all updates - - def update(self, val): - if val is not None: - self.sum = type_as(self.sum, val) + val - - def state_dict(self): - return { - "sum": self.sum, - "round": self.round, - } - - def load_state_dict(self, state_dict): - self.sum = state_dict["sum"] - self.round = state_dict.get("round", None) - - @property - def smoothed_value(self) -> float: - val = self.sum - if self.round is not None and val is not None: - val = safe_round(val, self.round) - return val - - -class TimeMeter(Meter): - """Computes the average occurrence of some event per second""" - - def __init__( - self, - init: int = 0, - n: int = 0, - round: Optional[int] = None, - ): - self.round = round - self.reset(init, n) - - def reset(self, init=0, n=0): - self.init = init - self.start = time.perf_counter() - self.n = n - self.i = 0 - - def update(self, val=1): - self.n = type_as(self.n, val) + val - self.i += 1 - - def state_dict(self): - return { - "init": self.elapsed_time, - "n": self.n, - "round": self.round, - } - - def load_state_dict(self, state_dict): - if "start" in state_dict: - # backwards compatibility for old state_dicts - self.reset(init=state_dict["init"]) - else: - self.reset(init=state_dict["init"], n=state_dict["n"]) - self.round = state_dict.get("round", None) - - @property - def avg(self): - return self.n / self.elapsed_time - - @property - def elapsed_time(self): - return self.init + (time.perf_counter() - self.start) - - @property - def smoothed_value(self) -> float: - val = self.avg - if self.round is not None and val is not None: - val = safe_round(val, self.round) - return val - - -class StopwatchMeter(Meter): - """Computes the sum/avg duration of some event in seconds""" - - def __init__(self, round: Optional[int] = None): - self.round = round - self.sum = 0 - self.n = 0 - self.start_time = None - - def start(self): - self.start_time = time.perf_counter() - - def stop(self, n=1, prehook=None): - if self.start_time is not None: - if prehook is not None: - prehook() - delta = time.perf_counter() - self.start_time - self.sum = self.sum + delta - self.n = type_as(self.n, n) + n - - def reset(self): - self.sum = 0 # cumulative time during which stopwatch was active - self.n = 0 # total n across all start/stop - self.start() - - def state_dict(self): - return { - "sum": self.sum, - "n": self.n, - "round": self.round, - } - - def load_state_dict(self, state_dict): - self.sum = state_dict["sum"] - self.n = state_dict["n"] - self.start_time = None - self.round = state_dict.get("round", None) - - @property - def avg(self): - return self.sum / self.n if self.n > 0 else self.sum - - @property - def elapsed_time(self): - if self.start_time is None: - return 0.0 - return time.perf_counter() - self.start_time - - @property - def smoothed_value(self) -> float: - val = self.avg if self.sum > 0 else self.elapsed_time - if self.round is not None and val is not None: - val = safe_round(val, self.round) - return val - - -class MetersDict(OrderedDict): - """A sorted dictionary of :class:`Meters`. - - Meters are sorted according to a priority that is given when the - meter is first added to the dictionary. 
- """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.priorities = [] - - def __setitem__(self, key, value): - assert key not in self, "MetersDict doesn't support reassignment" - priority, value = value - bisect.insort(self.priorities, (priority, len(self.priorities), key)) - super().__setitem__(key, value) - for _, _, key in self.priorities: # reorder dict to match priorities - self.move_to_end(key) - - def add_meter(self, key, meter, priority): - self.__setitem__(key, (priority, meter)) - - def state_dict(self): - return [ - (pri, key, self[key].__class__.__name__, self[key].state_dict()) - for pri, _, key in self.priorities - # can't serialize DerivedMeter instances - if not isinstance(self[key], MetersDict._DerivedMeter) - ] - - def load_state_dict(self, state_dict): - self.clear() - self.priorities.clear() - for pri, key, meter_cls, meter_state in state_dict: - meter = globals()[meter_cls]() - meter.load_state_dict(meter_state) - self.add_meter(key, meter, pri) - - def get_smoothed_value(self, key: str) -> float: - """Get a single smoothed value.""" - meter = self[key] - if isinstance(meter, MetersDict._DerivedMeter): - return meter.fn(self) - else: - return meter.smoothed_value - - def get_smoothed_values(self) -> Dict[str, float]: - """Get all smoothed values.""" - return OrderedDict( - [ - (key, self.get_smoothed_value(key)) - for key in self.keys() - if not key.startswith("_") - ] - ) - - def reset(self): - """Reset Meter instances.""" - for meter in self.values(): - if isinstance(meter, MetersDict._DerivedMeter): - continue - meter.reset() - - class _DerivedMeter(Meter): - """A Meter whose values are derived from other Meters.""" - - def __init__(self, fn): - self.fn = fn - - def reset(self): - pass diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/bmuf.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/bmuf.py deleted file mode 100644 index d6d0e04e86eb894efe59e13a78843d01ca9e651d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/bmuf.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
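-# BMUF (Block-wise Model Update Filtering) overview: instead of synchronizing gradients every step, each worker trains locally and, every `global_sync_iter` updates, its drift from the last synced model is averaged across GPUs and applied to a shared global copy through a momentum-filtered update (see _block_sync / _update_global_model below).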
- -from dataclasses import dataclass, field - -import torch -import torch.distributed as dist -from fairseq.dataclass.configs import FairseqBMUFConfig -from fairseq.dataclass.utils import gen_parser_from_dataclass -from fairseq.optim.fairseq_optimizer import FairseqOptimizer - - -class FairseqBMUF(FairseqOptimizer): - """ - Implements incremental block distributed data parallelism similar to - https://ieeexplore.ieee.org/document/7472805 - - Paper title: Scalable training of deep learning machines by incremental - block training with intra-block parallel optimization and blockwise - model-update filtering - """ - - def __init__(self, cfg: FairseqBMUFConfig, optimizer): - super().__init__(cfg) - self._optimizer = optimizer - self._num_updates = 0 - self.sync_iter = cfg.global_sync_iter - self.block_momentum = cfg.block_momentum - self.block_lr = cfg.block_lr - self._reset_local_data() - self.warmup_iteration = cfg.warmup_iterations - self.use_nbm = cfg.use_nbm - self.initial_state = self._optimizer.state_dict() - self.average_sync = self.cfg.average_sync - self.world_size = self.cfg.distributed_world_size - - @staticmethod - def add_args(parser): - """Add optimizer-specific arguments to the parser.""" - gen_parser_from_dataclass(parser, FairseqBMUFConfig()) - - @property - def optimizer(self): - return self._optimizer.optimizer - - @property - def optimizer_config(self): - return self._optimizer.optimizer_config - - def get_lr(self): - return self._optimizer.get_lr() - - def set_lr(self, lr): - self._optimizer.set_lr(lr) - - def state_dict(self): - return self._optimizer.state_dict() - - def load_state_dict(self, state_dict, optimizer_overrides=None): - self._optimizer.load_state_dict(state_dict, optimizer_overrides) - self.initial_state = self._optimizer.state_dict() - - def multiply_grads(self, c): - """Multiplies grads by a constant *c*.""" - self._optimizer.multiply_grads(c) - - def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): - """Clips gradient norm.""" - return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn) - - def average_params(self): - self._optimizer.average_params() - - def _block_sync(self): - if self.world_size <= 1: - return - # Update the global model using local models from all GPUs - # (Step-1) Calculate grad between previously synced model and - # current local model - if self.block_momentum != 0: - self._calc_grad() - - # (Step-2) Average gradient from all GPUs - self._avg_grad_from_all_gpus() - - # (Step-3) Calculate global momentum and update the global model - if self.block_momentum != 0: - self._update_global_model() - - # (Step-4) Average local optimizer params - if self.average_sync: - self.average_params() - - def _is_warmup_end(self): - # Check whether the training iteration count equals the warmup iterations - if self.get_num_updates() == self.warmup_iteration: - return True - return False - - def _is_bmuf_iter(self): - # Check whether the training iteration count has reached a BMUF sync iteration - if (self.get_num_updates() > self.warmup_iteration) and ( - self.get_num_updates() % self.sync_iter == 0 - ): - return True - return False - - def _warmup_sync(self, root_rank=0): - if self.world_size <= 1: - return - # Broadcast the local model to all gpus - for param in self.params: - dist.broadcast(param.data, src=root_rank) - - # Update local optimizer state - if self.average_sync: - self._optimizer.average_params() - else: - self._optimizer.load_state_dict(self.initial_state) - - self._reset_local_data() - - def step(self, closure=None): - """Performs a single optimization 
step.""" - self._optimizer.step(closure) - self.set_num_updates(self.get_num_updates() + 1) - if self._is_warmup_end(): - self._warmup_sync() - elif self._is_bmuf_iter(): - self._block_sync() - - def zero_grad(self): - """Clears the gradients of all optimized parameters.""" - self._optimizer.zero_grad() - - def get_num_updates(self): - """Get the number of parameter updates.""" - return self._num_updates - - def set_num_updates(self, num_updates): - """Set the number of parameter updates.""" - self._num_updates = num_updates - - @torch.no_grad() - def _reset_local_data(self): - # (Step-0) Initialize global momentum parameters and store global copy on each gpu - self.global_params = [torch.zeros_like(p.data) for p in self.params] - self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params] - self.grads = [p.data.new_zeros(p.data.size()) for p in self.params] - - # saving the global model locally for calculating gradient during bmuf sync - for param, global_param in zip(self.params, self.global_params): - global_param.copy_(param.data) - - @torch.no_grad() - def _calc_grad(self): - # global_params is basically the global copy from the previously finished - # synchronisation. param.data is the local parameter after block_sync_freq - # steps on the local gpu, so grad is the difference between the previously - # synced model and the current local model. - for index, (param, global_param) in enumerate( - zip(self.params, self.global_params) - ): - self.grads[index] = global_param - param.data - - def _avg_grad_from_all_gpus(self): - for index, param in enumerate(self.params): - sync_para = param.data if self.block_momentum == 0 else self.grads[index] - sync_para /= float(dist.get_world_size()) - dist.all_reduce(sync_para, op=dist.ReduceOp.SUM) - - @torch.no_grad() - def _update_global_model(self): - for index, (param, global_param, smoothed_grad, grad) in enumerate( - zip( - self.params, - self.global_params, - self.smoothed_grads, - # all gpus would share the same value of smoothed_grad, since it is - # always computed on synchronized gradients. - self.grads, - ) - ): - # global_param is basically the last synchronized parameter. Though - # smoothed_grad is local, all processes will have the same value of - # smoothed_grad and hence param is a globally synchronized copy. - # smoothed_grad(t) = BM * smoothed_grad(t-1) + BM_lr * grad(t) - smoothed_grad = self.block_momentum * smoothed_grad + self.block_lr * grad - param.data.copy_(global_param - smoothed_grad) - - # A Nesterov momentum here is to do a partial weight update before - # calculating the gradient - if self.use_nbm: - param.data.copy_(param.data - self.block_momentum * smoothed_grad) - - # backup for the next synchronization. - self.smoothed_grads[index] = smoothed_grad - global_param.copy_(param.data) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/distributed/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/distributed/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/constraints/extract.py b/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/constraints/extract.py deleted file mode 100644 index f6155d0a0538aadb46bf612256b6b949728de69e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/constraints/extract.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -"""Extracts random constraints from reference files.""" - -import argparse -import random -import sys - -from sacrebleu import extract_ngrams - - -def get_phrase(words, index, length): - assert index < len(words) - length + 1 - phr = " ".join(words[index : index + length]) - for i in range(index, index + length): - words.pop(index) - return phr - - -def main(args): - - if args.seed: - random.seed(args.seed) - - for line in sys.stdin: - constraints = [] - - def add_constraint(constraint): - constraints.append(constraint) - - source = line.rstrip() - if "\t" in line: - source, target = line.split("\t") - if args.add_sos: - target = f"<s> {target}" - if args.add_eos: - target = f"{target} </s>" - - if len(target.split()) >= args.len: - words = [target] - - num = args.number - - choices = {} - for i in range(num): - if len(words) == 0: - break - segmentno = random.choice(range(len(words))) - segment = words.pop(segmentno) - tokens = segment.split() - phrase_index = random.choice(range(len(tokens))) - choice = " ".join( - tokens[phrase_index : min(len(tokens), phrase_index + args.len)] - ) - for j in range( - phrase_index, min(len(tokens), phrase_index + args.len) - ): - tokens.pop(phrase_index) - if phrase_index > 0: - words.append(" ".join(tokens[0:phrase_index])) - if phrase_index + 1 < len(tokens): - words.append(" ".join(tokens[phrase_index:])) - choices[target.find(choice)] = choice - - # mask out with spaces - target = target.replace(choice, " " * len(choice), 1) - - for key in sorted(choices.keys()): - add_constraint(choices[key]) - - print(source, *constraints, sep="\t") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases") - parser.add_argument("--len", "-l", type=int, default=1, help="phrase length") - parser.add_argument( - "--add-sos", default=False, action="store_true", help="add <s> token" - ) - parser.add_argument( - "--add-eos", default=False, action="store_true", help="add </s> token" - ) - parser.add_argument("--seed", "-s", default=0, type=int) - args = parser.parse_args() - - main(args) diff --git a/spaces/ORI-Muchim/MarinTTS/transforms.py b/spaces/ORI-Muchim/MarinTTS/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/MarinTTS/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - 
**spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = 
cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/PKUWilliamYang/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/detector.py b/spaces/PKUWilliamYang/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/detector.py deleted file mode 100644 index b162cff3194cc0114abd1a840e5dc772a55edd25..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/detector.py +++ /dev/null @@ -1,126 +0,0 @@ -import numpy as np -import torch -from torch.autograd import Variable -from .get_nets import PNet, RNet, ONet -from .box_utils import nms, calibrate_box, get_image_boxes, convert_to_square -from .first_stage import run_first_stage - - -def detect_faces(image, min_face_size=20.0, - thresholds=[0.6, 0.7, 0.8], - nms_thresholds=[0.7, 0.7, 0.7]): - """ - Arguments: - image: an instance of PIL.Image. - min_face_size: a float number. - thresholds: a list of length 3. - nms_thresholds: a list of length 3. - - Returns: - two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10], - bounding boxes and facial landmarks. 
- """ - - # LOAD MODELS - pnet = PNet() - rnet = RNet() - onet = ONet() - onet.eval() - - # BUILD AN IMAGE PYRAMID - width, height = image.size - min_length = min(height, width) - - min_detection_size = 12 - factor = 0.707 # sqrt(0.5) - - # scales for scaling the image - scales = [] - - # scales the image so that - # minimum size that we can detect equals to - # minimum face size that we want to detect - m = min_detection_size / min_face_size - min_length *= m - - factor_count = 0 - while min_length > min_detection_size: - scales.append(m * factor ** factor_count) - min_length *= factor - factor_count += 1 - - # STAGE 1 - - # it will be returned - bounding_boxes = [] - - with torch.no_grad(): - # run P-Net on different scales - for s in scales: - boxes = run_first_stage(image, pnet, scale=s, threshold=thresholds[0]) - bounding_boxes.append(boxes) - - # collect boxes (and offsets, and scores) from different scales - bounding_boxes = [i for i in bounding_boxes if i is not None] - bounding_boxes = np.vstack(bounding_boxes) - - keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0]) - bounding_boxes = bounding_boxes[keep] - - # use offsets predicted by pnet to transform bounding boxes - bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:]) - # shape [n_boxes, 5] - - bounding_boxes = convert_to_square(bounding_boxes) - bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) - - # STAGE 2 - - img_boxes = get_image_boxes(bounding_boxes, image, size=24) - img_boxes = torch.FloatTensor(img_boxes) - - output = rnet(img_boxes) - offsets = output[0].data.numpy() # shape [n_boxes, 4] - probs = output[1].data.numpy() # shape [n_boxes, 2] - - keep = np.where(probs[:, 1] > thresholds[1])[0] - bounding_boxes = bounding_boxes[keep] - bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) - offsets = offsets[keep] - - keep = nms(bounding_boxes, nms_thresholds[1]) - bounding_boxes = bounding_boxes[keep] - bounding_boxes = calibrate_box(bounding_boxes, offsets[keep]) - bounding_boxes = convert_to_square(bounding_boxes) - bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) - - # STAGE 3 - - img_boxes = get_image_boxes(bounding_boxes, image, size=48) - if len(img_boxes) == 0: - return [], [] - img_boxes = torch.FloatTensor(img_boxes) - output = onet(img_boxes) - landmarks = output[0].data.numpy() # shape [n_boxes, 10] - offsets = output[1].data.numpy() # shape [n_boxes, 4] - probs = output[2].data.numpy() # shape [n_boxes, 2] - - keep = np.where(probs[:, 1] > thresholds[2])[0] - bounding_boxes = bounding_boxes[keep] - bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) - offsets = offsets[keep] - landmarks = landmarks[keep] - - # compute landmark points - width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0 - height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0 - xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1] - landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1) * landmarks[:, 0:5] - landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1) * landmarks[:, 5:10] - - bounding_boxes = calibrate_box(bounding_boxes, offsets) - keep = nms(bounding_boxes, nms_thresholds[2], mode='min') - bounding_boxes = bounding_boxes[keep] - landmarks = landmarks[keep] - - return bounding_boxes, landmarks diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/r5rs.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/r5rs.go deleted file mode 100644 index 
204952e149882d1f014ecb5a7dbc4ef1898bca8a..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/r5rs.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/harp-pedals.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/harp-pedals.go deleted file mode 100644 index e5e9b866001e6d5b8a12c26754fdd998ef3e2e2f..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/harp-pedals.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/EleutherAI-gpt-j-6B/app.py b/spaces/PeepDaSlan9/EleutherAI-gpt-j-6B/app.py deleted file mode 100644 index b4ab9549994514c1b64784efe8b81534bb3fde6e..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/EleutherAI-gpt-j-6B/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/EleutherAI/gpt-j-6B").launch() \ No newline at end of file diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/scatter_points.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/scatter_points.py deleted file mode 100644 index 2b8aa4169e9f6ca4a6f845ce17d6d1e4db416bb8..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/scatter_points.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch import nn -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', - ['dynamic_point_to_voxel_forward', 'dynamic_point_to_voxel_backward']) - - -class _DynamicScatter(Function): - - @staticmethod - def forward(ctx, feats, coors, reduce_type='max'): - """convert kitti points(N, >=3) to voxels. - - Args: - feats (torch.Tensor): [N, C]. Points features to be reduced - into voxels. - coors (torch.Tensor): [N, ndim]. Corresponding voxel coordinates - (specifically multi-dim voxel index) of each points. - reduce_type (str, optional): Reduce op. support 'max', 'sum' and - 'mean'. Default: 'max'. - - Returns: - voxel_feats (torch.Tensor): [M, C]. Reduced features, input - features that shares the same voxel coordinates are reduced to - one row. - voxel_coors (torch.Tensor): [M, ndim]. Voxel coordinates. - """ - results = ext_module.dynamic_point_to_voxel_forward( - feats, coors, reduce_type) - (voxel_feats, voxel_coors, point2voxel_map, - voxel_points_count) = results - ctx.reduce_type = reduce_type - ctx.save_for_backward(feats, voxel_feats, point2voxel_map, - voxel_points_count) - ctx.mark_non_differentiable(voxel_coors) - return voxel_feats, voxel_coors - - @staticmethod - def backward(ctx, grad_voxel_feats, grad_voxel_coors=None): - (feats, voxel_feats, point2voxel_map, - voxel_points_count) = ctx.saved_tensors - grad_feats = torch.zeros_like(feats) - # TODO: whether to use index put or use cuda_backward - # To use index put, need point to voxel index - ext_module.dynamic_point_to_voxel_backward( - grad_feats, grad_voxel_feats.contiguous(), feats, voxel_feats, - point2voxel_map, voxel_points_count, ctx.reduce_type) - return grad_feats, None, None - - -dynamic_scatter = _DynamicScatter.apply - - -class DynamicScatter(nn.Module): - """Scatters points into voxels, used in the voxel encoder with dynamic - voxelization. 
- - Note: - The CPU and GPU implementation get the same output, but have numerical - difference after summation and division (e.g., 5e-7). - - Args: - voxel_size (list): list [x, y, z] size of three dimension. - point_cloud_range (list): The coordinate range of points, [x_min, - y_min, z_min, x_max, y_max, z_max]. - average_points (bool): whether to use avg pooling to scatter points - into voxel. - """ - - def __init__(self, voxel_size, point_cloud_range, average_points: bool): - super().__init__() - - self.voxel_size = voxel_size - self.point_cloud_range = point_cloud_range - self.average_points = average_points - - def forward_single(self, points, coors): - """Scatters points into voxels. - - Args: - points (torch.Tensor): Points to be reduced into voxels. - coors (torch.Tensor): Corresponding voxel coordinates (specifically - multi-dim voxel index) of each points. - - Returns: - voxel_feats (torch.Tensor): Reduced features, input features that - shares the same voxel coordinates are reduced to one row. - voxel_coors (torch.Tensor): Voxel coordinates. - """ - reduce = 'mean' if self.average_points else 'max' - return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce) - - def forward(self, points, coors): - """Scatters points/features into voxels. - - Args: - points (torch.Tensor): Points to be reduced into voxels. - coors (torch.Tensor): Corresponding voxel coordinates (specifically - multi-dim voxel index) of each points. - - Returns: - voxel_feats (torch.Tensor): Reduced features, input features that - shares the same voxel coordinates are reduced to one row. - voxel_coors (torch.Tensor): Voxel coordinates. - """ - if coors.size(-1) == 3: - return self.forward_single(points, coors) - else: - batch_size = coors[-1, 0] + 1 - voxels, voxel_coors = [], [] - for i in range(batch_size): - inds = torch.where(coors[:, 0] == i) - voxel, voxel_coor = self.forward_single( - points[inds], coors[inds][:, 1:]) - coor_pad = nn.functional.pad( - voxel_coor, (1, 0), mode='constant', value=i) - voxel_coors.append(coor_pad) - voxels.append(voxel) - features = torch.cat(voxels, dim=0) - feature_coors = torch.cat(voxel_coors, dim=0) - - return features, feature_coors - - def __repr__(self): - s = self.__class__.__name__ + '(' - s += 'voxel_size=' + str(self.voxel_size) - s += ', point_cloud_range=' + str(self.point_cloud_range) - s += ', average_points=' + str(self.average_points) - s += ')' - return s diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/deform_conv.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/deform_conv.py deleted file mode 100644 index 79602d6e29f54633bb6b716203eb1a618711e2b4..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/deform_conv.py +++ /dev/null @@ -1,436 +0,0 @@ -import torch -import math -from torch import nn -from torch.nn import init -from torch.nn.modules.utils import _pair -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from maskrcnn_benchmark.utils.amp import custom_fwd, custom_bwd - -from maskrcnn_benchmark import _C - -class DeformConvFunction(Function): - - @staticmethod - def forward( - ctx, - input, - offset, - weight, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - im2col_step=64 - ): - if input is not None and input.dim() != 4: - raise ValueError( - "Expected 4D tensor as input, got {}D tensor instead.".format( - input.dim())) - ctx.stride = 
_pair(stride) - ctx.padding = _pair(padding) - ctx.dilation = _pair(dilation) - ctx.groups = groups - ctx.deformable_groups = deformable_groups - ctx.im2col_step = im2col_step - - ctx.save_for_backward(input, offset, weight) - - output = input.new_empty( - DeformConvFunction._output_size(input, weight, ctx.padding, - ctx.dilation, ctx.stride)) - - ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones - - if not input.is_cuda: - raise NotImplementedError - else: - cur_im2col_step = min(ctx.im2col_step, input.shape[0]) - assert (input.shape[0] % - cur_im2col_step) == 0, 'im2col step must divide batchsize' - _C.deform_conv_forward( - input, - weight, - offset, - output, - ctx.bufs_[0], - ctx.bufs_[1], - weight.size(3), - weight.size(2), - ctx.stride[1], - ctx.stride[0], - ctx.padding[1], - ctx.padding[0], - ctx.dilation[1], - ctx.dilation[0], - ctx.groups, - ctx.deformable_groups, - cur_im2col_step - ) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, offset, weight = ctx.saved_tensors - - grad_input = grad_offset = grad_weight = None - - if not grad_output.is_cuda: - raise NotImplementedError - else: - cur_im2col_step = min(ctx.im2col_step, input.shape[0]) - assert (input.shape[0] % - cur_im2col_step) == 0, 'im2col step must divide batchsize' - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - _C.deform_conv_backward_input( - input, - offset, - grad_output, - grad_input, - grad_offset, - weight, - ctx.bufs_[0], - weight.size(3), - weight.size(2), - ctx.stride[1], - ctx.stride[0], - ctx.padding[1], - ctx.padding[0], - ctx.dilation[1], - ctx.dilation[0], - ctx.groups, - ctx.deformable_groups, - cur_im2col_step - ) - - if ctx.needs_input_grad[2]: - grad_weight = torch.zeros_like(weight) - _C.deform_conv_backward_parameters( - input, - offset, - grad_output, - grad_weight, - ctx.bufs_[0], - ctx.bufs_[1], - weight.size(3), - weight.size(2), - ctx.stride[1], - ctx.stride[0], - ctx.padding[1], - ctx.padding[0], - ctx.dilation[1], - ctx.dilation[0], - ctx.groups, - ctx.deformable_groups, - 1, - cur_im2col_step - ) - - return (grad_input, grad_offset, grad_weight, None, None, None, None, None) - - @staticmethod - def _output_size(input, weight, padding, dilation, stride): - channels = weight.size(0) - output_size = (input.size(0), channels) - for d in range(input.dim() - 2): - in_size = input.size(d + 2) - pad = padding[d] - kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 - stride_ = stride[d] - output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) - if not all(map(lambda s: s > 0, output_size)): - raise ValueError( - "convolution input is too small (output would be {})".format( - 'x'.join(map(str, output_size)))) - return output_size - -class ModulatedDeformConvFunction(Function): - - @staticmethod - def forward( - ctx, - input, - offset, - mask, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1 - ): - ctx.stride = stride - ctx.padding = padding - ctx.dilation = dilation - ctx.groups = groups - ctx.deformable_groups = deformable_groups - ctx.with_bias = bias is not None - if not ctx.with_bias: - bias = input.new_empty(1) # fake tensor - if not input.is_cuda: - raise NotImplementedError - if weight.requires_grad or mask.requires_grad or offset.requires_grad \ - or input.requires_grad: - ctx.save_for_backward(input, offset, mask, weight, bias) - output = input.new_empty( - 
ModulatedDeformConvFunction._infer_shape(ctx, input, weight)) - ctx._bufs = [input.new_empty(0), input.new_empty(0)] - _C.modulated_deform_conv_forward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, - output, - ctx._bufs[1], - weight.shape[2], - weight.shape[3], - ctx.stride, - ctx.stride, - ctx.padding, - ctx.padding, - ctx.dilation, - ctx.dilation, - ctx.groups, - ctx.deformable_groups, - ctx.with_bias - ) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - if not grad_output.is_cuda: - raise NotImplementedError - input, offset, mask, weight, bias = ctx.saved_tensors - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - grad_mask = torch.zeros_like(mask) - grad_weight = torch.zeros_like(weight) - grad_bias = torch.zeros_like(bias) - _C.modulated_deform_conv_backward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, - ctx._bufs[1], - grad_input, - grad_weight, - grad_bias, - grad_offset, - grad_mask, - grad_output, - weight.shape[2], - weight.shape[3], - ctx.stride, - ctx.stride, - ctx.padding, - ctx.padding, - ctx.dilation, - ctx.dilation, - ctx.groups, - ctx.deformable_groups, - ctx.with_bias - ) - if not ctx.with_bias: - grad_bias = None - - return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, - None, None, None, None, None) - - @staticmethod - def _infer_shape(ctx, input, weight): - n = input.size(0) - channels_out = weight.size(0) - height, width = input.shape[2:4] - kernel_h, kernel_w = weight.shape[2:4] - height_out = (height + 2 * ctx.padding - - (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1 - width_out = (width + 2 * ctx.padding - - (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1 - return n, channels_out, height_out, width_out - - -deform_conv = DeformConvFunction.apply -modulated_deform_conv = ModulatedDeformConvFunction.apply - - -class DeformConv(nn.Module): - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - bias=False - ): - assert not bias - super(DeformConv, self).__init__() - self.with_bias = bias - - assert in_channels % groups == 0, \ - 'in_channels {} cannot be divisible by groups {}'.format( - in_channels, groups) - assert out_channels % groups == 0, \ - 'out_channels {} cannot be divisible by groups {}'.format( - out_channels, groups) - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride) - self.padding = _pair(padding) - self.dilation = _pair(dilation) - self.groups = groups - self.deformable_groups = deformable_groups - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // self.groups, - *self.kernel_size)) - - self.reset_parameters() - - def reset_parameters(self): - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. 
/ math.sqrt(n) - self.weight.data.uniform_(-stdv, stdv) - - @custom_fwd(cast_inputs=torch.float32) - def forward(self, input, offset): - return deform_conv(input, offset, self.weight, self.stride, - self.padding, self.dilation, self.groups, - self.deformable_groups) - - def __repr__(self): - return "".join([ - "{}(".format(self.__class__.__name__), - "in_channels={}, ".format(self.in_channels), - "out_channels={}, ".format(self.out_channels), - "kernel_size={}, ".format(self.kernel_size), - "stride={}, ".format(self.stride), - "dilation={}, ".format(self.dilation), - "padding={}, ".format(self.padding), - "groups={}, ".format(self.groups), - "deformable_groups={}, ".format(self.deformable_groups), - "bias={})".format(self.with_bias), - ]) - -class ModulatedDeformConv(nn.Module): - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - bias=True - ): - super(ModulatedDeformConv, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = stride - self.padding = padding - self.dilation = dilation - self.groups = groups - self.deformable_groups = deformable_groups - self.with_bias = bias - - self.weight = nn.Parameter(torch.Tensor( - out_channels, - in_channels // groups, - *self.kernel_size - )) - if bias: - self.bias = nn.Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - self.reset_parameters() - - def reset_parameters(self): - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. / math.sqrt(n) - self.weight.data.uniform_(-stdv, stdv) - if self.bias is not None: - self.bias.data.zero_() - - @custom_fwd(cast_inputs=torch.float32) - def forward(self, input, offset, mask): - return modulated_deform_conv( - input, offset, mask, self.weight, self.bias, self.stride, - self.padding, self.dilation, self.groups, self.deformable_groups) - - def __repr__(self): - return "".join([ - "{}(".format(self.__class__.__name__), - "in_channels={}, ".format(self.in_channels), - "out_channels={}, ".format(self.out_channels), - "kernel_size={}, ".format(self.kernel_size), - "stride={}, ".format(self.stride), - "dilation={}, ".format(self.dilation), - "padding={}, ".format(self.padding), - "groups={}, ".format(self.groups), - "deformable_groups={}, ".format(self.deformable_groups), - "bias={})".format(self.with_bias), - ]) - -class ModulatedDeformConvPack(ModulatedDeformConv): - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - bias=True): - super(ModulatedDeformConvPack, self).__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - groups, deformable_groups, bias) - - self.conv_offset_mask = nn.Conv2d( - self.in_channels // self.groups, - self.deformable_groups * 3 * self.kernel_size[0] * - self.kernel_size[1], - kernel_size=self.kernel_size, - stride=_pair(self.stride), - padding=_pair(self.padding), - bias=True) - self.init_offset() - - def init_offset(self): - self.conv_offset_mask.weight.data.zero_() - self.conv_offset_mask.bias.data.zero_() - - @custom_fwd(cast_inputs=torch.float32) - def forward(self, input): - out = self.conv_offset_mask(input) - o1, o2, mask = torch.chunk(out, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - return modulated_deform_conv( - input, offset, mask, self.weight, self.bias, self.stride, - self.padding, 
self.dilation, self.groups, self.deformable_groups) diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/med.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/med.py deleted file mode 100644 index 99b0abab574a850320cc784aef4cc016f2b174c1..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/med.py +++ /dev/null @@ -1,955 +0,0 @@ -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li - * Based on huggingface code base - * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert -''' - -import math -import os -import warnings -from dataclasses import dataclass -from typing import Optional, Tuple - -import torch -from torch import Tensor, device, dtype, nn -import torch.utils.checkpoint -from torch import nn -from torch.nn import CrossEntropyLoss -import torch.nn.functional as F - -from transformers.activations import ACT2FN -from transformers.file_utils import ( - ModelOutput, -) -from transformers.modeling_outputs import ( - BaseModelOutputWithPastAndCrossAttentions, - BaseModelOutputWithPoolingAndCrossAttentions, - CausalLMOutputWithCrossAttentions, - MaskedLMOutput, - MultipleChoiceModelOutput, - NextSentencePredictorOutput, - QuestionAnsweringModelOutput, - SequenceClassifierOutput, - TokenClassifierOutput, -) -from transformers.modeling_utils import ( - PreTrainedModel, - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - prune_linear_layer, -) -from transformers.utils import logging -from transformers.models.bert.configuration_bert import BertConfig - - -logger = logging.get_logger(__name__) - - -class BertEmbeddings(nn.Module): - """Construct the embeddings from word and position embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - - self.config = config - - def forward( - self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 - ): - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - - if position_ids is None: - position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - - embeddings = inputs_embeds - - if self.position_embedding_type == "absolute": - position_embeddings = self.position_embeddings(position_ids) - embeddings += position_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class 
BertSelfAttention(nn.Module): - def __init__(self, config, is_cross_attention): - super().__init__() - self.config = config - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads) - ) - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = nn.Linear(config.hidden_size, self.all_head_size) - if is_cross_attention: - self.key = nn.Linear(config.encoder_width, self.all_head_size) - self.value = nn.Linear(config.encoder_width, self.all_head_size) - else: - self.key = nn.Linear(config.hidden_size, self.all_head_size) - self.value = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - self.max_position_embeddings = config.max_position_embeddings - self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) - self.save_attention = False - - def save_attn_gradients(self, attn_gradients): - self.attn_gradients = attn_gradients - - def get_attn_gradients(self): - return self.attn_gradients - - def save_attention_map(self, attention_map): - self.attention_map = attention_map - - def get_attention_map(self): - return self.attention_map - - def transpose_for_scores(self, x): - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(*new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - ): - mixed_query_layer = self.query(hidden_states) - - # If this is instantiated as a cross-attention module, the keys - # and values come from an encoder; the attention mask needs to be - # such that the encoder's padding tokens are not attended to. - is_cross_attention = encoder_hidden_states is not None - - if is_cross_attention: - key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) - value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) - attention_mask = encoder_attention_mask - elif past_key_value is not None: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - key_layer = torch.cat([past_key_value[0], key_layer], dim=2) - value_layer = torch.cat([past_key_value[1], value_layer], dim=2) - else: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - - query_layer = self.transpose_for_scores(mixed_query_layer) - - past_key_value = (key_layer, value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. 
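- # attention_scores has shape [batch, num_heads, query_len, key_len]; it is divided by sqrt(attention_head_size) further below before the softmax.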
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - seq_length = hidden_states.size()[1] - position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) - position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) - distance = position_ids_l - position_ids_r - positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) - positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility - - if self.position_embedding_type == "relative_key": - relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores - elif self.position_embedding_type == "relative_key_query": - relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in BertModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs = nn.Softmax(dim=-1)(attention_scores) - - if is_cross_attention and self.save_attention: - self.save_attention_map(attention_probs) - attention_probs.register_hook(self.save_attn_gradients) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
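- # i.e. dropout zeroes whole entries of the softmax distribution, so some key positions receive no attention weight for this forward pass.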
- attention_probs_dropped = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs_dropped = attention_probs_dropped * head_mask - - context_layer = torch.matmul(attention_probs_dropped, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(*new_context_layer_shape) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - outputs = outputs + (past_key_value,) - return outputs - - -class BertSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertAttention(nn.Module): - def __init__(self, config, is_cross_attention=False): - super().__init__() - self.self = BertSelfAttention(config, is_cross_attention) - self.output = BertSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - ): - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -class BertIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -class BertOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = 
self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertLayer(nn.Module): - def __init__(self, config, layer_num): - super().__init__() - self.config = config - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - self.attention = BertAttention(config) - self.layer_num = layer_num - if self.config.add_cross_attention: - self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) - self.intermediate = BertIntermediate(config) - self.output = BertOutput(config) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - mode=None, - ): - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None - self_attention_outputs = self.attention( - hidden_states, - attention_mask, - head_mask, - output_attentions=output_attentions, - past_key_value=self_attn_past_key_value, - ) - attention_output = self_attention_outputs[0] - - outputs = self_attention_outputs[1:-1] - present_key_value = self_attention_outputs[-1] - - if mode=='multimodal': - assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" - - cross_attention_outputs = self.crossattention( - attention_output, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - output_attentions=output_attentions, - ) - attention_output = cross_attention_outputs[0] - outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output - ) - outputs = (layer_output,) + outputs - - outputs = outputs + (present_key_value,) - - return outputs - - def feed_forward_chunk(self, attention_output): - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - - -class BertEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, - mode='multimodal', - ): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None - - next_decoder_cache = () if use_cache else None - - for i in range(self.config.num_hidden_layers): - layer_module = self.layer[i] - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - past_key_value = past_key_values[i] if past_key_values is not None else None - - if self.gradient_checkpointing and self.training: - - if use_cache: - logger.warn( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, past_key_value, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - mode=mode, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - mode=mode, - ) - - hidden_states = layer_outputs[0] - if use_cache: - next_decoder_cache += (layer_outputs[-1],) - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, - ) - - -class BertPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class BertPredictionHeadTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - if isinstance(config.hidden_act, str): - self.transform_act_fn = ACT2FN[config.hidden_act] - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -class BertLMPredictionHead(nn.Module): - def __init__(self, config): - super().__init__() - self.transform = BertPredictionHeadTransform(config) - - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. - self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - - # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` - self.decoder.bias = self.bias - - def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) - return hidden_states - - -class BertOnlyMLMHead(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = BertLMPredictionHead(config) - - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores - - -class BertPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. 
- """ - - config_class = BertConfig - base_model_prefix = "bert" - _keys_to_ignore_on_load_missing = [r"position_ids"] - - def _init_weights(self, module): - """ Initialize the weights """ - if isinstance(module, (nn.Linear, nn.Embedding)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - - -class BertModel(BertPreTrainedModel): - """ - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in `Attention is - all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, - Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. - argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an - input to the forward pass. - """ - - def __init__(self, config, add_pooling_layer=True): - super().__init__(config) - self.config = config - - self.embeddings = BertEmbeddings(config) - - self.encoder = BertEncoder(config) - - self.pooler = BertPooler(config) if add_pooling_layer else None - - self.init_weights() - - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - - def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: - """ - Makes broadcastable attention and causal masks so that future and masked tokens are ignored. - - Arguments: - attention_mask (:obj:`torch.Tensor`): - Mask with ones indicating tokens to attend to, zeros for tokens to ignore. - input_shape (:obj:`Tuple[int]`): - The shape of the input to the model. - device: (:obj:`torch.device`): - The device of the input to the model. - - Returns: - :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. - """ - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. 
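        # (Editorial note, not part of the original file.) The branches below cover three cases:
        #   * a 3D mask [batch, from_seq, to_seq]          -> reshaped to [batch, 1, from_seq, to_seq]
        #   * a 2D mask [batch, seq] with is_decoder=True  -> combined with a lower-triangular causal
        #     mask; e.g. for seq_length = 3 the causal part is [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
        #   * a 2D mask [batch, seq] for an encoder        -> reshaped to [batch, 1, 1, seq]
        # The final step maps keep=1 to 0.0 and mask=0 to -10000.0, so the result can simply be
        # added to the raw attention scores before the softmax.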
- if attention_mask.dim() == 3: - extended_attention_mask = attention_mask[:, None, :, :] - elif attention_mask.dim() == 2: - # Provided a padding mask of dimensions [batch_size, seq_length] - # - if the model is a decoder, apply a causal mask in addition to the padding mask - # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] - if is_decoder: - batch_size, seq_length = input_shape - - seq_ids = torch.arange(seq_length, device=device) - causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] - # in case past_key_values are used we need to add a prefix ones mask to the causal mask - # causal and attention masks must have same type with pytorch version < 1.3 - causal_mask = causal_mask.to(attention_mask.dtype) - - if causal_mask.shape[1] < attention_mask.shape[1]: - prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] - causal_mask = torch.cat( - [ - torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), - causal_mask, - ], - axis=-1, - ) - - extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] - else: - extended_attention_mask = attention_mask[:, None, None, :] - else: - raise ValueError( - "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( - input_shape, attention_mask.shape - ) - ) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility - extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 - return extended_attention_mask - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - is_decoder=False, - mode='multimodal', - ): - r""" - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` - (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` - instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. - use_cache (:obj:`bool`, `optional`): - If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up - decoding (see :obj:`past_key_values`). - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if is_decoder: - use_cache = use_cache if use_cache is not None else self.config.use_cache - else: - use_cache = False - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - batch_size, seq_length = input_shape - device = input_ids.device - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - batch_size, seq_length = input_shape - device = inputs_embeds.device - elif encoder_embeds is not None: - input_shape = encoder_embeds.size()[:-1] - batch_size, seq_length = input_shape - device = encoder_embeds.device - else: - raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") - - # past_key_values_length - past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 - - if attention_mask is None: - attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. 
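        # (Editorial note, not part of the original file.) get_extended_attention_mask() below turns
        # the [batch, seq] 0/1 padding mask into an additive float mask broadcastable over attention
        # heads (with a causal component in the decoder case); the encoder_attention_mask used for
        # cross-attention is converted into an analogous additive mask further down via
        # self.invert_attention_mask().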
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, - device, is_decoder) - - # If a 2D or 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if encoder_hidden_states is not None: - if type(encoder_hidden_states) == list: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() - else: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() - encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - - if type(encoder_attention_mask) == list: - encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] - elif encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) - encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) - else: - encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) - else: - encoder_extended_attention_mask = None - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - if encoder_embeds is None: - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - past_key_values_length=past_key_values_length, - ) - else: - embedding_output = encoder_embeds - - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - mode=mode, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) if self.pooler is not None else None - - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - - return BaseModelOutputWithPoolingAndCrossAttentions( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, - ) - - - -class BertLMHeadModel(BertPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] - - def __init__(self, config): - super().__init__(config) - - self.bert = BertModel(config, add_pooling_layer=False) - self.cls = BertOnlyMLMHead(config) - - self.init_weights() - - def get_output_embeddings(self): - return self.cls.predictions.decoder - - def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - labels=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - 
return_logits=False, - is_decoder=True, - reduction='mean', - mode='multimodal', - ): - r""" - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in - ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are - ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` - past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` - (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` - instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. - use_cache (:obj:`bool`, `optional`): - If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up - decoding (see :obj:`past_key_values`). 
- Returns: - Example:: - >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig - >>> import torch - >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') - >>> config = BertConfig.from_pretrained("bert-base-cased") - >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) - >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") - >>> outputs = model(**inputs) - >>> prediction_logits = outputs.logits - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if labels is not None: - use_cache = False - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - is_decoder=is_decoder, - mode=mode, - ) - - sequence_output = outputs[0] - prediction_scores = self.cls(sequence_output) - - if return_logits: - return prediction_scores[:, :-1, :].contiguous() - - lm_loss = None - if labels is not None: - # we are doing next-token prediction; shift prediction scores and input ids by one - shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() - labels = labels[:, 1:].contiguous() - loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) - lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) - if reduction=='none': - lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ((lm_loss,) + output) if lm_loss is not None else output - - return CausalLMOutputWithCrossAttentions( - loss=lm_loss, - logits=prediction_scores, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - cross_attentions=outputs.cross_attentions, - ) - - def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): - input_shape = input_ids.shape - # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly - if attention_mask is None: - attention_mask = input_ids.new_ones(input_shape) - - # cut decoder_input_ids if past is used - if past is not None: - input_ids = input_ids[:, -1:] - - return { - "input_ids": input_ids, - "attention_mask": attention_mask, - "past_key_values": past, - "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), - "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), - "is_decoder": True, - } - - def _reorder_cache(self, past, beam_idx): - reordered_past = () - for layer_past in past: - reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) - return reordered_past diff --git a/spaces/Podtekatel/Avatar2VSK/inference/box_utils.py b/spaces/Podtekatel/Avatar2VSK/inference/box_utils.py deleted file mode 100644 index ddbd40bc4861ba88a242129f195b0de9ff82fe40..0000000000000000000000000000000000000000 --- a/spaces/Podtekatel/Avatar2VSK/inference/box_utils.py +++ /dev/null @@ -1,31 +0,0 @@ -import numpy as np - - -def convert_to_square(bboxes): - """Convert bounding boxes to a square form. - Arguments: - bboxes: a float numpy array of shape [n, 4]. 
- Returns: - a float numpy array of shape [4], - squared bounding boxes. - """ - - square_bboxes = np.zeros_like(bboxes) - x1, y1, x2, y2 = bboxes - h = y2 - y1 + 1.0 - w = x2 - x1 + 1.0 - max_side = np.maximum(h, w) - square_bboxes[0] = x1 + w * 0.5 - max_side * 0.5 - square_bboxes[1] = y1 + h * 0.5 - max_side * 0.5 - square_bboxes[2] = square_bboxes[0] + max_side - 1.0 - square_bboxes[3] = square_bboxes[1] + max_side - 1.0 - return square_bboxes - - -def scale_box(box, scale): - x1, y1, x2, y2 = box - center_x, center_y = (x1 + x2) / 2, (y1 + y2) / 2 - w, h = x2 - x1, y2 - y1 - new_w, new_h = w * scale, h * scale - y1, y2, x1, x2 = center_y - new_h / 2, center_y + new_h / 2, center_x - new_w / 2, center_x + new_w / 2, - return np.array((x1, y1, x2, y2)) diff --git a/spaces/Priyanka-Kumavat/Customer-Complaint-Segmentation-Model/README.md b/spaces/Priyanka-Kumavat/Customer-Complaint-Segmentation-Model/README.md deleted file mode 100644 index bdce2aa9379caa41e97189c573082d185e9b90d2..0000000000000000000000000000000000000000 --- a/spaces/Priyanka-Kumavat/Customer-Complaint-Segmentation-Model/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Customer Complaint Segmentation Model -emoji: 🔥 -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Proxy1/Turbo/README.md b/spaces/Proxy1/Turbo/README.md deleted file mode 100644 index c040b7c99fe3520a608bc7b634d466e6ab5ba44a..0000000000000000000000000000000000000000 --- a/spaces/Proxy1/Turbo/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Turbo -emoji: 🐢 -colorFrom: blue -colorTo: gray -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/conditional_builder/objects_center_points.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/conditional_builder/objects_center_points.py deleted file mode 100644 index 9a480329cc47fb38a7b8729d424e092b77d40749..0000000000000000000000000000000000000000 --- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/conditional_builder/objects_center_points.py +++ /dev/null @@ -1,168 +0,0 @@ -import math -import random -import warnings -from itertools import cycle -from typing import List, Optional, Tuple, Callable - -from PIL import Image as pil_image, ImageDraw as pil_img_draw, ImageFont -from more_itertools.recipes import grouper -from taming.data.conditional_builder.utils import COLOR_PALETTE, WHITE, GRAY_75, BLACK, FULL_CROP, filter_annotations, \ - additional_parameters_string, horizontally_flip_bbox, pad_list, get_circle_size, get_plot_font_size, \ - absolute_bbox, rescale_annotations -from taming.data.helper_types import BoundingBox, Annotation -from taming.data.image_transforms import convert_pil_to_tensor -from torch import LongTensor, Tensor - - -class ObjectsCenterPointsConditionalBuilder: - def __init__(self, no_object_classes: int, no_max_objects: int, no_tokens: int, encode_crop: bool, - use_group_parameter: bool, use_additional_parameters: bool): - self.no_object_classes = no_object_classes - self.no_max_objects = no_max_objects - self.no_tokens = no_tokens - self.encode_crop = encode_crop - self.no_sections = int(math.sqrt(self.no_tokens)) - self.use_group_parameter = use_group_parameter - self.use_additional_parameters = 
use_additional_parameters - - @property - def none(self) -> int: - return self.no_tokens - 1 - - @property - def object_descriptor_length(self) -> int: - return 2 - - @property - def embedding_dim(self) -> int: - extra_length = 2 if self.encode_crop else 0 - return self.no_max_objects * self.object_descriptor_length + extra_length - - def tokenize_coordinates(self, x: float, y: float) -> int: - """ - Express 2d coordinates with one number. - Example: assume self.no_tokens = 16, then no_sections = 4: - 0 0 0 0 - 0 0 # 0 - 0 0 0 0 - 0 0 0 x - Then the # position corresponds to token 6, the x position to token 15. - @param x: float in [0, 1] - @param y: float in [0, 1] - @return: discrete tokenized coordinate - """ - x_discrete = int(round(x * (self.no_sections - 1))) - y_discrete = int(round(y * (self.no_sections - 1))) - return y_discrete * self.no_sections + x_discrete - - def coordinates_from_token(self, token: int) -> (float, float): - x = token % self.no_sections - y = token // self.no_sections - return x / (self.no_sections - 1), y / (self.no_sections - 1) - - def bbox_from_token_pair(self, token1: int, token2: int) -> BoundingBox: - x0, y0 = self.coordinates_from_token(token1) - x1, y1 = self.coordinates_from_token(token2) - return x0, y0, x1 - x0, y1 - y0 - - def token_pair_from_bbox(self, bbox: BoundingBox) -> Tuple[int, int]: - return self.tokenize_coordinates(bbox[0], bbox[1]), \ - self.tokenize_coordinates(bbox[0] + bbox[2], bbox[1] + bbox[3]) - - def inverse_build(self, conditional: LongTensor) \ - -> Tuple[List[Tuple[int, Tuple[float, float]]], Optional[BoundingBox]]: - conditional_list = conditional.tolist() - crop_coordinates = None - if self.encode_crop: - crop_coordinates = self.bbox_from_token_pair(conditional_list[-2], conditional_list[-1]) - conditional_list = conditional_list[:-2] - table_of_content = grouper(conditional_list, self.object_descriptor_length) - assert conditional.shape[0] == self.embedding_dim - return [ - (object_tuple[0], self.coordinates_from_token(object_tuple[1])) - for object_tuple in table_of_content if object_tuple[0] != self.none - ], crop_coordinates - - def plot(self, conditional: LongTensor, label_for_category_no: Callable[[int], str], figure_size: Tuple[int, int], - line_width: int = 3, font_size: Optional[int] = None) -> Tensor: - plot = pil_image.new('RGB', figure_size, WHITE) - draw = pil_img_draw.Draw(plot) - circle_size = get_circle_size(figure_size) - font = ImageFont.truetype('/usr/share/fonts/truetype/lato/Lato-Regular.ttf', - size=get_plot_font_size(font_size, figure_size)) - width, height = plot.size - description, crop_coordinates = self.inverse_build(conditional) - for (representation, (x, y)), color in zip(description, cycle(COLOR_PALETTE)): - x_abs, y_abs = x * width, y * height - ann = self.representation_to_annotation(representation) - label = label_for_category_no(ann.category_no) + ' ' + additional_parameters_string(ann) - ellipse_bbox = [x_abs - circle_size, y_abs - circle_size, x_abs + circle_size, y_abs + circle_size] - draw.ellipse(ellipse_bbox, fill=color, width=0) - draw.text((x_abs, y_abs), label, anchor='md', fill=BLACK, font=font) - if crop_coordinates is not None: - draw.rectangle(absolute_bbox(crop_coordinates, width, height), outline=GRAY_75, width=line_width) - return convert_pil_to_tensor(plot) / 127.5 - 1. 
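    # (Editorial worked example, not part of the original file.) Following the tokenize_coordinates
    # docstring above with no_tokens = 16, so no_sections = 4:
    #   tokenize_coordinates(2/3, 1/3) -> x_discrete = 2, y_discrete = 1 -> 1*4 + 2 = 6   (the '#' cell)
    #   tokenize_coordinates(1.0, 1.0) -> x_discrete = 3, y_discrete = 3 -> 3*4 + 3 = 15  (the 'x' cell)
    #   coordinates_from_token(6)      -> (2/3, 1/3), i.e. the mapping round-trips exactly on the
    #   discrete grid.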
- - def object_representation(self, annotation: Annotation) -> int: - modifier = 0 - if self.use_group_parameter: - modifier |= 1 * (annotation.is_group_of is True) - if self.use_additional_parameters: - modifier |= 2 * (annotation.is_occluded is True) - modifier |= 4 * (annotation.is_depiction is True) - modifier |= 8 * (annotation.is_inside is True) - return annotation.category_no + self.no_object_classes * modifier - - def representation_to_annotation(self, representation: int) -> Annotation: - category_no = representation % self.no_object_classes - modifier = representation // self.no_object_classes - # noinspection PyTypeChecker - return Annotation( - area=None, image_id=None, bbox=None, category_id=None, id=None, source=None, confidence=None, - category_no=category_no, - is_group_of=bool((modifier & 1) * self.use_group_parameter), - is_occluded=bool((modifier & 2) * self.use_additional_parameters), - is_depiction=bool((modifier & 4) * self.use_additional_parameters), - is_inside=bool((modifier & 8) * self.use_additional_parameters) - ) - - def _crop_encoder(self, crop_coordinates: BoundingBox) -> List[int]: - return list(self.token_pair_from_bbox(crop_coordinates)) - - def _make_object_descriptors(self, annotations: List[Annotation]) -> List[Tuple[int, ...]]: - object_tuples = [ - (self.object_representation(a), - self.tokenize_coordinates(a.bbox[0] + a.bbox[2] / 2, a.bbox[1] + a.bbox[3] / 2)) - for a in annotations - ] - empty_tuple = (self.none, self.none) - object_tuples = pad_list(object_tuples, empty_tuple, self.no_max_objects) - return object_tuples - - def build(self, annotations: List, crop_coordinates: Optional[BoundingBox] = None, horizontal_flip: bool = False) \ - -> LongTensor: - if len(annotations) == 0: - warnings.warn('Did not receive any annotations.') - if len(annotations) > self.no_max_objects: - warnings.warn('Received more annotations than allowed.') - annotations = annotations[:self.no_max_objects] - - if not crop_coordinates: - crop_coordinates = FULL_CROP - - random.shuffle(annotations) - annotations = filter_annotations(annotations, crop_coordinates) - if self.encode_crop: - annotations = rescale_annotations(annotations, FULL_CROP, horizontal_flip) - if horizontal_flip: - crop_coordinates = horizontally_flip_bbox(crop_coordinates) - extra = self._crop_encoder(crop_coordinates) - else: - annotations = rescale_annotations(annotations, crop_coordinates, horizontal_flip) - extra = [] - - object_tuples = self._make_object_descriptors(annotations) - flattened = [token for tuple_ in object_tuples for token in tuple_] + extra - assert len(flattened) == self.embedding_dim - assert all(0 <= value < self.no_tokens for value in flattened) - return LongTensor(flattened) diff --git a/spaces/Realcat/image-matching-webui/third_party/DeDoDe/data_prep/prep_keypoints.py b/spaces/Realcat/image-matching-webui/third_party/DeDoDe/data_prep/prep_keypoints.py deleted file mode 100644 index 616f91b875879f726218efdfe4bb6dc95297b33a..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/DeDoDe/data_prep/prep_keypoints.py +++ /dev/null @@ -1,96 +0,0 @@ -import argparse - -import imagesize - -import numpy as np - -import os - - -base_path = "data/megadepth" -# Remove the trailing / if need be. 
-if base_path[-1] in ["/", "\\"]: - base_path = base_path[:-1] - - -base_depth_path = os.path.join(base_path, "phoenix/S6/zl548/MegaDepth_v1") -base_undistorted_sfm_path = os.path.join(base_path, "Undistorted_SfM") - -scene_ids = os.listdir(base_undistorted_sfm_path) -for scene_id in scene_ids: - if os.path.exists( - f"{base_path}/prep_scene_info/detections/detections_{scene_id}.npy" - ): - print(f"skipping {scene_id} as it exists") - continue - undistorted_sparse_path = os.path.join( - base_undistorted_sfm_path, scene_id, "sparse-txt" - ) - if not os.path.exists(undistorted_sparse_path): - print("sparse path doesnt exist") - continue - - depths_path = os.path.join(base_depth_path, scene_id, "dense0", "depths") - if not os.path.exists(depths_path): - print("depths doesnt exist") - - continue - - images_path = os.path.join(base_undistorted_sfm_path, scene_id, "images") - if not os.path.exists(images_path): - print("images path doesnt exist") - continue - - # Process cameras.txt - if not os.path.exists(os.path.join(undistorted_sparse_path, "cameras.txt")): - print("no cameras") - continue - with open(os.path.join(undistorted_sparse_path, "cameras.txt"), "r") as f: - raw = f.readlines()[3:] # skip the header - - camera_intrinsics = {} - for camera in raw: - camera = camera.split(" ") - camera_intrinsics[int(camera[0])] = [float(elem) for elem in camera[2:]] - - # Process points3D.txt - with open(os.path.join(undistorted_sparse_path, "points3D.txt"), "r") as f: - raw = f.readlines()[3:] # skip the header - - points3D = {} - for point3D in raw: - point3D = point3D.split(" ") - points3D[int(point3D[0])] = np.array( - [float(point3D[1]), float(point3D[2]), float(point3D[3])] - ) - - # Process images.txt - with open(os.path.join(undistorted_sparse_path, "images.txt"), "r") as f: - raw = f.readlines()[4:] # skip the header - - image_id_to_idx = {} - image_names = [] - raw_pose = [] - camera = [] - points3D_id_to_2D = [] - n_points3D = [] - id_to_detections = {} - for idx, (image, points) in enumerate(zip(raw[::2], raw[1::2])): - image = image.split(" ") - points = points.split(" ") - - image_id_to_idx[int(image[0])] = idx - - image_name = image[-1].strip("\n") - image_names.append(image_name) - - raw_pose.append([float(elem) for elem in image[1:-2]]) - camera.append(int(image[-2])) - points_np = np.array(points).astype(np.float32).reshape(len(points) // 3, 3) - visible_points = points_np[points_np[:, 2] != -1] - id_to_detections[idx] = visible_points - np.save( - f"{base_path}/prep_scene_info/detections/detections_{scene_id}.npy", - id_to_detections, - ) - print(f"{scene_id} done") diff --git a/spaces/Recognai/veganuary_ner/app.py b/spaces/Recognai/veganuary_ner/app.py deleted file mode 100644 index 8b99fd0e7144a1556cc230c6b839487f3804cecb..0000000000000000000000000000000000000000 --- a/spaces/Recognai/veganuary_ner/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import spacy -import gradio as gr -from transformers import pipeline, AutoTokenizer -from pysentimiento.preprocessing import preprocess_tweet - -nlp = spacy.load("en_core_web_sm") - -tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base", add_prefix_space=True, model_max_length=512) -pl = pipeline("ner", tokenizer=tokenizer, model="Recognai/veganuary_ner", aggregation_strategy="first") - -def ner(text): - text = preprocess_tweet(text) - doc = nlp(text) - text = " ".join([token.text for token in doc]) - predictions = pl(text) - mentions = [pred["word"].strip() for pred in predictions if pred["entity_group"] == "FOOD"] - return 
"\n".join(mentions) - -iface = gr.Interface( - ner, - gr.inputs.Textbox(placeholder="copy&paste your veganuary tweet here ...", label="Tweet"), - gr.outputs.Textbox(label="List of detected food mentions in the tweet"), - examples=[ - ["Fruit is delicious 😋 AND healthy 🥗! Brighten up your plate & palate with fresh watermelon, Greek yoghurt & berries, smashed avocado or lime added to water. A piece of #fruit a day keeps the doctor away! #Veganuary2022"] - ], - allow_flagging=False, - title="Veganuary NER", - description="Extract food entities from veganuary tweets 😋", -) - -iface.launch(share=False) \ No newline at end of file diff --git a/spaces/Reeve/Ohayou_Face/torch_utils/ops/conv2d_gradfix.py b/spaces/Reeve/Ohayou_Face/torch_utils/ops/conv2d_gradfix.py deleted file mode 100644 index e95e10d0b1d0315a63a76446fd4c5c293c8bbc6d..0000000000000000000000000000000000000000 --- a/spaces/Reeve/Ohayou_Face/torch_utils/ops/conv2d_gradfix.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.conv2d` that supports -arbitrarily high order gradients with zero performance penalty.""" - -import warnings -import contextlib -import torch - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -#---------------------------------------------------------------------------- - -enabled = False # Enable the custom op by setting this to true. -weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights. 
- -@contextlib.contextmanager -def no_weight_gradients(): - global weight_gradients_disabled - old = weight_gradients_disabled - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - -#---------------------------------------------------------------------------- - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias) - return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) - -def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias) - return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) - -#---------------------------------------------------------------------------- - -def _should_use_custom_op(input): - assert isinstance(input, torch.Tensor) - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - if input.device.type != 'cuda': - return False - if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']): - return True - warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().') - return False - -def _tuple_of_ints(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - assert len(xs) == ndim - assert all(isinstance(x, int) for x in xs) - return xs - -#---------------------------------------------------------------------------- - -_conv2d_gradfix_cache = dict() - -def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): - # Parse arguments. - ndim = 2 - weight_shape = tuple(weight_shape) - stride = _tuple_of_ints(stride, ndim) - padding = _tuple_of_ints(padding, ndim) - output_padding = _tuple_of_ints(output_padding, ndim) - dilation = _tuple_of_ints(dilation, ndim) - - # Lookup from cache. - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in _conv2d_gradfix_cache: - return _conv2d_gradfix_cache[key] - - # Validate arguments. - assert groups >= 1 - assert len(weight_shape) == ndim + 2 - assert all(stride[i] >= 1 for i in range(ndim)) - assert all(padding[i] >= 0 for i in range(ndim)) - assert all(dilation[i] >= 0 for i in range(ndim)) - if not transpose: - assert all(output_padding[i] == 0 for i in range(ndim)) - else: # transpose - assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim)) - - # Helpers. - common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups) - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - # Forward & backward. 
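    # (Editorial aside, not part of the original file.) A minimal usage sketch of this module's
    # public API, assuming the file is importable as `conv2d_gradfix` and a CUDA device plus a
    # supported PyTorch version (1.7-1.9, see _should_use_custom_op above) are available:
    #
    #     import torch
    #     import conv2d_gradfix                       # assumed import path
    #
    #     conv2d_gradfix.enabled = True               # the custom op is opt-in
    #     x = torch.randn(1, 3, 64, 64, device='cuda', requires_grad=True)
    #     w = torch.randn(8, 3, 3, 3, device='cuda', requires_grad=True)
    #     with conv2d_gradfix.no_weight_gradients():  # e.g. a pass that does not need weight grads
    #         y = conv2d_gradfix.conv2d(x, w, padding=1)
    #     y.sum().backward()                          # x.grad is computed; w.grad is skipped while
    #                                                 # the custom op is active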
- class Conv2d(torch.autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - assert weight.shape == weight_shape - if not transpose: - output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - else: # transpose - output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) - ctx.save_for_backward(input, weight) - return output - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - grad_input = None - grad_weight = None - grad_bias = None - - if ctx.needs_input_grad[0]: - p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape) - grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None) - assert grad_input.shape == input.shape - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - assert grad_weight.shape == weight_shape - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum([0, 2, 3]) - - return grad_input, grad_weight, grad_bias - - # Gradient with respect to the weights. - class Conv2dGradWeight(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight') - flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32] - grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags) - assert grad_weight.shape == weight_shape - ctx.save_for_backward(grad_output, input) - return grad_weight - - @staticmethod - def backward(ctx, grad2_grad_weight): - grad_output, input = ctx.saved_tensors - grad2_grad_output = None - grad2_input = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None) - assert grad2_grad_output.shape == grad_output.shape - - if ctx.needs_input_grad[1]: - p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape) - grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None) - assert grad2_input.shape == input.shape - - return grad2_grad_output, grad2_input - - _conv2d_gradfix_cache[key] = Conv2d - return Conv2d - -#---------------------------------------------------------------------------- diff --git a/spaces/Reha2704/VToonify/vtoonify/model/raft/core/datasets.py b/spaces/Reha2704/VToonify/vtoonify/model/raft/core/datasets.py deleted file mode 100644 index 9991f15f4c3861c19d1a4b8766d49f83af11db70..0000000000000000000000000000000000000000 --- a/spaces/Reha2704/VToonify/vtoonify/model/raft/core/datasets.py +++ /dev/null @@ -1,235 +0,0 @@ -# Data loading based on https://github.com/NVIDIA/flownet2-pytorch - -import numpy as np -import torch -import torch.utils.data as data -import torch.nn.functional as F - -import os -import math -import random -from glob import glob -import os.path as osp - -from model.raft.core.utils import frame_utils -from model.raft.core.utils.augmentor import FlowAugmentor, SparseFlowAugmentor - - -class FlowDataset(data.Dataset): - def __init__(self, aug_params=None, sparse=False): - self.augmentor = None - self.sparse = sparse - if aug_params is not None: - if sparse: - 
self.augmentor = SparseFlowAugmentor(**aug_params) - else: - self.augmentor = FlowAugmentor(**aug_params) - - self.is_test = False - self.init_seed = False - self.flow_list = [] - self.image_list = [] - self.extra_info = [] - - def __getitem__(self, index): - - if self.is_test: - img1 = frame_utils.read_gen(self.image_list[index][0]) - img2 = frame_utils.read_gen(self.image_list[index][1]) - img1 = np.array(img1).astype(np.uint8)[..., :3] - img2 = np.array(img2).astype(np.uint8)[..., :3] - img1 = torch.from_numpy(img1).permute(2, 0, 1).float() - img2 = torch.from_numpy(img2).permute(2, 0, 1).float() - return img1, img2, self.extra_info[index] - - if not self.init_seed: - worker_info = torch.utils.data.get_worker_info() - if worker_info is not None: - torch.manual_seed(worker_info.id) - np.random.seed(worker_info.id) - random.seed(worker_info.id) - self.init_seed = True - - index = index % len(self.image_list) - valid = None - if self.sparse: - flow, valid = frame_utils.readFlowKITTI(self.flow_list[index]) - else: - flow = frame_utils.read_gen(self.flow_list[index]) - - img1 = frame_utils.read_gen(self.image_list[index][0]) - img2 = frame_utils.read_gen(self.image_list[index][1]) - - flow = np.array(flow).astype(np.float32) - img1 = np.array(img1).astype(np.uint8) - img2 = np.array(img2).astype(np.uint8) - - # grayscale images - if len(img1.shape) == 2: - img1 = np.tile(img1[...,None], (1, 1, 3)) - img2 = np.tile(img2[...,None], (1, 1, 3)) - else: - img1 = img1[..., :3] - img2 = img2[..., :3] - - if self.augmentor is not None: - if self.sparse: - img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid) - else: - img1, img2, flow = self.augmentor(img1, img2, flow) - - img1 = torch.from_numpy(img1).permute(2, 0, 1).float() - img2 = torch.from_numpy(img2).permute(2, 0, 1).float() - flow = torch.from_numpy(flow).permute(2, 0, 1).float() - - if valid is not None: - valid = torch.from_numpy(valid) - else: - valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000) - - return img1, img2, flow, valid.float() - - - def __rmul__(self, v): - self.flow_list = v * self.flow_list - self.image_list = v * self.image_list - return self - - def __len__(self): - return len(self.image_list) - - -class MpiSintel(FlowDataset): - def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'): - super(MpiSintel, self).__init__(aug_params) - flow_root = osp.join(root, split, 'flow') - image_root = osp.join(root, split, dstype) - - if split == 'test': - self.is_test = True - - for scene in os.listdir(image_root): - image_list = sorted(glob(osp.join(image_root, scene, '*.png'))) - for i in range(len(image_list)-1): - self.image_list += [ [image_list[i], image_list[i+1]] ] - self.extra_info += [ (scene, i) ] # scene and frame_id - - if split != 'test': - self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo'))) - - -class FlyingChairs(FlowDataset): - def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'): - super(FlyingChairs, self).__init__(aug_params) - - images = sorted(glob(osp.join(root, '*.ppm'))) - flows = sorted(glob(osp.join(root, '*.flo'))) - assert (len(images)//2 == len(flows)) - - split_list = np.loadtxt('chairs_split.txt', dtype=np.int32) - for i in range(len(flows)): - xid = split_list[i] - if (split=='training' and xid==1) or (split=='validation' and xid==2): - self.flow_list += [ flows[i] ] - self.image_list += [ [images[2*i], images[2*i+1]] ] - - -class FlyingThings3D(FlowDataset): - def __init__(self, 
aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'): - super(FlyingThings3D, self).__init__(aug_params) - - for cam in ['left']: - for direction in ['into_future', 'into_past']: - image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*'))) - image_dirs = sorted([osp.join(f, cam) for f in image_dirs]) - - flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*'))) - flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs]) - - for idir, fdir in zip(image_dirs, flow_dirs): - images = sorted(glob(osp.join(idir, '*.png')) ) - flows = sorted(glob(osp.join(fdir, '*.pfm')) ) - for i in range(len(flows)-1): - if direction == 'into_future': - self.image_list += [ [images[i], images[i+1]] ] - self.flow_list += [ flows[i] ] - elif direction == 'into_past': - self.image_list += [ [images[i+1], images[i]] ] - self.flow_list += [ flows[i+1] ] - - -class KITTI(FlowDataset): - def __init__(self, aug_params=None, split='training', root='datasets/KITTI'): - super(KITTI, self).__init__(aug_params, sparse=True) - if split == 'testing': - self.is_test = True - - root = osp.join(root, split) - images1 = sorted(glob(osp.join(root, 'image_2/*_10.png'))) - images2 = sorted(glob(osp.join(root, 'image_2/*_11.png'))) - - for img1, img2 in zip(images1, images2): - frame_id = img1.split('/')[-1] - self.extra_info += [ [frame_id] ] - self.image_list += [ [img1, img2] ] - - if split == 'training': - self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png'))) - - -class HD1K(FlowDataset): - def __init__(self, aug_params=None, root='datasets/HD1k'): - super(HD1K, self).__init__(aug_params, sparse=True) - - seq_ix = 0 - while 1: - flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix))) - images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix))) - - if len(flows) == 0: - break - - for i in range(len(flows)-1): - self.flow_list += [flows[i]] - self.image_list += [ [images[i], images[i+1]] ] - - seq_ix += 1 - - -def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'): - """ Create the data loader for the corresponding trainign set """ - - if args.stage == 'chairs': - aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True} - train_dataset = FlyingChairs(aug_params, split='training') - - elif args.stage == 'things': - aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True} - clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass') - final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass') - train_dataset = clean_dataset + final_dataset - - elif args.stage == 'sintel': - aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True} - things = FlyingThings3D(aug_params, dstype='frames_cleanpass') - sintel_clean = MpiSintel(aug_params, split='training', dstype='clean') - sintel_final = MpiSintel(aug_params, split='training', dstype='final') - - if TRAIN_DS == 'C+T+K+S+H': - kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True}) - hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True}) - train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things - - elif TRAIN_DS == 'C+T+K/S': - train_dataset = 100*sintel_clean + 100*sintel_final + things - - elif args.stage == 'kitti': - aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False} - 
train_dataset = KITTI(aug_params, split='training') - - train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, - pin_memory=False, shuffle=True, num_workers=4, drop_last=True) - - print('Training with %d image pairs' % len(train_dataset)) - return train_loader - diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/__init__.py deleted file mode 100644 index 04011130435cf9fdfadeb821919046b1bddab7d4..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from .atss import ATSS -from .base import BaseDetector -from .cascade_rcnn import CascadeRCNN -from .cornernet import CornerNet -from .detr import DETR -from .fast_rcnn import FastRCNN -from .faster_rcnn import FasterRCNN -from .fcos import FCOS -from .fovea import FOVEA -from .fsaf import FSAF -from .gfl import GFL -from .grid_rcnn import GridRCNN -from .htc import HybridTaskCascade -from .kd_one_stage import KnowledgeDistillationSingleStageDetector -from .mask_rcnn import MaskRCNN -from .mask_scoring_rcnn import MaskScoringRCNN -from .nasfcos import NASFCOS -from .paa import PAA -from .point_rend import PointRend -from .reppoints_detector import RepPointsDetector -from .retinanet import RetinaNet -from .rpn import RPN -from .scnet import SCNet -from .single_stage import SingleStageDetector -from .sparse_rcnn import SparseRCNN -from .trident_faster_rcnn import TridentFasterRCNN -from .two_stage import TwoStageDetector -from .vfnet import VFNet -from .yolact import YOLACT -from .yolo import YOLOV3 - -__all__ = [ - 'ATSS', 'BaseDetector', 'SingleStageDetector', - 'KnowledgeDistillationSingleStageDetector', 'TwoStageDetector', 'RPN', - 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', - 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', - 'FOVEA', 'FSAF', 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', - 'YOLOV3', 'YOLACT', 'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', - 'SCNet' -] diff --git a/spaces/Rongjiehuang/ProDiff/vocoders/fastdiff.py b/spaces/Rongjiehuang/ProDiff/vocoders/fastdiff.py deleted file mode 100644 index 1769085832bfc902eeff0155b788141ae194e85e..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/vocoders/fastdiff.py +++ /dev/null @@ -1,162 +0,0 @@ -import glob -import re -import librosa -import torch -import yaml -from sklearn.preprocessing import StandardScaler -from torch import nn -from modules.FastDiff.module.FastDiff_model import FastDiff as FastDiff_model -from utils.hparams import hparams -from modules.parallel_wavegan.utils import read_hdf5 -from vocoders.base_vocoder import BaseVocoder, register_vocoder -import numpy as np -from modules.FastDiff.module.util import theta_timestep_loss, compute_hyperparams_given_schedule, sampling_given_noise_schedule - -def load_fastdiff_model(config_path, checkpoint_path): - # load config - with open(config_path) as f: - config = yaml.load(f, Loader=yaml.Loader) - - # setup - if torch.cuda.is_available(): - device = torch.device("cuda") - else: - device = torch.device("cpu") - model = FastDiff_model(audio_channels=config['audio_channels'], - inner_channels=config['inner_channels'], - cond_channels=config['cond_channels'], - upsample_ratios=config['upsample_ratios'], - lvc_layers_each_block=config['lvc_layers_each_block'], - 
lvc_kernel_size=config['lvc_kernel_size'], - kpnet_hidden_channels=config['kpnet_hidden_channels'], - kpnet_conv_size=config['kpnet_conv_size'], - dropout=config['dropout'], - diffusion_step_embed_dim_in=config['diffusion_step_embed_dim_in'], - diffusion_step_embed_dim_mid=config['diffusion_step_embed_dim_mid'], - diffusion_step_embed_dim_out=config['diffusion_step_embed_dim_out'], - use_weight_norm=config['use_weight_norm']) - - model.load_state_dict(torch.load(checkpoint_path, map_location="cpu")["state_dict"]["model"], strict=True) - - # Init hyperparameters by linear schedule - noise_schedule = torch.linspace(float(config["beta_0"]), float(config["beta_T"]), int(config["T"])).cuda() - diffusion_hyperparams = compute_hyperparams_given_schedule(noise_schedule) - - # map diffusion hyperparameters to gpu - for key in diffusion_hyperparams: - if key in ["beta", "alpha", "sigma"]: - diffusion_hyperparams[key] = diffusion_hyperparams[key].cuda() - diffusion_hyperparams = diffusion_hyperparams - - - if config['noise_schedule'] != '': - noise_schedule = config['noise_schedule'] - if isinstance(noise_schedule, list): - noise_schedule = torch.FloatTensor(noise_schedule).cuda() - else: - # Select Schedule - try: - reverse_step = int(hparams.get('N')) - except: - print('Please specify $N (the number of revere iterations) in config file. Now denoise with 4 iterations.') - reverse_step = 4 - if reverse_step == 1000: - noise_schedule = torch.linspace(0.000001, 0.01, 1000).cuda() - elif reverse_step == 200: - noise_schedule = torch.linspace(0.0001, 0.02, 200).cuda() - - # Below are schedules derived by Noise Predictor - elif reverse_step == 8: - noise_schedule = [6.689325005027058e-07, 1.0033881153503899e-05, 0.00015496854030061513, - 0.002387222135439515, 0.035597629845142365, 0.3681158423423767, 0.4735414385795593, 0.5] - elif reverse_step == 6: - noise_schedule = [1.7838445955931093e-06, 2.7984189728158526e-05, 0.00043231004383414984, - 0.006634317338466644, 0.09357017278671265, 0.6000000238418579] - elif reverse_step == 4: - noise_schedule = [3.2176e-04, 2.5743e-03, 2.5376e-02, 7.0414e-01] - elif reverse_step == 3: - noise_schedule = [9.0000e-05, 9.0000e-03, 6.0000e-01] - else: - raise NotImplementedError - - if isinstance(noise_schedule, list): - noise_schedule = torch.FloatTensor(noise_schedule).cuda() - - model.remove_weight_norm() - model = model.eval().to(device) - print(f"| Loaded model parameters from {checkpoint_path}.") - print(f"| FastDiff device: {device}.") - return model, diffusion_hyperparams, noise_schedule, config, device - - -@register_vocoder -class FastDiff(BaseVocoder): - def __init__(self): - if hparams['vocoder_ckpt'] == '': # load LJSpeech FastDiff pretrained model - base_dir = 'checkpoint/FastDiff' - config_path = f'{base_dir}/config.yaml' - ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key= - lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1] - print('| load FastDiff: ', ckpt) - self.scaler = None - self.model, self.dh, self.noise_schedule, self.config, self.device = load_fastdiff_model( - config_path=config_path, - checkpoint_path=ckpt, - ) - else: - base_dir = hparams['vocoder_ckpt'] - print(base_dir) - config_path = f'{base_dir}/config.yaml' - ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key= - lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1] - print('| load FastDiff: ', ckpt) - self.scaler = None - self.model, self.dh, self.noise_schedule, self.config, self.device = 
load_fastdiff_model( - config_path=config_path, - checkpoint_path=ckpt, - ) - - def spec2wav(self, mel, **kwargs): - # start generation - device = self.device - with torch.no_grad(): - c = torch.FloatTensor(mel).unsqueeze(0).transpose(2, 1).to(device) - audio_length = c.shape[-1] * hparams["hop_size"] - y = sampling_given_noise_schedule( - self.model, (1, 1, audio_length), self.dh, self.noise_schedule, condition=c, ddim=False, return_sequence=False) - wav_out = y.cpu().numpy() - return wav_out - - @staticmethod - def wav2spec(wav_fn, return_linear=False): - from data_gen.tts.data_gen_utils import process_utterance - res = process_utterance( - wav_fn, fft_size=hparams['fft_size'], - hop_size=hparams['hop_size'], - win_length=hparams['win_size'], - num_mels=hparams['audio_num_mel_bins'], - fmin=hparams['fmin'], - fmax=hparams['fmax'], - sample_rate=hparams['audio_sample_rate'], - loud_norm=hparams['loud_norm'], - min_level_db=hparams['min_level_db'], - return_linear=return_linear, vocoder='fastdiff', eps=float(hparams.get('wav2spec_eps', 1e-10))) - if return_linear: - return res[0], res[1].T, res[2].T # [T, 80], [T, n_fft] - else: - return res[0], res[1].T - - @staticmethod - def wav2mfcc(wav_fn): - fft_size = hparams['fft_size'] - hop_size = hparams['hop_size'] - win_length = hparams['win_size'] - sample_rate = hparams['audio_sample_rate'] - wav, _ = librosa.core.load(wav_fn, sr=sample_rate) - mfcc = librosa.feature.mfcc(y=wav, sr=sample_rate, n_mfcc=13, - n_fft=fft_size, hop_length=hop_size, - win_length=win_length, pad_mode="constant", power=1.0) - mfcc_delta = librosa.feature.delta(mfcc, order=1) - mfcc_delta_delta = librosa.feature.delta(mfcc, order=2) - mfcc = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta]).T - return mfcc diff --git a/spaces/SaiRaam/AIAvatarchatbot/README.md b/spaces/SaiRaam/AIAvatarchatbot/README.md deleted file mode 100644 index ca2abe8e85a6a83884e002971f334b9bc6ee43a2..0000000000000000000000000000000000000000 --- a/spaces/SaiRaam/AIAvatarchatbot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AIAvatarchatbot -emoji: 🚀 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Saketh-Reddy/testing/index.html b/spaces/Saketh-Reddy/testing/index.html deleted file mode 100644 index 58275de3b1c343a98420342baa076b9baaafa157..0000000000000000000000000000000000000000 --- a/spaces/Saketh-Reddy/testing/index.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - My static Space - - - -
-      Welcome to your static Space!
-      You can modify this app directly by editing index.html in the Files and versions tab.
-      Also don't forget to check the Spaces documentation.
    - - diff --git a/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/train.py b/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/train.py deleted file mode 100644 index 4dff8b280d76c53abdfc2fbce83cafaf3022ab96..0000000000000000000000000000000000000000 --- a/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/train.py +++ /dev/null @@ -1,301 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler - -import librosa -import logging - -logging.getLogger('numba').setLevel(logging.WARNING) - -import commons -import utils -from data_utils import ( - TextAudioLoader, - TextAudioCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - - -torch.backends.cudnn.benchmark = True -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '8000' - - hps = utils.get_hparams() - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32,300,400,500,600,700,800,900,1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioCollate() - train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False, - batch_size=hps.train.batch_size, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - net_g = DDP(net_g, device_ids=[rank]) - net_d = DDP(net_d, device_ids=[rank]) - - try: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g) - _, _, _, epoch_str = 
utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d) - global_step = (epoch_str - 1) * len(train_loader) - except: - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank==0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d = nets - optim_g, optim_d = optims - scheduler_g, scheduler_d = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader): - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask,\ - (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths) - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank==0: - if global_step 
% hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. * batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - old_g=os.path.join(hps.model_dir, "G_{}.pth".format(global_step-2000)) - old_d=os.path.join(hps.model_dir, "D_{}.pth".format(global_step-2000)) - if os.path.exists(old_g): - os.remove(old_g) - if os.path.exists(old_d): - os.remove(old_d) - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader): - x, x_lengths = x.cuda(0), x_lengths.cuda(0) - spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0) - y, y_lengths = y.cuda(0), y_lengths.cuda(0) - - # remove else - x = x[:1] - x_lengths = x_lengths[:1] - spec = spec[:1] - spec_lengths = spec_lengths[:1] - y = y[:1] - y_lengths = y_lengths[:1] - break - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000) - y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict = { - "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - } - audio_dict = { - "gen/audio": y_hat[0,:,:y_hat_lengths[0]] - } - if global_step == 0: - image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - - -if __name__ == "__main__": - main() diff 
--git a/spaces/Sandiago21/automatic-speech-recognition-spanish/README.md b/spaces/Sandiago21/automatic-speech-recognition-spanish/README.md deleted file mode 100644 index 3ade7b595f0a710ab90cbea29d53cbf78595a294..0000000000000000000000000000000000000000 --- a/spaces/Sandiago21/automatic-speech-recognition-spanish/README.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: automatic-speech-recognition-spanish -app_file: app.py -sdk: gradio -sdk_version: 3.36.0 ---- diff --git a/spaces/SantiagoTesla/image_generator/README.md b/spaces/SantiagoTesla/image_generator/README.md deleted file mode 100644 index cb70fac228600b06dbddaca2de0f260126b30022..0000000000000000000000000000000000000000 --- a/spaces/SantiagoTesla/image_generator/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Stable Diffusion Nano -emoji: 📊 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: creativeml-openrail-m -tags: -- jax-diffusers-event -duplicated_from: bguisard/stable-diffusion-nano ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ServerX/PorcoDiaz/infer/modules/train/preprocess.py b/spaces/ServerX/PorcoDiaz/infer/modules/train/preprocess.py deleted file mode 100644 index fbe81307ee661a95b2ac479336671a44ee02151a..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/infer/modules/train/preprocess.py +++ /dev/null @@ -1,147 +0,0 @@ -import multiprocessing -import os -import sys - -from scipy import signal - -now_dir = os.getcwd() -sys.path.append(now_dir) -print(sys.argv) -inp_root = sys.argv[1] -sr = int(sys.argv[2]) -n_p = int(sys.argv[3]) -exp_dir = sys.argv[4] -noparallel = sys.argv[5] == "True" -per = float(sys.argv[6]) -import multiprocessing -import os -import traceback - -import librosa -import numpy as np -from scipy.io import wavfile - -from infer.lib.audio import load_audio -from infer.lib.slicer2 import Slicer - -mutex = multiprocessing.Lock() -f = open("%s/preprocess.log" % exp_dir, "a+") - - -def println(strr): - mutex.acquire() - print(strr) - f.write("%s\n" % strr) - f.flush() - mutex.release() - - -class PreProcess: - def __init__(self, sr, exp_dir, per=3.7): - self.slicer = Slicer( - sr=sr, - threshold=-42, - min_length=1500, - min_interval=400, - hop_size=15, - max_sil_kept=500, - ) - self.sr = sr - self.bh, self.ah = signal.butter(N=5, Wn=48, btype="high", fs=self.sr) - self.per = per - self.overlap = 0.3 - self.tail = self.per + self.overlap - self.max = 0.9 - self.alpha = 0.75 - self.exp_dir = exp_dir - self.gt_wavs_dir = "%s/0_gt_wavs" % exp_dir - self.wavs16k_dir = "%s/1_16k_wavs" % exp_dir - os.makedirs(self.exp_dir, exist_ok=True) - os.makedirs(self.gt_wavs_dir, exist_ok=True) - os.makedirs(self.wavs16k_dir, exist_ok=True) - - def norm_write(self, tmp_audio, idx0, idx1): - tmp_max = np.abs(tmp_audio).max() - if tmp_max > 2.5: - print("%s-%s-%s-filtered" % (idx0, idx1, tmp_max)) - return - tmp_audio = (tmp_audio / tmp_max * (self.max * self.alpha)) + ( - 1 - self.alpha - ) * tmp_audio - wavfile.write( - "%s/%s_%s.wav" % (self.gt_wavs_dir, idx0, idx1), - self.sr, - tmp_audio.astype(np.float32), - ) - tmp_audio = librosa.resample( - tmp_audio, orig_sr=self.sr, target_sr=16000 - ) # , res_type="soxr_vhq" - wavfile.write( - "%s/%s_%s.wav" % (self.wavs16k_dir, idx0, idx1), - 16000, - tmp_audio.astype(np.float32), - ) - - def pipeline(self, path, idx0): - try: - audio = load_audio(path, self.sr) - # zero phased digital filter cause pre-ringing noise... 
- # audio = signal.filtfilt(self.bh, self.ah, audio) - audio = signal.lfilter(self.bh, self.ah, audio) - - idx1 = 0 - for audio in self.slicer.slice(audio): - i = 0 - while 1: - start = int(self.sr * (self.per - self.overlap) * i) - i += 1 - if len(audio[start:]) > self.tail * self.sr: - tmp_audio = audio[start : start + int(self.per * self.sr)] - self.norm_write(tmp_audio, idx0, idx1) - idx1 += 1 - else: - tmp_audio = audio[start:] - idx1 += 1 - break - self.norm_write(tmp_audio, idx0, idx1) - println("%s->Suc." % path) - except: - println("%s->%s" % (path, traceback.format_exc())) - - def pipeline_mp(self, infos): - for path, idx0 in infos: - self.pipeline(path, idx0) - - def pipeline_mp_inp_dir(self, inp_root, n_p): - try: - infos = [ - ("%s/%s" % (inp_root, name), idx) - for idx, name in enumerate(sorted(list(os.listdir(inp_root)))) - ] - if noparallel: - for i in range(n_p): - self.pipeline_mp(infos[i::n_p]) - else: - ps = [] - for i in range(n_p): - p = multiprocessing.Process( - target=self.pipeline_mp, args=(infos[i::n_p],) - ) - ps.append(p) - p.start() - for i in range(n_p): - ps[i].join() - except: - println("Fail. %s" % traceback.format_exc()) - - -def preprocess_trainset(inp_root, sr, n_p, exp_dir, per): - pp = PreProcess(sr, exp_dir, per) - println("start preprocess") - println(sys.argv) - pp.pipeline_mp_inp_dir(inp_root, n_p) - println("end preprocess") - - -if __name__ == "__main__": - preprocess_trainset(inp_root, sr, n_p, exp_dir, per) diff --git a/spaces/Shredder/CONBERT-2/Cuad_others.py b/spaces/Shredder/CONBERT-2/Cuad_others.py deleted file mode 100644 index e1d53f3f2575e0fb29baf09b1e2316142094ac28..0000000000000000000000000000000000000000 --- a/spaces/Shredder/CONBERT-2/Cuad_others.py +++ /dev/null @@ -1,67 +0,0 @@ -from predict import run_prediction -from io import StringIO -import json -import spacy -from spacy import displacy -from transformers import pipeline -import torch -import nltk -nltk.download('punkt') - - - - -##Summarization -summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY") -def summarize_text(text): - resp = summarizer(text) - stext = resp[0]['summary_text'] - return stext - - -##Company Extraction -ner=pipeline('ner',model='Jean-Baptiste/camembert-ner-with-dates',tokenizer='Jean-Baptiste/camembert-ner-with-dates', aggregation_strategy="simple") -def fin_ner(text): - replaced_spans = ner(text) - new_spans=[] - for item in replaced_spans: - item['entity']=item['entity_group'] - del item['entity_group'] - new_spans.append(item) - return {"text": text, "entities": new_spans} - - -#CUAD STARTS -def load_questions(): - questions = [] - with open('questions.txt') as f: - questions = f.readlines() - return questions - - -def load_questions_short(): - questions_short = [] - with open('questionshort.txt') as f: - questions_short = f.readlines() - return questions_short - -def quad(query,file): - with open(file) as f: - paragraph = f.read() - questions = load_questions() - questions_short = load_questions_short() - if (not len(paragraph)==0) and not (len(query)==0): - print('getting predictions') - predictions = run_prediction([query], paragraph, 'marshmellow77/roberta-base-cuad',n_best_size=5) - answer = "" - answer_p="" - if predictions['0'] == "": - answer = 'No answer found in document' - else: - with open("nbest.json") as jf: - data = json.load(jf) - for i in range(1): - raw_answer=data['0'][i]['text'] - answer += f"{data['0'][i]['text']}\n" - answer_p =answer+ f"Probability: {round(data['0'][i]['probability']*100,1)}%\n\n" - return 
answer,answer_p diff --git a/spaces/Shriharshan/Image-Caption-Generator/app.py b/spaces/Shriharshan/Image-Caption-Generator/app.py deleted file mode 100644 index fe186999504f758f7eb082cabc140a54ff627b2d..0000000000000000000000000000000000000000 --- a/spaces/Shriharshan/Image-Caption-Generator/app.py +++ /dev/null @@ -1,42 +0,0 @@ -from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, PreTrainedTokenizerFast - -model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning") -vit_feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k") -tokenizer = PreTrainedTokenizerFast.from_pretrained("distilgpt2") - -def vit2distilgpt2(img): - pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values - encoder_outputs = model.generate(pixel_values.to('cpu'), num_beams=5, num_return_sequences=3) - generated_sentences = tokenizer.batch_decode(encoder_outputs, skip_special_tokens=True) - - return generated_sentences - -import gradio as gr - -inputs = [ - gr.inputs.Image(type="pil", label="Original Images") -] - -outputs = [ - gr.outputs.Textbox(label="Caption 1"), - gr.outputs.Textbox(label="Caption 2"), - gr.outputs.Textbox(label="Caption 3") -] - -title = "Image Captioning using ViT + GPT2" -description = "ViT and GPT2 are used to generate Image Caption for the uploaded image. COCO DataSet is used for Training" -examples = [ - ["Image1.png"], - ["Image2.png"], - ["Image3.png"] -] - -gr.Interface( - vit2distilgpt2, - inputs, - outputs, - title=title, - description=description, - examples=examples, - theme="huggingface", -).launch(debug=True, enable_queue=True) diff --git a/spaces/Skyler123/TangGPT/ChuanhuChatbot.py b/spaces/Skyler123/TangGPT/ChuanhuChatbot.py deleted file mode 100644 index 5d18393a7cc42c6545d90e9a8ebf949745ebe5bf..0000000000000000000000000000000000000000 --- a/spaces/Skyler123/TangGPT/ChuanhuChatbot.py +++ /dev/null @@ -1,423 +0,0 @@ -# -*- coding:utf-8 -*- -import os -import logging -import sys - -import gradio as gr - -from modules import config -from modules.config import * -from modules.utils import * -from modules.presets import * -from modules.overwrites import * -from modules.chat_func import * -from modules.openai_func import get_usage - -gr.Chatbot.postprocess = postprocess -PromptHelper.compact_text_chunks = compact_text_chunks - -with open("assets/custom.css", "r", encoding="utf-8") as f: - customCSS = f.read() - -with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: - user_name = gr.State("") - history = gr.State([]) - token_count = gr.State([]) - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - user_api_key = gr.State(my_api_key) - user_question = gr.State("") - outputing = gr.State(False) - topic = gr.State("未命名对话历史记录") - - with gr.Row(): - with gr.Column(): - gr.HTML(title) - user_info = gr.Markdown(value="", elem_id="user_info") - gr.HTML('
    Duplicate Space
    ') - status_display = gr.Markdown(get_geoip(), elem_id="status_display") - - # https://github.com/gradio-app/gradio/pull/3296 - def create_greeting(request: gr.Request): - if hasattr(request, "username") and request.username: # is not None or is not "" - logging.info(f"Get User Name: {request.username}") - return gr.Markdown.update(value=f"User: {request.username}"), request.username - else: - return gr.Markdown.update(value=f"User: default", visible=False), "" - demo.load(create_greeting, inputs=None, outputs=[user_info, user_name]) - - with gr.Row().style(equal_height=True): - with gr.Column(scale=5): - with gr.Row(): - chatbot = gr.Chatbot(elem_id="chuanhu_chatbot").style(height="100%") - with gr.Row(): - with gr.Column(scale=12): - user_input = gr.Textbox( - elem_id="user_input_tb", - show_label=False, placeholder="在这里输入" - ).style(container=False) - with gr.Column(min_width=70, scale=1): - submitBtn = gr.Button("发送", variant="primary") - cancelBtn = gr.Button("取消", variant="secondary", visible=False) - with gr.Row(): - emptyBtn = gr.Button( - "🧹 新的对话", - ) - retryBtn = gr.Button("🔄 重新生成") - delFirstBtn = gr.Button("🗑️ 删除最旧对话") - delLastBtn = gr.Button("🗑️ 删除最新对话") - reduceTokenBtn = gr.Button("♻️ 总结对话") - - with gr.Column(): - with gr.Column(min_width=50, scale=1): - with gr.Tab(label="ChatGPT"): - keyTxt = gr.Textbox( - show_label=True, - placeholder=f"OpenAI API-key...", - value=hide_middle_chars(my_api_key), - type="password", - visible=not HIDE_MY_KEY, - label="API-Key", - ) - if multi_api_key: - usageTxt = gr.Markdown("多账号模式已开启,无需输入key,可直接开始对话", elem_id="usage_display") - else: - usageTxt = gr.Markdown("**发送消息** 或 **提交key** 以显示额度", elem_id="usage_display") - model_select_dropdown = gr.Dropdown( - label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0] - ) - use_streaming_checkbox = gr.Checkbox( - label="实时传输回答", value=True, visible=enable_streaming_option - ) - use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False) - language_select_dropdown = gr.Dropdown( - label="选择回复语言(针对搜索&索引功能)", - choices=REPLY_LANGUAGES, - multiselect=False, - value=REPLY_LANGUAGES[0], - ) - index_files = gr.Files(label="上传索引文件", type="file", multiple=True) - two_column = gr.Checkbox(label="双栏pdf", value=advance_docs["pdf"].get("two_column", False)) - # TODO: 公式ocr - # formula_ocr = gr.Checkbox(label="识别公式", value=advance_docs["pdf"].get("formula_ocr", False)) - - with gr.Tab(label="Prompt"): - systemPromptTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入System Prompt...", - label="System prompt", - value=initial_prompt, - lines=10, - ).style(container=False) - with gr.Accordion(label="加载Prompt模板", open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown( - label="选择Prompt模板集合文件", - choices=get_template_names(plain=True), - multiselect=False, - value=get_template_names(plain=True)[0], - ).style(container=False) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button("🔄 刷新") - with gr.Row(): - with gr.Column(): - templateSelectDropdown = gr.Dropdown( - label="从Prompt模板中加载", - choices=load_template( - get_template_names(plain=True)[0], mode=1 - ), - multiselect=False, - ).style(container=False) - - with gr.Tab(label="保存/加载"): - with gr.Accordion(label="保存/加载对话历史记录", open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown( - label="从列表中加载对话", - choices=get_history_names(plain=True), - multiselect=False, - value=get_history_names(plain=True)[0], - ) - 
with gr.Column(scale=1): - historyRefreshBtn = gr.Button("🔄 刷新") - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, - placeholder=f"设置文件名: 默认为.json,可选为.md", - label="设置保存文件名", - value="对话历史记录", - ).style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button("💾 保存对话") - exportMarkdownBtn = gr.Button("📝 导出为Markdown") - gr.Markdown("默认保存于history文件夹") - with gr.Row(): - with gr.Column(): - downloadFile = gr.File(interactive=True) - - with gr.Tab(label="高级"): - gr.Markdown("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置") - default_btn = gr.Button("🔙 恢复默认设置") - - with gr.Accordion("参数", open=False): - top_p = gr.Slider( - minimum=-0, - maximum=1.0, - value=1.0, - step=0.05, - interactive=True, - label="Top-p", - ) - temperature = gr.Slider( - minimum=-0, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="Temperature", - ) - - with gr.Accordion("网络设置", open=False, visible=False): - # 优先展示自定义的api_host - apihostTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入API-Host...", - label="API-Host", - value=config.api_host or shared.API_HOST, - lines=1, - ) - changeAPIURLBtn = gr.Button("🔄 切换API地址") - proxyTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入代理地址...", - label="代理地址(示例:http://127.0.0.1:10809)", - value="", - lines=2, - ) - changeProxyBtn = gr.Button("🔄 设置代理地址") - - gr.Markdown(description) - gr.HTML(footer.format(versions=versions_html()), elem_id="footer") - chatgpt_predict_args = dict( - fn=predict, - inputs=[ - user_api_key, - systemPromptTxt, - history, - user_question, - chatbot, - token_count, - top_p, - temperature, - use_streaming_checkbox, - model_select_dropdown, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - outputs=[chatbot, history, status_display, token_count], - show_progress=True, - ) - - start_outputing_args = dict( - fn=start_outputing, - inputs=[], - outputs=[submitBtn, cancelBtn], - show_progress=True, - ) - - end_outputing_args = dict( - fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn] - ) - - reset_textbox_args = dict( - fn=reset_textbox, inputs=[], outputs=[user_input] - ) - - transfer_input_args = dict( - fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], show_progress=True - ) - - get_usage_args = dict( - fn=get_usage, inputs=[user_api_key], outputs=[usageTxt], show_progress=False - ) - - - # Chatbot - cancelBtn.click(cancel_outputing, [], []) - - user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - user_input.submit(**get_usage_args) - - submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - submitBtn.click(**get_usage_args) - - emptyBtn.click( - reset_state, - outputs=[chatbot, history, token_count, status_display], - show_progress=True, - ) - emptyBtn.click(**reset_textbox_args) - - retryBtn.click(**start_outputing_args).then( - retry, - [ - user_api_key, - systemPromptTxt, - history, - chatbot, - token_count, - top_p, - temperature, - use_streaming_checkbox, - model_select_dropdown, - language_select_dropdown, - ], - [chatbot, history, status_display, token_count], - show_progress=True, - ).then(**end_outputing_args) - retryBtn.click(**get_usage_args) - - delFirstBtn.click( - delete_first_conversation, - [history, token_count], - [history, token_count, status_display], - ) - - delLastBtn.click( - delete_last_conversation, - [chatbot, history, token_count], - [chatbot, history, token_count, status_display], - 
show_progress=True, - ) - - reduceTokenBtn.click( - reduce_token_size, - [ - user_api_key, - systemPromptTxt, - history, - chatbot, - token_count, - top_p, - temperature, - gr.State(sum(token_count.value[-4:])), - model_select_dropdown, - language_select_dropdown, - ], - [chatbot, history, status_display, token_count], - show_progress=True, - ) - reduceTokenBtn.click(**get_usage_args) - - two_column.change(update_doc_config, [two_column], None) - - # ChatGPT - keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]).then(**get_usage_args) - keyTxt.submit(**get_usage_args) - - # Template - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - templateFileSelectDropdown.change( - load_template, - [templateFileSelectDropdown], - [promptTemplates, templateSelectDropdown], - show_progress=True, - ) - templateSelectDropdown.change( - get_template_content, - [promptTemplates, templateSelectDropdown, systemPromptTxt], - [systemPromptTxt], - show_progress=True, - ) - - # S&L - saveHistoryBtn.click( - save_chat_history, - [saveFileName, systemPromptTxt, history, chatbot, user_name], - downloadFile, - show_progress=True, - ) - saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - exportMarkdownBtn.click( - export_markdown, - [saveFileName, systemPromptTxt, history, chatbot, user_name], - downloadFile, - show_progress=True, - ) - historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - historyFileSelectDropdown.change( - load_chat_history, - [historyFileSelectDropdown, systemPromptTxt, history, chatbot, user_name], - [saveFileName, systemPromptTxt, history, chatbot], - show_progress=True, - ) - downloadFile.change( - load_chat_history, - [downloadFile, systemPromptTxt, history, chatbot, user_name], - [saveFileName, systemPromptTxt, history, chatbot], - ) - - # Advanced - default_btn.click( - reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True - ) - changeAPIURLBtn.click( - change_api_host, - [apihostTxt], - [status_display], - show_progress=True, - ) - changeProxyBtn.click( - change_proxy, - [proxyTxt], - [status_display], - show_progress=True, - ) - -logging.info( - colorama.Back.GREEN - + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" - + colorama.Style.RESET_ALL -) -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = "川虎ChatGPT 🚀" - -if __name__ == "__main__": - reload_javascript() - # if running in Docker - if dockerflag: - if authflag: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", - server_port=7860, - auth=auth_list, - favicon_path="./assets/favicon.ico", - ) - else: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", - server_port=7860, - share=False, - favicon_path="./assets/favicon.ico", - ) - # if not running in Docker - else: - if authflag: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - share=False, - auth=auth_list, - favicon_path="./assets/favicon.ico", - inbrowser=True, - ) - else: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - share=False, favicon_path="./assets/favicon.ico", inbrowser=True - ) # 改为 share=True 可以创建公开分享链接 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码 - # 
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理 diff --git a/spaces/SoArizonaAI/README/README.md b/spaces/SoArizonaAI/README/README.md deleted file mode 100644 index 65ec03f5f08eb45dfff573ab4579af509473db91..0000000000000000000000000000000000000000 --- a/spaces/SoArizonaAI/README/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: README -emoji: 🚀 -colorFrom: gray -colorTo: red -sdk: static -pinned: false ---- -## This is the BASIC Hugging Face Classroom for So Arizona AI.
-We intend to use this classroom to demonstrate
-various Machine Learning techniques.
-These will include (but are not limited to) NLP, Vision, and
    -Audio Machine Learning. -![9.jpeg](9.jpeg) diff --git a/spaces/SpacesExamples/docker-examples/README.md b/spaces/SpacesExamples/docker-examples/README.md deleted file mode 100644 index 81785808038ffb212ba7826d5f08f4845264d931..0000000000000000000000000000000000000000 --- a/spaces/SpacesExamples/docker-examples/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Docker Examples -emoji: 🐳 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_pretty.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_pretty.py deleted file mode 100644 index c18606c8afbd09af4649722f2a8f728a09d38bfa..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_pretty.py +++ /dev/null @@ -1,540 +0,0 @@ -# coding: utf-8 -"""Tests for IPython.lib.pretty.""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - - -from collections import Counter, defaultdict, deque, OrderedDict, UserList -import os -import pytest -import types -import string -import sys -import unittest - -import pytest - -from IPython.lib import pretty - -from io import StringIO - - -class MyList(object): - def __init__(self, content): - self.content = content - def _repr_pretty_(self, p, cycle): - if cycle: - p.text("MyList(...)") - else: - with p.group(3, "MyList(", ")"): - for (i, child) in enumerate(self.content): - if i: - p.text(",") - p.breakable() - else: - p.breakable("") - p.pretty(child) - - -class MyDict(dict): - def _repr_pretty_(self, p, cycle): - p.text("MyDict(...)") - -class MyObj(object): - def somemethod(self): - pass - - -class Dummy1(object): - def _repr_pretty_(self, p, cycle): - p.text("Dummy1(...)") - -class Dummy2(Dummy1): - _repr_pretty_ = None - -class NoModule(object): - pass - -NoModule.__module__ = None - -class Breaking(object): - def _repr_pretty_(self, p, cycle): - with p.group(4,"TG: ",":"): - p.text("Breaking(") - p.break_() - p.text(")") - -class BreakingRepr(object): - def __repr__(self): - return "Breaking(\n)" - -class BadRepr(object): - def __repr__(self): - return 1/0 - - -def test_indentation(): - """Test correct indentation in groups""" - count = 40 - gotoutput = pretty.pretty(MyList(range(count))) - expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")" - - assert gotoutput == expectedoutput - - -def test_dispatch(): - """ - Test correct dispatching: The _repr_pretty_ method for MyDict - must be found before the registered printer for dict. - """ - gotoutput = pretty.pretty(MyDict()) - expectedoutput = "MyDict(...)" - - assert gotoutput == expectedoutput - - -def test_callability_checking(): - """ - Test that the _repr_pretty_ method is tested for callability and skipped if - not. 
- """ - gotoutput = pretty.pretty(Dummy2()) - expectedoutput = "Dummy1(...)" - - assert gotoutput == expectedoutput - - -@pytest.mark.parametrize( - "obj,expected_output", - zip( - [ - set(), - frozenset(), - set([1]), - frozenset([1]), - set([1, 2]), - frozenset([1, 2]), - set([-1, -2, -3]), - ], - [ - "set()", - "frozenset()", - "{1}", - "frozenset({1})", - "{1, 2}", - "frozenset({1, 2})", - "{-3, -2, -1}", - ], - ), -) -def test_sets(obj, expected_output): - """ - Test that set and frozenset use Python 3 formatting. - """ - got_output = pretty.pretty(obj) - assert got_output == expected_output - - -def test_pprint_heap_allocated_type(): - """ - Test that pprint works for heap allocated types. - """ - module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35" - expected_output = ( - "xxlimited.Null" if sys.version_info < (3, 10, 6) else "xxlimited_35.Null" - ) - xxlimited = pytest.importorskip(module_name) - output = pretty.pretty(xxlimited.Null) - assert output == expected_output - - -def test_pprint_nomod(): - """ - Test that pprint works for classes with no __module__. - """ - output = pretty.pretty(NoModule) - assert output == "NoModule" - - -def test_pprint_break(): - """ - Test that p.break_ produces expected output - """ - output = pretty.pretty(Breaking()) - expected = "TG: Breaking(\n ):" - assert output == expected - -def test_pprint_break_repr(): - """ - Test that p.break_ is used in repr - """ - output = pretty.pretty([[BreakingRepr()]]) - expected = "[[Breaking(\n )]]" - assert output == expected - - output = pretty.pretty([[BreakingRepr()]*2]) - expected = "[[Breaking(\n ),\n Breaking(\n )]]" - assert output == expected - -def test_bad_repr(): - """Don't catch bad repr errors""" - with pytest.raises(ZeroDivisionError): - pretty.pretty(BadRepr()) - -class BadException(Exception): - def __str__(self): - return -1 - -class ReallyBadRepr(object): - __module__ = 1 - @property - def __class__(self): - raise ValueError("I am horrible") - - def __repr__(self): - raise BadException() - -def test_really_bad_repr(): - with pytest.raises(BadException): - pretty.pretty(ReallyBadRepr()) - - -class SA(object): - pass - -class SB(SA): - pass - -class TestsPretty(unittest.TestCase): - - def test_super_repr(self): - # "" - output = pretty.pretty(super(SA)) - self.assertRegex(output, r"") - - # ">" - sb = SB() - output = pretty.pretty(super(SA, sb)) - self.assertRegex(output, r">") - - - def test_long_list(self): - lis = list(range(10000)) - p = pretty.pretty(lis) - last2 = p.rsplit('\n', 2)[-2:] - self.assertEqual(last2, [' 999,', ' ...]']) - - def test_long_set(self): - s = set(range(10000)) - p = pretty.pretty(s) - last2 = p.rsplit('\n', 2)[-2:] - self.assertEqual(last2, [' 999,', ' ...}']) - - def test_long_tuple(self): - tup = tuple(range(10000)) - p = pretty.pretty(tup) - last2 = p.rsplit('\n', 2)[-2:] - self.assertEqual(last2, [' 999,', ' ...)']) - - def test_long_dict(self): - d = { n:n for n in range(10000) } - p = pretty.pretty(d) - last2 = p.rsplit('\n', 2)[-2:] - self.assertEqual(last2, [' 999: 999,', ' ...}']) - - def test_unbound_method(self): - output = pretty.pretty(MyObj.somemethod) - self.assertIn('MyObj.somemethod', output) - - -class MetaClass(type): - def __new__(cls, name): - return type.__new__(cls, name, (object,), {'name': name}) - - def __repr__(self): - return "[CUSTOM REPR FOR CLASS %s]" % self.name - - -ClassWithMeta = MetaClass('ClassWithMeta') - - -def test_metaclass_repr(): - output = pretty.pretty(ClassWithMeta) - assert output == "[CUSTOM 
REPR FOR CLASS ClassWithMeta]" - - -def test_unicode_repr(): - u = u"üniçodé" - ustr = u - - class C(object): - def __repr__(self): - return ustr - - c = C() - p = pretty.pretty(c) - assert p == u - p = pretty.pretty([c]) - assert p == "[%s]" % u - - -def test_basic_class(): - def type_pprint_wrapper(obj, p, cycle): - if obj is MyObj: - type_pprint_wrapper.called = True - return pretty._type_pprint(obj, p, cycle) - type_pprint_wrapper.called = False - - stream = StringIO() - printer = pretty.RepresentationPrinter(stream) - printer.type_pprinters[type] = type_pprint_wrapper - printer.pretty(MyObj) - printer.flush() - output = stream.getvalue() - - assert output == "%s.MyObj" % __name__ - assert type_pprint_wrapper.called is True - - -def test_collections_userlist(): - # Create userlist with cycle - a = UserList() - a.append(a) - - cases = [ - (UserList(), "UserList([])"), - ( - UserList(i for i in range(1000, 1020)), - "UserList([1000,\n" - " 1001,\n" - " 1002,\n" - " 1003,\n" - " 1004,\n" - " 1005,\n" - " 1006,\n" - " 1007,\n" - " 1008,\n" - " 1009,\n" - " 1010,\n" - " 1011,\n" - " 1012,\n" - " 1013,\n" - " 1014,\n" - " 1015,\n" - " 1016,\n" - " 1017,\n" - " 1018,\n" - " 1019])", - ), - (a, "UserList([UserList(...)])"), - ] - for obj, expected in cases: - assert pretty.pretty(obj) == expected - - -# TODO : pytest.mark.parametrise once nose is gone. -def test_collections_defaultdict(): - # Create defaultdicts with cycles - a = defaultdict() - a.default_factory = a - b = defaultdict(list) - b['key'] = b - - # Dictionary order cannot be relied on, test against single keys. - cases = [ - (defaultdict(list), 'defaultdict(list, {})'), - (defaultdict(list, {'key': '-' * 50}), - "defaultdict(list,\n" - " {'key': '--------------------------------------------------'})"), - (a, 'defaultdict(defaultdict(...), {})'), - (b, "defaultdict(list, {'key': defaultdict(...)})"), - ] - for obj, expected in cases: - assert pretty.pretty(obj) == expected - - -# TODO : pytest.mark.parametrise once nose is gone. -def test_collections_ordereddict(): - # Create OrderedDict with cycle - a = OrderedDict() - a['key'] = a - - cases = [ - (OrderedDict(), 'OrderedDict()'), - (OrderedDict((i, i) for i in range(1000, 1010)), - 'OrderedDict([(1000, 1000),\n' - ' (1001, 1001),\n' - ' (1002, 1002),\n' - ' (1003, 1003),\n' - ' (1004, 1004),\n' - ' (1005, 1005),\n' - ' (1006, 1006),\n' - ' (1007, 1007),\n' - ' (1008, 1008),\n' - ' (1009, 1009)])'), - (a, "OrderedDict([('key', OrderedDict(...))])"), - ] - for obj, expected in cases: - assert pretty.pretty(obj) == expected - - -# TODO : pytest.mark.parametrise once nose is gone. -def test_collections_deque(): - # Create deque with cycle - a = deque() - a.append(a) - - cases = [ - (deque(), 'deque([])'), - (deque(i for i in range(1000, 1020)), - 'deque([1000,\n' - ' 1001,\n' - ' 1002,\n' - ' 1003,\n' - ' 1004,\n' - ' 1005,\n' - ' 1006,\n' - ' 1007,\n' - ' 1008,\n' - ' 1009,\n' - ' 1010,\n' - ' 1011,\n' - ' 1012,\n' - ' 1013,\n' - ' 1014,\n' - ' 1015,\n' - ' 1016,\n' - ' 1017,\n' - ' 1018,\n' - ' 1019])'), - (a, 'deque([deque(...)])'), - ] - for obj, expected in cases: - assert pretty.pretty(obj) == expected - - -# TODO : pytest.mark.parametrise once nose is gone. 
-def test_collections_counter(): - class MyCounter(Counter): - pass - cases = [ - (Counter(), 'Counter()'), - (Counter(a=1), "Counter({'a': 1})"), - (MyCounter(a=1), "MyCounter({'a': 1})"), - (Counter(a=1, c=22), "Counter({'c': 22, 'a': 1})"), - ] - for obj, expected in cases: - assert pretty.pretty(obj) == expected - -# TODO : pytest.mark.parametrise once nose is gone. -def test_mappingproxy(): - MP = types.MappingProxyType - underlying_dict = {} - mp_recursive = MP(underlying_dict) - underlying_dict[2] = mp_recursive - underlying_dict[3] = underlying_dict - - cases = [ - (MP({}), "mappingproxy({})"), - (MP({None: MP({})}), "mappingproxy({None: mappingproxy({})})"), - (MP({k: k.upper() for k in string.ascii_lowercase}), - "mappingproxy({'a': 'A',\n" - " 'b': 'B',\n" - " 'c': 'C',\n" - " 'd': 'D',\n" - " 'e': 'E',\n" - " 'f': 'F',\n" - " 'g': 'G',\n" - " 'h': 'H',\n" - " 'i': 'I',\n" - " 'j': 'J',\n" - " 'k': 'K',\n" - " 'l': 'L',\n" - " 'm': 'M',\n" - " 'n': 'N',\n" - " 'o': 'O',\n" - " 'p': 'P',\n" - " 'q': 'Q',\n" - " 'r': 'R',\n" - " 's': 'S',\n" - " 't': 'T',\n" - " 'u': 'U',\n" - " 'v': 'V',\n" - " 'w': 'W',\n" - " 'x': 'X',\n" - " 'y': 'Y',\n" - " 'z': 'Z'})"), - (mp_recursive, "mappingproxy({2: {...}, 3: {2: {...}, 3: {...}}})"), - (underlying_dict, - "{2: mappingproxy({2: {...}, 3: {...}}), 3: {...}}"), - ] - for obj, expected in cases: - assert pretty.pretty(obj) == expected - - -# TODO : pytest.mark.parametrise once nose is gone. -def test_simplenamespace(): - SN = types.SimpleNamespace - - sn_recursive = SN() - sn_recursive.first = sn_recursive - sn_recursive.second = sn_recursive - cases = [ - (SN(), "namespace()"), - (SN(x=SN()), "namespace(x=namespace())"), - (SN(a_long_name=[SN(s=string.ascii_lowercase)]*3, a_short_name=None), - "namespace(a_long_name=[namespace(s='abcdefghijklmnopqrstuvwxyz'),\n" - " namespace(s='abcdefghijklmnopqrstuvwxyz'),\n" - " namespace(s='abcdefghijklmnopqrstuvwxyz')],\n" - " a_short_name=None)"), - (sn_recursive, "namespace(first=namespace(...), second=namespace(...))"), - ] - for obj, expected in cases: - assert pretty.pretty(obj) == expected - - -def test_pretty_environ(): - dict_repr = pretty.pretty(dict(os.environ)) - # reindent to align with 'environ' prefix - dict_indented = dict_repr.replace('\n', '\n' + (' ' * len('environ'))) - env_repr = pretty.pretty(os.environ) - assert env_repr == "environ" + dict_indented - - -def test_function_pretty(): - "Test pretty print of function" - # posixpath is a pure python module, its interface is consistent - # across Python distributions - import posixpath - - assert pretty.pretty(posixpath.join) == "" - - # custom function - def meaning_of_life(question=None): - if question: - return 42 - return "Don't panic" - - assert "meaning_of_life(question=None)" in pretty.pretty(meaning_of_life) - - -class OrderedCounter(Counter, OrderedDict): - 'Counter that remembers the order elements are first encountered' - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, OrderedDict(self)) - - def __reduce__(self): - return self.__class__, (OrderedDict(self),) - -class MySet(set): # Override repr of a basic type - def __repr__(self): - return 'mine' - -def test_custom_repr(): - """A custom repr should override a pretty printer for a parent type""" - oc = OrderedCounter("abracadabra") - assert "OrderedCounter(OrderedDict" in pretty.pretty(oc) - - assert pretty.pretty(MySet()) == "mine" diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/click/formatting.py 
b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/click/formatting.py deleted file mode 100644 index ddd2a2f825f206164eb9efb0a5c41528365beb85..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/click/formatting.py +++ /dev/null @@ -1,301 +0,0 @@ -import typing as t -from contextlib import contextmanager -from gettext import gettext as _ - -from ._compat import term_len -from .parser import split_opt - -# Can force a width. This is used by the test system -FORCED_WIDTH: t.Optional[int] = None - - -def measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]: - widths: t.Dict[int, int] = {} - - for row in rows: - for idx, col in enumerate(row): - widths[idx] = max(widths.get(idx, 0), term_len(col)) - - return tuple(y for x, y in sorted(widths.items())) - - -def iter_rows( - rows: t.Iterable[t.Tuple[str, str]], col_count: int -) -> t.Iterator[t.Tuple[str, ...]]: - for row in rows: - yield row + ("",) * (col_count - len(row)) - - -def wrap_text( - text: str, - width: int = 78, - initial_indent: str = "", - subsequent_indent: str = "", - preserve_paragraphs: bool = False, -) -> str: - """A helper function that intelligently wraps text. By default, it - assumes that it operates on a single paragraph of text but if the - `preserve_paragraphs` parameter is provided it will intelligently - handle paragraphs (defined by two empty lines). - - If paragraphs are handled, a paragraph can be prefixed with an empty - line containing the ``\\b`` character (``\\x08``) to indicate that - no rewrapping should happen in that block. - - :param text: the text that should be rewrapped. - :param width: the maximum width for the text. - :param initial_indent: the initial indent that should be placed on the - first line as a string. - :param subsequent_indent: the indent string that should be placed on - each consecutive line. - :param preserve_paragraphs: if this flag is set then the wrapping will - intelligently handle paragraphs. - """ - from ._textwrap import TextWrapper - - text = text.expandtabs() - wrapper = TextWrapper( - width, - initial_indent=initial_indent, - subsequent_indent=subsequent_indent, - replace_whitespace=False, - ) - if not preserve_paragraphs: - return wrapper.fill(text) - - p: t.List[t.Tuple[int, bool, str]] = [] - buf: t.List[str] = [] - indent = None - - def _flush_par() -> None: - if not buf: - return - if buf[0].strip() == "\b": - p.append((indent or 0, True, "\n".join(buf[1:]))) - else: - p.append((indent or 0, False, " ".join(buf))) - del buf[:] - - for line in text.splitlines(): - if not line: - _flush_par() - indent = None - else: - if indent is None: - orig_len = term_len(line) - line = line.lstrip() - indent = orig_len - term_len(line) - buf.append(line) - _flush_par() - - rv = [] - for indent, raw, text in p: - with wrapper.extra_indent(" " * indent): - if raw: - rv.append(wrapper.indent_only(text)) - else: - rv.append(wrapper.fill(text)) - - return "\n\n".join(rv) - - -class HelpFormatter: - """This class helps with formatting text-based help pages. It's - usually just needed for very special internal cases, but it's also - exposed so that developers can write their own fancy outputs. - - At present, it always writes into memory. - - :param indent_increment: the additional increment for each level. - :param width: the width for the text. This defaults to the terminal - width clamped to a maximum of 78. 
- """ - - def __init__( - self, - indent_increment: int = 2, - width: t.Optional[int] = None, - max_width: t.Optional[int] = None, - ) -> None: - import shutil - - self.indent_increment = indent_increment - if max_width is None: - max_width = 80 - if width is None: - width = FORCED_WIDTH - if width is None: - width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50) - self.width = width - self.current_indent = 0 - self.buffer: t.List[str] = [] - - def write(self, string: str) -> None: - """Writes a unicode string into the internal buffer.""" - self.buffer.append(string) - - def indent(self) -> None: - """Increases the indentation.""" - self.current_indent += self.indent_increment - - def dedent(self) -> None: - """Decreases the indentation.""" - self.current_indent -= self.indent_increment - - def write_usage( - self, prog: str, args: str = "", prefix: t.Optional[str] = None - ) -> None: - """Writes a usage line into the buffer. - - :param prog: the program name. - :param args: whitespace separated list of arguments. - :param prefix: The prefix for the first line. Defaults to - ``"Usage: "``. - """ - if prefix is None: - prefix = f"{_('Usage:')} " - - usage_prefix = f"{prefix:>{self.current_indent}}{prog} " - text_width = self.width - self.current_indent - - if text_width >= (term_len(usage_prefix) + 20): - # The arguments will fit to the right of the prefix. - indent = " " * term_len(usage_prefix) - self.write( - wrap_text( - args, - text_width, - initial_indent=usage_prefix, - subsequent_indent=indent, - ) - ) - else: - # The prefix is too long, put the arguments on the next line. - self.write(usage_prefix) - self.write("\n") - indent = " " * (max(self.current_indent, term_len(prefix)) + 4) - self.write( - wrap_text( - args, text_width, initial_indent=indent, subsequent_indent=indent - ) - ) - - self.write("\n") - - def write_heading(self, heading: str) -> None: - """Writes a heading into the buffer.""" - self.write(f"{'':>{self.current_indent}}{heading}:\n") - - def write_paragraph(self) -> None: - """Writes a paragraph into the buffer.""" - if self.buffer: - self.write("\n") - - def write_text(self, text: str) -> None: - """Writes re-indented text into the buffer. This rewraps and - preserves paragraphs. - """ - indent = " " * self.current_indent - self.write( - wrap_text( - text, - self.width, - initial_indent=indent, - subsequent_indent=indent, - preserve_paragraphs=True, - ) - ) - self.write("\n") - - def write_dl( - self, - rows: t.Sequence[t.Tuple[str, str]], - col_max: int = 30, - col_spacing: int = 2, - ) -> None: - """Writes a definition list into the buffer. This is how options - and commands are usually formatted. - - :param rows: a list of two item tuples for the terms and values. - :param col_max: the maximum width of the first column. - :param col_spacing: the number of spaces between the first and - second column. 
- """ - rows = list(rows) - widths = measure_table(rows) - if len(widths) != 2: - raise TypeError("Expected two columns for definition list") - - first_col = min(widths[0], col_max) + col_spacing - - for first, second in iter_rows(rows, len(widths)): - self.write(f"{'':>{self.current_indent}}{first}") - if not second: - self.write("\n") - continue - if term_len(first) <= first_col - col_spacing: - self.write(" " * (first_col - term_len(first))) - else: - self.write("\n") - self.write(" " * (first_col + self.current_indent)) - - text_width = max(self.width - first_col - 2, 10) - wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True) - lines = wrapped_text.splitlines() - - if lines: - self.write(f"{lines[0]}\n") - - for line in lines[1:]: - self.write(f"{'':>{first_col + self.current_indent}}{line}\n") - else: - self.write("\n") - - @contextmanager - def section(self, name: str) -> t.Iterator[None]: - """Helpful context manager that writes a paragraph, a heading, - and the indents. - - :param name: the section name that is written as heading. - """ - self.write_paragraph() - self.write_heading(name) - self.indent() - try: - yield - finally: - self.dedent() - - @contextmanager - def indentation(self) -> t.Iterator[None]: - """A context manager that increases the indentation.""" - self.indent() - try: - yield - finally: - self.dedent() - - def getvalue(self) -> str: - """Returns the buffer contents.""" - return "".join(self.buffer) - - -def join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]: - """Given a list of option strings this joins them in the most appropriate - way and returns them in the form ``(formatted_string, - any_prefix_is_slash)`` where the second item in the tuple is a flag that - indicates if any of the option prefixes was a slash. 
- """ - rv = [] - any_prefix_is_slash = False - - for opt in options: - prefix = split_opt(opt)[0] - - if prefix == "/": - any_prefix_is_slash = True - - rv.append((len(prefix), opt)) - - rv.sort(key=lambda x: x[0]) - return ", ".join(x[1] for x in rv), any_prefix_is_slash diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/ref_utils.hpp b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/ref_utils.hpp deleted file mode 100644 index d1fa07d55d287db577c6604eaa1e79bff3f5fd67..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/ref_utils.hpp +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef _REF_UTILS_HPP_ -#define _REF_UTILS_HPP_ - - -PyObject* GetPyObjectPointerNoDebugInfo(bool isDebug, PyObject* object) { - if (object != nullptr && isDebug) { - // debug builds have 2 extra pointers at the front that we don't care about - return (PyObject*)((size_t*)object + 2); - } - return object; -} - -void DecRef(PyObject* object, bool isDebug) { - auto noDebug = GetPyObjectPointerNoDebugInfo(isDebug, object); - - if (noDebug != nullptr && --noDebug->ob_refcnt == 0) { - ((PyTypeObject*)GetPyObjectPointerNoDebugInfo(isDebug, noDebug->ob_type))->tp_dealloc(object); - } -} - -void IncRef(PyObject* object) { - object->ob_refcnt++; -} - -class PyObjectHolder { -private: - PyObject* _object; -public: - bool _isDebug; - - PyObjectHolder(bool isDebug) { - _object = nullptr; - _isDebug = isDebug; - } - - PyObjectHolder(bool isDebug, PyObject *object) { - _object = object; - _isDebug = isDebug; - }; - - PyObjectHolder(bool isDebug, PyObject *object, bool addRef) { - _object = object; - _isDebug = isDebug; - if (_object != nullptr && addRef) { - GetPyObjectPointerNoDebugInfo(_isDebug, _object)->ob_refcnt++; - } - }; - - PyObject* ToPython() { - return _object; - } - - ~PyObjectHolder() { - DecRef(_object, _isDebug); - } - - PyObject* operator* () { - return GetPyObjectPointerNoDebugInfo(_isDebug, _object); - } -}; - - -#endif //_REF_UTILS_HPP_ \ No newline at end of file diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/__init__.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/__init__.py deleted file mode 100644 index 259f669b78bd05815cb8d3351fd6c5fc9a1b85a1..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from . import transforms # isort:skip - -from .build import ( - build_batch_data_loader, - build_detection_test_loader, - build_detection_train_loader, - get_detection_dataset_dicts, - load_proposals_into_dataset, - print_instances_class_histogram, -) -from .catalog import DatasetCatalog, MetadataCatalog, Metadata -from .common import DatasetFromList, MapDataset, ToIterableDataset -from .dataset_mapper import DatasetMapper - -# ensure the builtin datasets are registered -from . 
import datasets, samplers # isort:skip - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/oneformer_model.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/oneformer_model.py deleted file mode 100644 index 8bb18a85a8ecdfa6a7bef912bd6eb038e79e5251..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/oneformer_model.py +++ /dev/null @@ -1,470 +0,0 @@ -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/maskformer_model.py -# Modified by Jitesh Jain (https://github.com/praeclarumjj3) -# ------------------------------------------------------------------------------ - -from typing import Tuple - -import torch -from torch import nn -from torch.nn import functional as F - -from annotator.oneformer.detectron2.config import configurable -from annotator.oneformer.detectron2.data import MetadataCatalog -from annotator.oneformer.detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head -from annotator.oneformer.detectron2.modeling.backbone import Backbone -from annotator.oneformer.detectron2.modeling.postprocessing import sem_seg_postprocess -from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, BitMasks -from annotator.oneformer.detectron2.utils.memory import retry_if_cuda_oom - -from .modeling.matcher import HungarianMatcher -from einops import rearrange -from .modeling.transformer_decoder.text_transformer import TextTransformer -from .modeling.transformer_decoder.oneformer_transformer_decoder import MLP -from annotator.oneformer.oneformer.data.tokenizer import SimpleTokenizer, Tokenize - -@META_ARCH_REGISTRY.register() -class OneFormer(nn.Module): - """ - Main class for mask classification semantic segmentation architectures. - """ - - @configurable - def __init__( - self, - *, - backbone: Backbone, - sem_seg_head: nn.Module, - task_mlp: nn.Module, - text_encoder: nn.Module, - text_projector: nn.Module, - prompt_ctx: nn.Embedding, - num_queries: int, - object_mask_threshold: float, - overlap_threshold: float, - metadata, - size_divisibility: int, - sem_seg_postprocess_before_inference: bool, - pixel_mean: Tuple[float], - pixel_std: Tuple[float], - # inference - semantic_on: bool, - panoptic_on: bool, - instance_on: bool, - detection_on: bool, - test_topk_per_image: int, - task_seq_len: int, - max_seq_len: int, - is_demo: bool, - ): - """ - Args: - backbone: a backbone module, must follow detectron2's backbone interface - sem_seg_head: a module that predicts semantic segmentation from backbone features - criterion: a module that defines the loss - num_queries: int, number of queries - object_mask_threshold: float, threshold to filter query based on classification score - for panoptic segmentation inference - overlap_threshold: overlap threshold used in general inference for panoptic segmentation - metadata: dataset meta, get `thing` and `stuff` category names for panoptic - segmentation inference - size_divisibility: Some backbones require the input height and width to be divisible by a - specific integer. We can use this to override such requirement. - sem_seg_postprocess_before_inference: whether to resize the prediction back - to original input size before semantic segmentation inference or after. 
- For high-resolution dataset like Mapillary, resizing predictions before - inference will cause OOM error. - pixel_mean, pixel_std: list or tuple with #channels element, representing - the per-channel mean and std to be used to normalize the input image - semantic_on: bool, whether to output semantic segmentation prediction - instance_on: bool, whether to output instance segmentation prediction - panoptic_on: bool, whether to output panoptic segmentation prediction - test_topk_per_image: int, instance segmentation parameter, keep topk instances per image - """ - super().__init__() - self.backbone = backbone - self.sem_seg_head = sem_seg_head - self.task_mlp = task_mlp - self.text_encoder = text_encoder - self.text_projector = text_projector - self.prompt_ctx = prompt_ctx - self.num_queries = num_queries - self.overlap_threshold = overlap_threshold - self.object_mask_threshold = object_mask_threshold - self.metadata = metadata - if size_divisibility < 0: - # use backbone size_divisibility if not set - size_divisibility = self.backbone.size_divisibility - self.size_divisibility = size_divisibility - self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference - self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) - - # additional args - self.semantic_on = semantic_on - self.instance_on = instance_on - self.panoptic_on = panoptic_on - self.detection_on = detection_on - self.test_topk_per_image = test_topk_per_image - - self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len) - self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len) - self.is_demo = is_demo - - self.thing_indices = [k for k in self.metadata.thing_dataset_id_to_contiguous_id.keys()] - - if not self.semantic_on: - assert self.sem_seg_postprocess_before_inference - - @classmethod - def from_config(cls, cfg): - backbone = build_backbone(cfg) - sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) - - if cfg.MODEL.IS_TRAIN: - text_encoder = TextTransformer(context_length=cfg.MODEL.TEXT_ENCODER.CONTEXT_LENGTH, - width=cfg.MODEL.TEXT_ENCODER.WIDTH, - layers=cfg.MODEL.TEXT_ENCODER.NUM_LAYERS, - vocab_size=cfg.MODEL.TEXT_ENCODER.VOCAB_SIZE) - text_projector = MLP(text_encoder.width, cfg.MODEL.ONE_FORMER.HIDDEN_DIM, - cfg.MODEL.ONE_FORMER.HIDDEN_DIM, cfg.MODEL.TEXT_ENCODER.PROJ_NUM_LAYERS) - if cfg.MODEL.TEXT_ENCODER.N_CTX > 0: - prompt_ctx = nn.Embedding(cfg.MODEL.TEXT_ENCODER.N_CTX, cfg.MODEL.TEXT_ENCODER.WIDTH) - else: - prompt_ctx = None - else: - text_encoder = None - text_projector = None - prompt_ctx = None - - task_mlp = MLP(cfg.INPUT.TASK_SEQ_LEN, cfg.MODEL.ONE_FORMER.HIDDEN_DIM, - cfg.MODEL.ONE_FORMER.HIDDEN_DIM, 2) - - # Loss parameters: - deep_supervision = cfg.MODEL.ONE_FORMER.DEEP_SUPERVISION - no_object_weight = cfg.MODEL.ONE_FORMER.NO_OBJECT_WEIGHT - - # loss weights - class_weight = cfg.MODEL.ONE_FORMER.CLASS_WEIGHT - dice_weight = cfg.MODEL.ONE_FORMER.DICE_WEIGHT - mask_weight = cfg.MODEL.ONE_FORMER.MASK_WEIGHT - contrastive_weight = cfg.MODEL.ONE_FORMER.CONTRASTIVE_WEIGHT - - # building criterion - matcher = HungarianMatcher( - cost_class=class_weight, - cost_mask=mask_weight, - cost_dice=dice_weight, - num_points=cfg.MODEL.ONE_FORMER.TRAIN_NUM_POINTS, - ) - - weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, - "loss_dice": dice_weight, "loss_contrastive": contrastive_weight} - - - if deep_supervision: - dec_layers = 
cfg.MODEL.ONE_FORMER.DEC_LAYERS - aux_weight_dict = {} - for i in range(dec_layers - 1): - aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) - weight_dict.update(aux_weight_dict) - - losses = ["labels", "masks", "contrastive"] - - return { - "backbone": backbone, - "sem_seg_head": sem_seg_head, - "task_mlp": task_mlp, - "prompt_ctx": prompt_ctx, - "text_encoder": text_encoder, - "text_projector": text_projector, - "num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES, - "object_mask_threshold": cfg.MODEL.TEST.OBJECT_MASK_THRESHOLD, - "overlap_threshold": cfg.MODEL.TEST.OVERLAP_THRESHOLD, - "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), - "size_divisibility": cfg.MODEL.ONE_FORMER.SIZE_DIVISIBILITY, - "sem_seg_postprocess_before_inference": ( - cfg.MODEL.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE - or cfg.MODEL.TEST.PANOPTIC_ON - or cfg.MODEL.TEST.INSTANCE_ON - ), - "pixel_mean": cfg.MODEL.PIXEL_MEAN, - "pixel_std": cfg.MODEL.PIXEL_STD, - # inference - "semantic_on": cfg.MODEL.TEST.SEMANTIC_ON, - "instance_on": cfg.MODEL.TEST.INSTANCE_ON, - "panoptic_on": cfg.MODEL.TEST.PANOPTIC_ON, - "detection_on": cfg.MODEL.TEST.DETECTION_ON, - "test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, - "task_seq_len": cfg.INPUT.TASK_SEQ_LEN, - "max_seq_len": cfg.INPUT.MAX_SEQ_LEN, - "is_demo": cfg.MODEL.IS_DEMO, - } - - @property - def device(self): - return self.pixel_mean.device - - def encode_text(self, text): - assert text.ndim in [2, 3], text.ndim - b = text.shape[0] - squeeze_dim = False - num_text = 1 - if text.ndim == 3: - num_text = text.shape[1] - text = rearrange(text, 'b n l -> (b n) l', n=num_text) - squeeze_dim = True - - # [B, C] - x = self.text_encoder(text) - - text_x = self.text_projector(x) - - if squeeze_dim: - text_x = rearrange(text_x, '(b n) c -> b n c', n=num_text) - if self.prompt_ctx is not None: - text_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_x.shape[0], 1, 1) - text_x = torch.cat([text_x, text_ctx], dim=1) - - return {"texts": text_x} - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper`. - Each item in the list contains the inputs for one image. - For now, each item in the list is a dict that contains: - * "image": Tensor, image in (C, H, W) format. - * "instances": per-region ground truth - * Other information that's included in the original dicts, such as: - "height", "width" (int): the output resolution of the model (may be different - from input resolution), used in inference. - Returns: - list[dict]: - each dict has the results for one image. The dict contains the following keys: - * "sem_seg": - A Tensor that represents the - per-pixel segmentation prediced by the head. - The prediction has shape KxHxW that represents the logits of - each class for each pixel. - * "panoptic_seg": - A tuple that represent panoptic output - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. - segments_info (list[dict]): Describe each segment in `panoptic_seg`. - Each dict contains keys "id", "category_id", "isthing". 
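                A minimal illustrative call (editor's sketch; the exact task string and the
                sizes are assumptions, not taken from the original file) would pass:

                    batched_inputs = [{
                        "image": torch.rand(3, 512, 512),     # (C, H, W)
                        "task": "the task is semantic",       # consumed by task_tokenizer / task_mlp
                        "height": 512, "width": 512,          # desired output resolution
                    }]

                During training each dict additionally carries "text" (and usually
                "instances" for building the targets).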
- """ - images = [x["image"].to(self.device) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors(images, self.size_divisibility) - - tasks = torch.cat([self.task_tokenizer(x["task"]).to(self.device).unsqueeze(0) for x in batched_inputs], dim=0) - tasks = self.task_mlp(tasks.float()) - - features = self.backbone(images.tensor) - outputs = self.sem_seg_head(features, tasks) - - if self.training: - texts = torch.cat([self.text_tokenizer(x["text"]).to(self.device).unsqueeze(0) for x in batched_inputs], dim=0) - texts_x = self.encode_text(texts) - - outputs = {**outputs, **texts_x} - - # mask classification target - if "instances" in batched_inputs[0]: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - targets = self.prepare_targets(gt_instances, images) - else: - targets = None - - # bipartite matching-based loss - losses = self.criterion(outputs, targets) - - for k in list(losses.keys()): - if k in self.criterion.weight_dict: - losses[k] *= self.criterion.weight_dict[k] - else: - # remove this loss if not specified in `weight_dict` - losses.pop(k) - return losses - else: - mask_cls_results = outputs["pred_logits"] - mask_pred_results = outputs["pred_masks"] - # upsample masks - mask_pred_results = F.interpolate( - mask_pred_results, - size=(images.tensor.shape[-2], images.tensor.shape[-1]), - mode="bilinear", - align_corners=False, - ) - - del outputs - - processed_results = [] - for i, data in enumerate(zip( - mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes - )): - mask_cls_result, mask_pred_result, input_per_image, image_size = data - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - processed_results.append({}) - - if self.sem_seg_postprocess_before_inference: - mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)( - mask_pred_result, image_size, height, width - ) - mask_cls_result = mask_cls_result.to(mask_pred_result) - - # semantic segmentation inference - if self.semantic_on: - r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result) - if not self.sem_seg_postprocess_before_inference: - r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width) - processed_results[-1]["sem_seg"] = r - - # panoptic segmentation inference - if self.panoptic_on: - panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, mask_pred_result) - processed_results[-1]["panoptic_seg"] = panoptic_r - - # instance segmentation inference - if self.instance_on: - instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result) - processed_results[-1]["instances"] = instance_r - - if self.detection_on: - bbox_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result) - processed_results[-1]["box_instances"] = bbox_r - - return processed_results - - def prepare_targets(self, targets, images): - h_pad, w_pad = images.tensor.shape[-2:] - new_targets = [] - for targets_per_image in targets: - # pad gt - gt_masks = targets_per_image.gt_masks - padded_masks = torch.zeros((gt_masks.shape[0], h_pad, w_pad), dtype=gt_masks.dtype, device=gt_masks.device) - padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks - new_targets.append( - { - "labels": targets_per_image.gt_classes, - "masks": padded_masks, - } - ) - return new_targets - - def semantic_inference(self, mask_cls, mask_pred): - mask_cls = F.softmax(mask_cls, 
dim=-1)[..., :-1] - mask_pred = mask_pred.sigmoid() - semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred) - return semseg - - def panoptic_inference(self, mask_cls, mask_pred): - scores, labels = F.softmax(mask_cls, dim=-1).max(-1) - mask_pred = mask_pred.sigmoid() - - keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold) - cur_scores = scores[keep] - cur_classes = labels[keep] - cur_masks = mask_pred[keep] - cur_mask_cls = mask_cls[keep] - cur_mask_cls = cur_mask_cls[:, :-1] - - cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks - - h, w = cur_masks.shape[-2:] - panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device) - segments_info = [] - - current_segment_id = 0 - - if cur_masks.shape[0] == 0: - # We didn't detect any mask :( - return panoptic_seg, segments_info - else: - # take argmax - cur_mask_ids = cur_prob_masks.argmax(0) - stuff_memory_list = {} - for k in range(cur_classes.shape[0]): - pred_class = cur_classes[k].item() - isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values() - mask_area = (cur_mask_ids == k).sum().item() - original_area = (cur_masks[k] >= 0.5).sum().item() - mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5) - - if mask_area > 0 and original_area > 0 and mask.sum().item() > 0: - if mask_area / original_area < self.overlap_threshold: - continue - - # merge stuff regions - if not isthing: - if int(pred_class) in stuff_memory_list.keys(): - panoptic_seg[mask] = stuff_memory_list[int(pred_class)] - continue - else: - stuff_memory_list[int(pred_class)] = current_segment_id + 1 - - current_segment_id += 1 - panoptic_seg[mask] = current_segment_id - - segments_info.append( - { - "id": current_segment_id, - "isthing": bool(isthing), - "category_id": int(pred_class), - } - ) - - return panoptic_seg, segments_info - - def instance_inference(self, mask_cls, mask_pred): - # mask_pred is already processed to have the same shape as original input - image_size = mask_pred.shape[-2:] - - # [Q, K] - scores = F.softmax(mask_cls, dim=-1)[:, :-1] - labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1) - - # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) - scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.test_topk_per_image, sorted=False) - labels_per_image = labels[topk_indices] - - topk_indices = topk_indices // self.sem_seg_head.num_classes - # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1) - mask_pred = mask_pred[topk_indices] - - # Only consider scores with confidence over [self.object_mask_threshold] for demo - if self.is_demo: - keep = scores_per_image > self.object_mask_threshold - scores_per_image = scores_per_image[keep] - labels_per_image = labels_per_image[keep] - mask_pred = mask_pred[keep] - - # if this is panoptic segmentation, we only keep the "thing" classes - if self.panoptic_on: - keep = torch.zeros_like(scores_per_image).bool() - for i, lab in enumerate(labels_per_image): - keep[i] = lab in self.metadata.thing_dataset_id_to_contiguous_id.values() - - scores_per_image = scores_per_image[keep] - labels_per_image = labels_per_image[keep] - mask_pred = mask_pred[keep] - - if 'ade20k' in self.metadata.name: - for i in range(labels_per_image.shape[0]): - labels_per_image[i] = self.thing_indices.index(labels_per_image[i].item()) - - result = Instances(image_size) - # mask (before sigmoid) - 
result.pred_masks = (mask_pred > 0).float() - if self.detection_on: - # Uncomment the following to get boxes from masks (this is slow) - result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes() - else: - result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4)) - - # calculate average mask prob - mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6) - result.scores = scores_per_image * mask_scores_per_image - result.pred_classes = labels_per_image - return result \ No newline at end of file diff --git a/spaces/Surn/UnlimitedMusicGen/audiocraft/modules/rope.py b/spaces/Surn/UnlimitedMusicGen/audiocraft/modules/rope.py deleted file mode 100644 index 4b8c70b9aba28eeb53d12ddc3de8852492847808..0000000000000000000000000000000000000000 --- a/spaces/Surn/UnlimitedMusicGen/audiocraft/modules/rope.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -from torch import nn -import torch - - -class XPos(nn.Module): - """Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1). - This applies an exponential decay to the RoPE rotation matrix. - - Args: - dim (int): Embedding dimension. - smoothing (float): Smoothing factor applied to the decay rates. - base_scale (int): Base decay rate, given in terms of scaling time. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. - """ - def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512, - device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - self.base_scale = base_scale - - half_dim = dim // 2 - adim = torch.arange(half_dim, device=device, dtype=dtype) - decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing) - self.register_buffer("decay_rates", decay_rates) - self.decay: tp.Optional[torch.Tensor] = None - - def get_decay(self, start: int, end: int): - """Create complex decay tensor, cache values for fast computation. - """ - if self.decay is None or end > self.decay.shape[0]: - assert isinstance(self.decay_rates, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype) - power = idx / self.base_scale - scale = self.decay_rates ** power.unsqueeze(-1) - self.decay = torch.polar(scale, torch.zeros_like(scale)) - return self.decay[start:end] # [T, C/2] - - -class RotaryEmbedding(nn.Module): - """Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864). - - Args: - dim (int): Embedding dimension (twice the number of frequencies). - max_period (float): Maximum period of the rotation frequencies. - xpos (bool): Use xPos, applies an exponential decay to rotation matrix. - scale (float): Scale of positional embedding, set to 0 to deactivate. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. 
- """ - def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False, - scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - self.scale = scale - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - - adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)] - frequencies = 1.0 / (max_period ** (adim / dim)) - self.register_buffer("frequencies", frequencies) - self.rotation: tp.Optional[torch.Tensor] = None - - self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None - - def get_rotation(self, start: int, end: int): - """Create complex rotation tensor, cache values for fast computation. - """ - if self.rotation is None or end > self.rotation.shape[0]: - assert isinstance(self.frequencies, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype) - angles = torch.outer(idx, self.frequencies) - self.rotation = torch.polar(torch.ones_like(angles), angles) - return self.rotation[start:end] - - def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False): - """Apply rope rotation to query or key tensor. - """ - T = x.shape[1] - rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2) - - if self.xpos: - decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2) - else: - decay = 1.0 - - if invert_decay: - decay = decay ** -1 - - x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2)) - scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale) - x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2) - - return x_out.type_as(x) - - def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0): - """ Apply rope rotation to both query and key tensors. - Supports streaming mode, in which query and key are not expected to have the same shape. - In streaming mode, key will be of legnth [P + C] with P the cached past timesteps, but - query will be [C] (typically C == 1). - - Args: - query (torch.Tensor): Query to rotate. - key (torch.Tensor): Key to rotate. - start (int): Start index of the sequence for time offset. 
- """ - query_timesteps = query.shape[1] - key_timesteps = key.shape[1] - streaming_offset = key_timesteps - query_timesteps - - query_out = self.rotate(query, start + streaming_offset) - key_out = self.rotate(key, start, invert_decay=True) - - return query_out, key_out diff --git a/spaces/SusiePHaltmann/HaltmannDiffusionv0/README.md b/spaces/SusiePHaltmann/HaltmannDiffusionv0/README.md deleted file mode 100644 index 4dc457528b6ca6c82994bfe0d8459747a65d1390..0000000000000000000000000000000000000000 --- a/spaces/SusiePHaltmann/HaltmannDiffusionv0/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: HaltmannDiffusionv0 -emoji: 🦀 -colorFrom: purple -colorTo: pink -sdk: streamlit -sdk_version: 1.9.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py deleted file mode 100644 index 65c043c87eff27e9405316fdbc0c695f2b347441..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py +++ /dev/null @@ -1,224 +0,0 @@ -import email.message -import importlib.metadata -import os -import pathlib -import zipfile -from typing import ( - Collection, - Dict, - Iterable, - Iterator, - Mapping, - Optional, - Sequence, - cast, -) - -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.packaging.version import parse as parse_version - -from pip._internal.exceptions import InvalidWheel, UnsupportedWheel -from pip._internal.metadata.base import ( - BaseDistribution, - BaseEntryPoint, - DistributionVersion, - InfoPath, - Wheel, -) -from pip._internal.utils.misc import normalize_path -from pip._internal.utils.packaging import safe_extra -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file - -from ._compat import BasePath, get_dist_name - - -class WheelDistribution(importlib.metadata.Distribution): - """An ``importlib.metadata.Distribution`` read from a wheel. - - Although ``importlib.metadata.PathDistribution`` accepts ``zipfile.Path``, - its implementation is too "lazy" for pip's needs (we can't keep the ZipFile - handle open for the entire lifetime of the distribution object). - - This implementation eagerly reads the entire metadata directory into the - memory instead, and operates from that. - """ - - def __init__( - self, - files: Mapping[pathlib.PurePosixPath, bytes], - info_location: pathlib.PurePosixPath, - ) -> None: - self._files = files - self.info_location = info_location - - @classmethod - def from_zipfile( - cls, - zf: zipfile.ZipFile, - name: str, - location: str, - ) -> "WheelDistribution": - info_dir, _ = parse_wheel(zf, name) - paths = ( - (name, pathlib.PurePosixPath(name.split("/", 1)[-1])) - for name in zf.namelist() - if name.startswith(f"{info_dir}/") - ) - files = { - relpath: read_wheel_metadata_file(zf, fullpath) - for fullpath, relpath in paths - } - info_location = pathlib.PurePosixPath(location, info_dir) - return cls(files, info_location) - - def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]: - # Only allow iterating through the metadata directory. 
- if pathlib.PurePosixPath(str(path)) in self._files: - return iter(self._files) - raise FileNotFoundError(path) - - def read_text(self, filename: str) -> Optional[str]: - try: - data = self._files[pathlib.PurePosixPath(filename)] - except KeyError: - return None - try: - text = data.decode("utf-8") - except UnicodeDecodeError as e: - wheel = self.info_location.parent - error = f"Error decoding metadata for {wheel}: {e} in {filename} file" - raise UnsupportedWheel(error) - return text - - -class Distribution(BaseDistribution): - def __init__( - self, - dist: importlib.metadata.Distribution, - info_location: Optional[BasePath], - installed_location: Optional[BasePath], - ) -> None: - self._dist = dist - self._info_location = info_location - self._installed_location = installed_location - - @classmethod - def from_directory(cls, directory: str) -> BaseDistribution: - info_location = pathlib.Path(directory) - dist = importlib.metadata.Distribution.at(info_location) - return cls(dist, info_location, info_location.parent) - - @classmethod - def from_metadata_file_contents( - cls, - metadata_contents: bytes, - filename: str, - project_name: str, - ) -> BaseDistribution: - # Generate temp dir to contain the metadata file, and write the file contents. - temp_dir = pathlib.Path( - TempDirectory(kind="metadata", globally_managed=True).path - ) - metadata_path = temp_dir / "METADATA" - metadata_path.write_bytes(metadata_contents) - # Construct dist pointing to the newly created directory. - dist = importlib.metadata.Distribution.at(metadata_path.parent) - return cls(dist, metadata_path.parent, None) - - @classmethod - def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: - try: - with wheel.as_zipfile() as zf: - dist = WheelDistribution.from_zipfile(zf, name, wheel.location) - except zipfile.BadZipFile as e: - raise InvalidWheel(wheel.location, name) from e - except UnsupportedWheel as e: - raise UnsupportedWheel(f"{name} has an invalid wheel, {e}") - return cls(dist, dist.info_location, pathlib.PurePosixPath(wheel.location)) - - @property - def location(self) -> Optional[str]: - if self._info_location is None: - return None - return str(self._info_location.parent) - - @property - def info_location(self) -> Optional[str]: - if self._info_location is None: - return None - return str(self._info_location) - - @property - def installed_location(self) -> Optional[str]: - if self._installed_location is None: - return None - return normalize_path(str(self._installed_location)) - - def _get_dist_name_from_location(self) -> Optional[str]: - """Try to get the name from the metadata directory name. - - This is much faster than reading metadata. - """ - if self._info_location is None: - return None - stem, suffix = os.path.splitext(self._info_location.name) - if suffix not in (".dist-info", ".egg-info"): - return None - return stem.split("-", 1)[0] - - @property - def canonical_name(self) -> NormalizedName: - name = self._get_dist_name_from_location() or get_dist_name(self._dist) - return canonicalize_name(name) - - @property - def version(self) -> DistributionVersion: - return parse_version(self._dist.version) - - def is_file(self, path: InfoPath) -> bool: - return self._dist.read_text(str(path)) is not None - - def iter_distutils_script_names(self) -> Iterator[str]: - # A distutils installation is always "flat" (not in e.g. egg form), so - # if this distribution's info location is NOT a pathlib.Path (but e.g. - # zipfile.Path), it can never contain any distutils scripts. 
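        # (Editor's note, not in the original file.) In this module that case is a wheel:
        # from_wheel() stores a pathlib.PurePosixPath info location, which fails the
        # isinstance check below, so wheel metadata never yields distutils scripts.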
- if not isinstance(self._info_location, pathlib.Path): - return - for child in self._info_location.joinpath("scripts").iterdir(): - yield child.name - - def read_text(self, path: InfoPath) -> str: - content = self._dist.read_text(str(path)) - if content is None: - raise FileNotFoundError(path) - return content - - def iter_entry_points(self) -> Iterable[BaseEntryPoint]: - # importlib.metadata's EntryPoint structure sasitfies BaseEntryPoint. - return self._dist.entry_points - - def _metadata_impl(self) -> email.message.Message: - # From Python 3.10+, importlib.metadata declares PackageMetadata as the - # return type. This protocol is unfortunately a disaster now and misses - # a ton of fields that we need, including get() and get_payload(). We - # rely on the implementation that the object is actually a Message now, - # until upstream can improve the protocol. (python/cpython#94952) - return cast(email.message.Message, self._dist.metadata) - - def iter_provided_extras(self) -> Iterable[str]: - return ( - safe_extra(extra) for extra in self.metadata.get_all("Provides-Extra", []) - ) - - def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: - contexts: Sequence[Dict[str, str]] = [{"extra": safe_extra(e)} for e in extras] - for req_string in self.metadata.get_all("Requires-Dist", []): - req = Requirement(req_string) - if not req.marker: - yield req - elif not extras and req.marker.evaluate({"extra": ""}): - yield req - elif any(req.marker.evaluate(context) for context in contexts): - yield req diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/packaging/_manylinux.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/packaging/_manylinux.py deleted file mode 100644 index 4c379aa6f69ff56c8f19612002c6e3e939ea6012..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/packaging/_manylinux.py +++ /dev/null @@ -1,301 +0,0 @@ -import collections -import functools -import os -import re -import struct -import sys -import warnings -from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader: - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. - """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file: IO[bytes]) -> None: - def unpack(fmt: str) -> int: - try: - data = file.read(struct.calcsize(fmt)) - result: Tuple[int, ...] 
= struct.unpack(fmt, data) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result[0] - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header() -> Optional[_ELFFileHeader]: - try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header - - -def _is_linux_armhf() -> bool: - # hard-float ABI can be detected from the ELF header of the running - # process - # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686() -> bool: - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result - - -def _have_compatible_abi(arch: str) -> bool: - if arch == "armv7l": - return _is_linux_armhf() - if arch == "i686": - return _is_linux_i686() - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} - - -# If glibc ever changes its major version, we need to know what the last -# minor version was, so we can build the complete list of all versions. -# For now, guess what the highest minor version might be, assume it will -# be 50 for testing. Once this actually happens, update the dictionary -# with the actual value. -_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) - - -class _GLibCVersion(NamedTuple): - major: int - minor: int - - -def _glibc_version_string_confstr() -> Optional[str]: - """ - Primary implementation of glibc_version_string using os.confstr. - """ - # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely - # to be broken or missing. This strategy is used in the standard library - # platform module. 
- # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 - try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". - version_string = os.confstr("CS_GNU_LIBC_VERSION") - assert version_string is not None - _, version = version_string.split() - except (AssertionError, AttributeError, OSError, ValueError): - # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... - return None - return version - - -def _glibc_version_string_ctypes() -> Optional[str]: - """ - Fallback implementation of glibc_version_string using ctypes. - """ - try: - import ctypes - except ImportError: - return None - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. - # - # We must also handle the special case where the executable is not a - # dynamically linked executable. This can occur when using musl libc, - # for example. In this situation, dlopen() will error, leading to an - # OSError. Interestingly, at least in the case of musl, there is no - # errno set on the OSError. The single string argument used to construct - # OSError comes from libc itself and is therefore not portable to - # hard code here. In any case, failure to call dlopen() means we - # can proceed, so we bail on our attempt. - try: - process_namespace = ctypes.CDLL(None) - except OSError: - return None - - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str: str = gnu_get_libc_version() - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -def _glibc_version_string() -> Optional[str]: - """Returns glibc version string, or None if not using glibc.""" - return _glibc_version_string_confstr() or _glibc_version_string_ctypes() - - -def _parse_glibc_version(version_str: str) -> Tuple[int, int]: - """Parse glibc version. - - We use a regexp instead of str.split because we want to discard any - random junk that might come after the minor version -- this might happen - in patched/forked versions of glibc (e.g. Linaro's version of glibc - uses version strings like "2.20-2014.11"). See gh-3588. - """ - m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) - if not m: - warnings.warn( - "Expected glibc version with 2 components major.minor," - " got: %s" % version_str, - RuntimeWarning, - ) - return -1, -1 - return int(m.group("major")), int(m.group("minor")) - - -@functools.lru_cache() -def _get_glibc_version() -> Tuple[int, int]: - version_str = _glibc_version_string() - if version_str is None: - return (-1, -1) - return _parse_glibc_version(version_str) - - -# From PEP 513, PEP 600 -def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool: - sys_glibc = _get_glibc_version() - if sys_glibc < version: - return False - # Check for presence of _manylinux module. 
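    # (Editor's note, not in the original file.) The optional _manylinux module is the
    # site override hook from the manylinux PEPs: PEP 600 lets it define
    # manylinux_compatible(major, minor, arch), while PEPs 513/571/599 used boolean
    # attributes such as manylinux1_compatible, which are exactly the fallbacks
    # checked below.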
- try: - import _manylinux # noqa - except ImportError: - return True - if hasattr(_manylinux, "manylinux_compatible"): - result = _manylinux.manylinux_compatible(version[0], version[1], arch) - if result is not None: - return bool(result) - return True - if version == _GLibCVersion(2, 5): - if hasattr(_manylinux, "manylinux1_compatible"): - return bool(_manylinux.manylinux1_compatible) - if version == _GLibCVersion(2, 12): - if hasattr(_manylinux, "manylinux2010_compatible"): - return bool(_manylinux.manylinux2010_compatible) - if version == _GLibCVersion(2, 17): - if hasattr(_manylinux, "manylinux2014_compatible"): - return bool(_manylinux.manylinux2014_compatible) - return True - - -_LEGACY_MANYLINUX_MAP = { - # CentOS 7 w/ glibc 2.17 (PEP 599) - (2, 17): "manylinux2014", - # CentOS 6 w/ glibc 2.12 (PEP 571) - (2, 12): "manylinux2010", - # CentOS 5 w/ glibc 2.5 (PEP 513) - (2, 5): "manylinux1", -} - - -def platform_tags(linux: str, arch: str) -> Iterator[str]: - if not _have_compatible_abi(arch): - return - # Oldest glibc to be supported regardless of architecture is (2, 17). - too_old_glibc2 = _GLibCVersion(2, 16) - if arch in {"x86_64", "i686"}: - # On x86/i686 also oldest glibc to be supported is (2, 5). - too_old_glibc2 = _GLibCVersion(2, 4) - current_glibc = _GLibCVersion(*_get_glibc_version()) - glibc_max_list = [current_glibc] - # We can assume compatibility across glibc major versions. - # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 - # - # Build a list of maximum glibc versions so that we can - # output the canonical list of all glibc from current_glibc - # down to too_old_glibc2, including all intermediary versions. - for glibc_major in range(current_glibc.major - 1, 1, -1): - glibc_minor = _LAST_GLIBC_MINOR[glibc_major] - glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) - for glibc_max in glibc_max_list: - if glibc_max.major == too_old_glibc2.major: - min_minor = too_old_glibc2.minor - else: - # For other glibc major versions oldest supported is (x, 0). - min_minor = -1 - for glibc_minor in range(glibc_max.minor, min_minor, -1): - glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) - tag = "manylinux_{}_{}".format(*glibc_version) - if _is_compatible(tag, arch, glibc_version): - yield linux.replace("linux", tag) - # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. - if glibc_version in _LEGACY_MANYLINUX_MAP: - legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] - if _is_compatible(legacy_tag, arch, glibc_version): - yield linux.replace("linux", legacy_tag) diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/shape_spec.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/shape_spec.py deleted file mode 100644 index fe7e8e261c1ab1bb1636bd7a245068d64e632306..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/shape_spec.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. -from collections import namedtuple - - -class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): - """ - A simple structure that contains basic shape specification about a tensor. - It is often used as the auxiliary inputs/outputs of models, - to complement the lack of shape inference ability among pytorch modules. 
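    For example (an editor's illustration, not from the original file), a backbone
    feature map might be described as ShapeSpec(channels=256, stride=4), leaving
    height and width as None.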
- - Attributes: - channels: - height: - width: - stride: - """ - - def __new__(cls, channels=None, height=None, width=None, stride=None): - return super().__new__(cls, channels, height, width, stride) diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py deleted file mode 100644 index b31b2d395dffb9d3694239d1aa73615899975f4e..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from __future__ import division -from typing import Any, List, Tuple -import torch -from torch import device -from torch.nn import functional as F - -from detectron2.layers.wrappers import shapes_to_tensor - - -class ImageList(object): - """ - Structure that holds a list of images (of possibly - varying sizes) as a single tensor. - This works by padding the images to the same size. - The original sizes of each image is stored in `image_sizes`. - - Attributes: - image_sizes (list[tuple[int, int]]): each tuple is (h, w). - During tracing, it becomes list[Tensor] instead. - """ - - def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): - """ - Arguments: - tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 - image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can - be smaller than (H, W) due to padding. - """ - self.tensor = tensor - self.image_sizes = image_sizes - - def __len__(self) -> int: - return len(self.image_sizes) - - def __getitem__(self, idx) -> torch.Tensor: - """ - Access the individual image in its original size. - - Args: - idx: int or slice - - Returns: - Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 - """ - size = self.image_sizes[idx] - return self.tensor[idx, ..., : size[0], : size[1]] - - @torch.jit.unused - def to(self, *args: Any, **kwargs: Any) -> "ImageList": - cast_tensor = self.tensor.to(*args, **kwargs) - return ImageList(cast_tensor, self.image_sizes) - - @property - def device(self) -> device: - return self.tensor.device - - @staticmethod - def from_tensors( - tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0 - ) -> "ImageList": - """ - Args: - tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or - (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded - to the same shape with `pad_value`. - size_divisibility (int): If `size_divisibility > 0`, add padding to ensure - the common height and width is divisible by `size_divisibility`. - This depends on the model and many models need a divisibility of 32. - pad_value (float): value to pad - - Returns: - an `ImageList`. - """ - assert len(tensors) > 0 - assert isinstance(tensors, (tuple, list)) - for t in tensors: - assert isinstance(t, torch.Tensor), type(t) - assert t.shape[:-2] == tensors[0].shape[:-2], t.shape - - image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors] - image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes] - max_size = torch.stack(image_sizes_tensor).max(0).values - - if size_divisibility > 1: - stride = size_divisibility - # the last two dims are H,W, both subject to divisibility requirement - max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride - - # handle weirdness of scripting and tracing ... 
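        # (Editor's note, not in the original file.) Under torch.jit.script the padded
        # size must become a List[int]; under torch.jit.trace the per-image sizes stay
        # tensors so the traced graph does not hard-code constant shapes.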
- if torch.jit.is_scripting(): - max_size: List[int] = max_size.to(dtype=torch.long).tolist() - else: - if torch.jit.is_tracing(): - image_sizes = image_sizes_tensor - - if len(tensors) == 1: - # This seems slightly (2%) faster. - # TODO: check whether it's faster for multiple images as well - image_size = image_sizes[0] - padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] - batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0) - else: - # max_size can be a tensor in tracing mode, therefore convert to list - batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size) - batched_imgs = tensors[0].new_full(batch_shape, pad_value) - for img, pad_img in zip(tensors, batched_imgs): - pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img) - - return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/spaces/ThirdEyeData/Text-Summarization/app.py b/spaces/ThirdEyeData/Text-Summarization/app.py deleted file mode 100644 index 489db87054a5199082a650fe6f2bce5a0cf42e20..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Text-Summarization/app.py +++ /dev/null @@ -1,95 +0,0 @@ -from heapq import nlargest -import spacy -from spacy.lang.en.stop_words import STOP_WORDS -from string import punctuation -import gradio as gr - - - -# Stopwords -stopwords = list(STOP_WORDS) -nlp = spacy.load('en_core_web_sm') -punctuation = punctuation + '\n' -import spacy -from spacy.lang.en.stop_words import STOP_WORDS -from string import punctuation - -# Prediction -def prediction(text): - doc = nlp(text) - len1 = len(text) - tokens = [token.text for token in doc] - word_frequencies = {} - for word in doc: - if word.text.lower() not in stopwords: - if word.text.lower() not in punctuation: - if word.text not in word_frequencies.keys(): - word_frequencies[word.text] = 1 - else: - word_frequencies[word.text] += 1 - max_frequency = max(word_frequencies.values()) - for word in word_frequencies.keys(): - word_frequencies[word] = word_frequencies[word]/max_frequency - sentence_tokens = [sent for sent in doc.sents] - sentence_scores = {} - for sent in sentence_tokens: - for word in sent: - if word.text.lower() in word_frequencies.keys(): - if sent not in sentence_scores.keys(): - sentence_scores[sent] = word_frequencies[word.text.lower()] - else: - sentence_scores[sent] += word_frequencies[word.text.lower()] - select_length = int(len(sentence_tokens)*0.3) - summary = nlargest(select_length, sentence_scores, key = sentence_scores.get) - org_len = len(text.split(' ')) - summary = (str(summary[0])) - sum_len = len(summary.split(' ')) - return summary,org_len,sum_len - - - - -EXAMPLES = [[""" - Maria Sharapova has basically no friends as tennis players on the WTA Tour. The Russian player has no problems in openly speaking about it and in a recent interview she said: 'I don't really hide any feelings too much. - I think everyone knows this is my job here. When I'm on the courts or when I'm on the court playing, I'm a competitor and I want to beat every single person whether they're in the locker room or across the net. - So I'm not the one to strike up a conversation about the weather and know that in the next few minutes I have to go and try to win a tennis match. - I'm a pretty competitive girl. I say my hellos, but I'm not sending any players flowers as well. Uhm, I'm not really friendly or close to many players. - I have not a lot of friends away from the courts.' 
When she said she is not really close to a lot of players, is that something strategic that she is doing? Is it different on the men's tour than the women's tour? 'No, not at all. - I think just because you're in the same sport doesn't mean that you have to be friends with everyone just because you're categorized, you're a tennis player, so you're going to get along with tennis players. - I think every person has different interests. I have friends that have completely different jobs and interests, and I've met them in very different parts of my life. - I think everyone just thinks because we're tennis players we should be the greatest of friends. But ultimately tennis is just a very small part of what we do. - There are so many other things that we're interested in, that we do.' - """],["""The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, - and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. - During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, - a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. - It was the first structure to reach a height of 300 metres. - Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). - Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."""]] - -DESCRIPTION = """We are bombarded with lakhs of characters of text and information and not so much time. -Text summarization reads the whole documents, based on frequency of words and sentences it understands the -important sentences and gives us the summary of text. -We have used a pre-trained model which is a small English pipeline trained on written web text like news, comments for this demo. 
-This can be used in organizations that deal with lots of text documents like a law firm where the documents will be summarized in -one to two paragraph as per our needs.""" -outputs = [ - gr.Textbox(lines =5,label = "Summarization of text"), - gr.Number(label="Word Count of given Text"), - gr.Number(label="Word Count of Summarized Text") -] - -demo_app = gr.Interface( - fn=prediction, - inputs=gr.Textbox(lines =10,label = " Enter the Text", max_lines = 20), - outputs= outputs, - title = "Text Summarization", - examples = EXAMPLES, - description = DESCRIPTION, - #cache_example = True, - #live = True, - theme = 'huggingface' -) -#if __name__ == "__main__": -demo_app.launch() -#demo_app.launch(debug=True, enable_queue = True) diff --git a/spaces/Trangluna2002/AI_Cover_Gen/src/infer_pack/transforms.py b/spaces/Trangluna2002/AI_Cover_Gen/src/infer_pack/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/Trangluna2002/AI_Cover_Gen/src/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - 
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / 
input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Vedarutvija/ZebraGPT/app.py b/spaces/Vedarutvija/ZebraGPT/app.py deleted file mode 100644 index e14a8795e941622107eb28c7feacbc3bb5f773f9..0000000000000000000000000000000000000000 --- a/spaces/Vedarutvija/ZebraGPT/app.py +++ /dev/null @@ -1,43 +0,0 @@ -# Let's get pipelines from transformers -import gradio as gr -import requests, uuid, json, time, openai - - -#credentials -openai.api_key = "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" -model = "gpt-3.5-turbo" - - - - - -#code -def Mutilingual(prompt, ordshnv): - - DEFAULT_SYSTEM_PROMPT = "You are Zebra GPT, an AI assistant developed by Veda. Your purpose is to provide intelligent and helpful assistance to users. Analyze user queries comprehensively and respond with accurate and concise answers. Focus on understanding the user's needs and offer solutions in a clear and informative manner. If additional information is required, ask polite and clarifying questions. Your goal is to assist users effectively, demonstrating the capabilities of Veda's advanced AI" - response = openai.ChatCompletion.create( - model= model, - messages=[ - {f"role": "system", "content": DEFAULT_SYSTEM_PROMPT}, - {f"role": "user", "content": prompt}, - ], - ) - return response.choices[0].message.content - -css = """ - #mkd { - height: 500px; - overflow: auto; - border: 1px solid #ccc; - } -""" - -with gr.Blocks(css=css) as demo: - gr.HTML("

    OpenAI Multilingual

    ") - gr.HTML("

    Etown AI Assistant model. 💬

    ") - gr.ChatInterface( - Mutilingual, - examples=[["What is the quantum computers ?"], ["what is large language models"]] - ) - -demo.queue().launch(debug=True) \ No newline at end of file diff --git a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/util/visualizer.py b/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/util/visualizer.py deleted file mode 100644 index 7a1b7b101e9b73f75f9136bc67f2063c7c1cf1c1..0000000000000000000000000000000000000000 --- a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/util/visualizer.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- -""" -@File : visualizer.py -@Time : 2022/04/05 11:39:33 -@Author : Shilong Liu -@Contact : slongliu86@gmail.com -""" - -import datetime -import os - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import torch -from matplotlib import transforms -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon -from pycocotools import mask as maskUtils - - -def renorm( - img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) -> torch.FloatTensor: - # img: tensor(3,H,W) or tensor(B,3,H,W) - # return: same as img - assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() - if img.dim() == 3: - assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( - img.size(0), - str(img.size()), - ) - img_perm = img.permute(1, 2, 0) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(2, 0, 1) - else: # img.dim() == 4 - assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( - img.size(1), - str(img.size()), - ) - img_perm = img.permute(0, 2, 3, 1) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(0, 3, 1, 2) - - -class ColorMap: - def __init__(self, basergb=[255, 255, 0]): - self.basergb = np.array(basergb) - - def __call__(self, attnmap): - # attnmap: h, w. np.uint8. - # return: h, w, 4. np.uint8. - assert attnmap.dtype == np.uint8 - h, w = attnmap.shape - res = self.basergb.copy() - res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3 - attn1 = attnmap.copy()[..., None] # h, w, 1 - res = np.concatenate((res, attn1), axis=-1).astype(np.uint8) - return res - - -def rainbow_text(x, y, ls, lc, **kw): - """ - Take a list of strings ``ls`` and colors ``lc`` and place them next to each - other, with text ls[i] being shown in color lc[i]. - - This example shows how to do both vertical and horizontal text, and will - pass all keyword arguments to plt.text, so you can set the font size, - family, etc. 
- """ - t = plt.gca().transData - fig = plt.gcf() - plt.show() - - # horizontal version - for s, c in zip(ls, lc): - text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw) - text.draw(fig.canvas.get_renderer()) - ex = text.get_window_extent() - t = transforms.offset_copy(text._transform, x=ex.width, units="dots") - - # #vertical version - # for s,c in zip(ls,lc): - # text = plt.text(x,y," "+s+" ",color=c, transform=t, - # rotation=90,va='bottom',ha='center',**kw) - # text.draw(fig.canvas.get_renderer()) - # ex = text.get_window_extent() - # t = transforms.offset_copy(text._transform, y=ex.height, units='dots') - - -class COCOVisualizer: - def __init__(self, coco=None, tokenlizer=None) -> None: - self.coco = coco - - def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"): - """ - img: tensor(3, H, W) - tgt: make sure they are all on cpu. - must have items: 'image_id', 'boxes', 'size' - """ - plt.figure(dpi=dpi) - plt.rcParams["font.size"] = "5" - ax = plt.gca() - img = renorm(img).permute(1, 2, 0) - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - ax.imshow(img) - - self.addtgt(tgt) - - if tgt is None: - image_id = 0 - elif "image_id" not in tgt: - image_id = 0 - else: - image_id = tgt["image_id"] - - if caption is None: - savename = "{}/{}-{}.png".format( - savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - else: - savename = "{}/{}-{}-{}.png".format( - savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - print("savename: {}".format(savename)) - os.makedirs(os.path.dirname(savename), exist_ok=True) - plt.savefig(savename) - plt.close() - - def addtgt(self, tgt): - """ """ - if tgt is None or not "boxes" in tgt: - ax = plt.gca() - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - - ax.set_axis_off() - return - - ax = plt.gca() - H, W = tgt["size"] - numbox = tgt["boxes"].shape[0] - - color = [] - polygons = [] - boxes = [] - for box in tgt["boxes"].cpu(): - unnormbbox = box * torch.Tensor([W, H, W, H]) - unnormbbox[:2] -= unnormbbox[2:] / 2 - [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist() - boxes.append([bbox_x, bbox_y, bbox_w, bbox_h]) - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - color.append(c) - - p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1) - ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - - if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0: - assert ( - len(tgt["strings_positive"]) == numbox - ), f"{len(tgt['strings_positive'])} = {numbox}, " - for idx, strlist in enumerate(tgt["strings_positive"]): - cate_id = int(tgt["labels"][idx]) - _string = str(cate_id) + ":" + " ".join(strlist) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "box_label" in tgt: - assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, " - for idx, bl in enumerate(tgt["box_label"]): - _string = str(bl) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, 
bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - # plt.figure() - # rainbow_text(0.0,0.0,"all unicorns poop rainbows ! ! !".split(), - # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black']) - - if "attn" in tgt: - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - if isinstance(tgt["attn"], tuple): - tgt["attn"] = [tgt["attn"]] - for item in tgt["attn"]: - attn_map, basergb = item - attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3) - attn_map = (attn_map * 255).astype(np.uint8) - cm = ColorMap(basergb) - heatmap = cm(attn_map) - ax.imshow(heatmap) - ax.set_axis_off() - - def showAnns(self, anns, draw_bbox=False): - """ - Display the specified annotations. - :param anns (array of object): annotations to display - :return: None - """ - if len(anns) == 0: - return 0 - if "segmentation" in anns[0] or "keypoints" in anns[0]: - datasetType = "instances" - elif "caption" in anns[0]: - datasetType = "captions" - else: - raise Exception("datasetType not supported") - if datasetType == "instances": - ax = plt.gca() - ax.set_autoscale_on(False) - polygons = [] - color = [] - for ann in anns: - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - if "segmentation" in ann: - if type(ann["segmentation"]) == list: - # polygon - for seg in ann["segmentation"]: - poly = np.array(seg).reshape((int(len(seg) / 2), 2)) - polygons.append(Polygon(poly)) - color.append(c) - else: - # mask - t = self.imgs[ann["image_id"]] - if type(ann["segmentation"]["counts"]) == list: - rle = maskUtils.frPyObjects( - [ann["segmentation"]], t["height"], t["width"] - ) - else: - rle = [ann["segmentation"]] - m = maskUtils.decode(rle) - img = np.ones((m.shape[0], m.shape[1], 3)) - if ann["iscrowd"] == 1: - color_mask = np.array([2.0, 166.0, 101.0]) / 255 - if ann["iscrowd"] == 0: - color_mask = np.random.random((1, 3)).tolist()[0] - for i in range(3): - img[:, :, i] = color_mask[i] - ax.imshow(np.dstack((img, m * 0.5))) - if "keypoints" in ann and type(ann["keypoints"]) == list: - # turn skeleton into zero-based index - sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1 - kp = np.array(ann["keypoints"]) - x = kp[0::3] - y = kp[1::3] - v = kp[2::3] - for sk in sks: - if np.all(v[sk] > 0): - plt.plot(x[sk], y[sk], linewidth=3, color=c) - plt.plot( - x[v > 0], - y[v > 0], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor="k", - markeredgewidth=2, - ) - plt.plot( - x[v > 1], - y[v > 1], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor=c, - markeredgewidth=2, - ) - - if draw_bbox: - [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"] - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - color.append(c) - - # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) - # ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - elif datasetType == "captions": - for ann in anns: - print(ann["caption"]) diff --git a/spaces/Vorkrath/CarperAI-diff-codegen-6b-v2/app.py b/spaces/Vorkrath/CarperAI-diff-codegen-6b-v2/app.py deleted file mode 100644 index 
a57edeee668a58afe8f2c8770e32d42d10f55842..0000000000000000000000000000000000000000 --- a/spaces/Vorkrath/CarperAI-diff-codegen-6b-v2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/CarperAI/diff-codegen-6b-v2").launch() \ No newline at end of file diff --git a/spaces/WangQvQ/BEiT_Gradio/app.py b/spaces/WangQvQ/BEiT_Gradio/app.py deleted file mode 100644 index 2607ad47a037e8764f66821bb3e6dc478cb5bf6a..0000000000000000000000000000000000000000 --- a/spaces/WangQvQ/BEiT_Gradio/app.py +++ /dev/null @@ -1,32 +0,0 @@ -import gradio as gr -from transformers import BeitFeatureExtractor, BeitForImageClassification -from PIL import Image -import requests -import numpy as np - -# Load the pre-trained BEiT model and feature extractor -feature_extractor = BeitFeatureExtractor.from_pretrained('microsoft/beit-large-patch16-512') -model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-512') - - -def classify_image(input_image): - image = Image.fromarray(input_image.astype('uint8')) - inputs = feature_extractor(images=image, return_tensors="pt") - outputs = model(**inputs) - logits = outputs.logits - predicted_class_idx = logits.argmax(-1).item() - predicted_class = model.config.id2label[predicted_class_idx] - return {"Predicted Class": predicted_class} - - -iface = gr.Interface( - fn=classify_image, - inputs=gr.inputs.Image(type="numpy"), # Specify input type as numpy array - outputs="json", - live=True, - title="BEiT Classification", - description="Upload an image and you will get a description" -) - -if __name__ == "__main__": - iface.launch() diff --git a/spaces/Wazzzabeee/image-video-colorization/models/deep_colorization/colorizers/eccv16.py b/spaces/Wazzzabeee/image-video-colorization/models/deep_colorization/colorizers/eccv16.py deleted file mode 100644 index 896ed477c20934dc86a6088117eed63af773ace8..0000000000000000000000000000000000000000 --- a/spaces/Wazzzabeee/image-video-colorization/models/deep_colorization/colorizers/eccv16.py +++ /dev/null @@ -1,105 +0,0 @@ - -import torch -import torch.nn as nn -import numpy as np -from IPython import embed - -from .base_color import * - -class ECCVGenerator(BaseColor): - def __init__(self, norm_layer=nn.BatchNorm2d): - super(ECCVGenerator, self).__init__() - - model1=[nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=True),] - model1+=[nn.ReLU(True),] - model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=True),] - model1+=[nn.ReLU(True),] - model1+=[norm_layer(64),] - - model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),] - model2+=[nn.ReLU(True),] - model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1, bias=True),] - model2+=[nn.ReLU(True),] - model2+=[norm_layer(128),] - - model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[norm_layer(256),] - - model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[norm_layer(512),] - - model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, 
padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[norm_layer(512),] - - model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[norm_layer(512),] - - model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[norm_layer(512),] - - model8=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True),] - model8+=[nn.ReLU(True),] - model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model8+=[nn.ReLU(True),] - model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model8+=[nn.ReLU(True),] - - model8+=[nn.Conv2d(256, 313, kernel_size=1, stride=1, padding=0, bias=True),] - - self.model1 = nn.Sequential(*model1) - self.model2 = nn.Sequential(*model2) - self.model3 = nn.Sequential(*model3) - self.model4 = nn.Sequential(*model4) - self.model5 = nn.Sequential(*model5) - self.model6 = nn.Sequential(*model6) - self.model7 = nn.Sequential(*model7) - self.model8 = nn.Sequential(*model8) - - self.softmax = nn.Softmax(dim=1) - self.model_out = nn.Conv2d(313, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=False) - self.upsample4 = nn.Upsample(scale_factor=4, mode='bilinear') - - def forward(self, input_l): - conv1_2 = self.model1(self.normalize_l(input_l)) - conv2_2 = self.model2(conv1_2) - conv3_3 = self.model3(conv2_2) - conv4_3 = self.model4(conv3_3) - conv5_3 = self.model5(conv4_3) - conv6_3 = self.model6(conv5_3) - conv7_3 = self.model7(conv6_3) - conv8_3 = self.model8(conv7_3) - out_reg = self.model_out(self.softmax(conv8_3)) - - return self.unnormalize_ab(self.upsample4(out_reg)) - -def eccv16(pretrained=True): - model = ECCVGenerator() - if(pretrained): - import torch.utils.model_zoo as model_zoo - model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/colorization_release_v2-9b330a0b.pth',map_location='cpu',check_hash=True)) - return model diff --git a/spaces/Xenova/next-example-app/_next/static/chunks/596-abb71f9569186505.js b/spaces/Xenova/next-example-app/_next/static/chunks/596-abb71f9569186505.js deleted file mode 100644 index 365bcbe468c04be51e0258ab7eb5856e41e7e8b9..0000000000000000000000000000000000000000 --- a/spaces/Xenova/next-example-app/_next/static/chunks/596-abb71f9569186505.js +++ /dev/null @@ -1,25 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[596],{2335:function(){"trimStart"in String.prototype||(String.prototype.trimStart=String.prototype.trimLeft),"trimEnd"in String.prototype||(String.prototype.trimEnd=String.prototype.trimRight),"description"in Symbol.prototype||Object.defineProperty(Symbol.prototype,"description",{configurable:!0,get:function(){var e=/\((.*)\)/.exec(this.toString());return e?e[1]:void 
0}}),Array.prototype.flat||(Array.prototype.flat=function(e,t){return t=this.concat.apply([],this),e>1&&t.some(Array.isArray)?t.flat(e-1):t},Array.prototype.flatMap=function(e,t){return this.map(e,t).flat()}),Promise.prototype.finally||(Promise.prototype.finally=function(e){if("function"!=typeof e)return this.then(e,e);var t=this.constructor||Promise;return this.then(function(r){return t.resolve(e()).then(function(){return r})},function(r){return t.resolve(e()).then(function(){throw r})})}),Object.fromEntries||(Object.fromEntries=function(e){return Array.from(e).reduce(function(e,t){return e[t[0]]=t[1],e},{})})},6711:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"addBasePath",{enumerable:!0,get:function(){return o}});let n=r(7253),u=r(6070);function o(e,t){return(0,u.normalizePathTrailingSlash)((0,n.addPathPrefix)(e,""))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4843:function(e,t){"use strict";function r(e){var t,r;t=self.__next_s,r=()=>{e()},t&&t.length?t.reduce((e,t)=>{let[r,n]=t;return e.then(()=>new Promise((e,t)=>{let u=document.createElement("script");if(n)for(let e in n)"children"!==e&&u.setAttribute(e,n[e]);r?(u.src=r,u.onload=()=>e(),u.onerror=t):n&&(u.innerHTML=n.children,setTimeout(e)),document.head.appendChild(u)}))},Promise.resolve()).catch(e=>{console.error(e)}).then(()=>{r()}):r()}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"appBootstrap",{enumerable:!0,get:function(){return r}}),window.next={version:"13.4.12",appDir:!0},("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4039:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"callServer",{enumerable:!0,get:function(){return u}});let n=r(7948);async function u(e,t){let r=(0,n.getServerActionDispatcher)();if(!r)throw Error("Invariant: missing action dispatcher.");return new Promise((n,u)=>{r({actionId:e,actionArgs:t,resolve:n,reject:u})})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},1615:function(e,t,r){"use strict";let n,u;Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"hydrate",{enumerable:!0,get:function(){return N}});let o=r(1024),l=r(8533);r(2335);let a=o._(r(4040)),i=l._(r(2265)),c=r(6671),s=r(1330);r(6656);let f=o._(r(5152)),d=r(4039),p=r(8747),h=window.console.error;window.console.error=function(){for(var e=arguments.length,t=Array(e),r=0;r{if((0,p.isNextRouterError)(e.error)){e.preventDefault();return}});let _=e=>t=>e(t)+"",y=r.u,b={};r.u=_(e=>encodeURI(b[e]||y(e)));let v=r.k;r.k=_(v);let m=r.miniCssF;r.miniCssF=_(m),self.__next_require__=r,self.__next_chunk_load__=e=>{if(!e)return Promise.resolve();let[t,n]=e.split(":");return b[t]=n,r.e(t)};let g=document,O=()=>{let{pathname:e,search:t}=location;return e+t},P=new TextEncoder,E=!1,j=!1;function R(e){if(0===e[0])n=[];else{if(!n)throw Error("Unexpected server data: missing bootstrap script.");u?u.enqueue(P.encode(e[1])):n.push(e[1])}}let S=function(){u&&!j&&(u.close(),j=!0,n=void 
0),E=!0};"loading"===document.readyState?document.addEventListener("DOMContentLoaded",S,!1):S();let T=self.__next_f=self.__next_f||[];T.forEach(R),T.push=R;let w=new Map;function M(e){let{cacheKey:t}=e;i.default.useEffect(()=>{w.delete(t)});let r=function(e){let t=w.get(e);if(t)return t;let r=new ReadableStream({start(e){n&&(n.forEach(t=>{e.enqueue(P.encode(t))}),E&&!j&&(e.close(),j=!0,n=void 0)),u=e}}),o=(0,c.createFromReadableStream)(r,{callServer:d.callServer});return w.set(e,o),o}(t),o=(0,i.use)(r);return o}let C=i.default.Fragment;function x(e){let{children:t}=e,[r,n]=i.default.useState(!1);return t}function A(e){return i.default.createElement(M,{...e,cacheKey:O()})}function N(){let e=i.default.createElement(C,null,i.default.createElement(s.HeadManagerContext.Provider,{value:{appDir:!0}},i.default.createElement(x,null,i.default.createElement(A,null)))),t={onRecoverableError:f.default},r="__next_error__"===document.documentElement.id;r?a.default.createRoot(g,t).render(e):i.default.startTransition(()=>a.default.hydrateRoot(g,e,t))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},2916:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0});let n=r(4843);(0,n.appBootstrap)(()=>{r(7948),r(7767);let{hydrate:e}=r(1615);e()}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},1768:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"AppRouterAnnouncer",{enumerable:!0,get:function(){return l}});let n=r(2265),u=r(4887),o="next-route-announcer";function l(e){let{tree:t}=e,[r,l]=(0,n.useState)(null);(0,n.useEffect)(()=>{let e=function(){var e;let t=document.getElementsByName(o)[0];if(null==t?void 0:null==(e=t.shadowRoot)?void 0:e.childNodes[0])return t.shadowRoot.childNodes[0];{let e=document.createElement(o);e.style.cssText="position:absolute";let t=document.createElement("div");t.ariaLive="assertive",t.id="__next-route-announcer__",t.role="alert",t.style.cssText="position:absolute;border:0;height:1px;margin:-1px;padding:0;width:1px;clip:rect(0 0 0 0);overflow:hidden;white-space:nowrap;word-wrap:normal";let r=e.attachShadow({mode:"open"});return r.appendChild(t),document.body.appendChild(e),t}}();return l(e),()=>{let e=document.getElementsByTagName(o)[0];(null==e?void 0:e.isConnected)&&document.body.removeChild(e)}},[]);let[a,i]=(0,n.useState)(""),c=(0,n.useRef)();return(0,n.useEffect)(()=>{let e="";if(document.title)e=document.title;else{let t=document.querySelector("h1");t&&(e=t.innerText||t.textContent||"")}void 0!==c.current&&i(e),c.current=e},[t]),r?(0,u.createPortal)(a,r):null}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4509:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{RSC:function(){return r},ACTION:function(){return n},NEXT_ROUTER_STATE_TREE:function(){return u},NEXT_ROUTER_PREFETCH:function(){return o},NEXT_URL:function(){return l},FETCH_CACHE_HEADER:function(){return 
a},RSC_CONTENT_TYPE_HEADER:function(){return i},RSC_VARY_HEADER:function(){return c},FLIGHT_PARAMETERS:function(){return s},NEXT_RSC_UNION_QUERY:function(){return f}});let r="RSC",n="Next-Action",u="Next-Router-State-Tree",o="Next-Router-Prefetch",l="Next-Url",a="x-vercel-sc-headers",i="text/x-component",c=r+", "+u+", "+o,s=[[r],[u],[o]],f="_rsc";("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7948:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{getServerActionDispatcher:function(){return P},urlToUrlWithoutFlightMarker:function(){return E},default:function(){return w}});let n=r(8533),u=n._(r(2265)),o=r(6656),l=r(7538),a=r(5685),i=r(9330),c=r(6208),s=r(9865),f=r(6628),d=r(4444),p=r(3738),h=r(6711),_=r(1768),y=r(935),b=r(1487),v=r(8987),m=r(4509),g=new Map,O=null;function P(){return O}function E(e){let t=new URL(e,location.origin);if(t.searchParams.delete(m.NEXT_RSC_UNION_QUERY),t.pathname.endsWith(".txt")){let{pathname:e}=t,r=e.endsWith("/index.txt")?10:4;t.pathname=e.slice(0,-r)}return t}function j(e){return e.origin!==window.location.origin}function R(e){let{tree:t,pushRef:r,canonicalUrl:n,sync:o}=e;return(0,u.useInsertionEffect)(()=>{let e={__NA:!0,tree:t};r.pendingPush&&(0,i.createHrefFromUrl)(new URL(window.location.href))!==n?(r.pendingPush=!1,window.history.pushState(e,"",n)):window.history.replaceState(e,"",n),o()},[t,r,n,o]),null}let S=()=>({status:o.CacheStates.LAZY_INITIALIZED,data:null,subTreeData:null,parallelRoutes:new Map});function T(e){let{buildId:t,initialHead:r,initialTree:n,initialCanonicalUrl:i,children:f,assetPrefix:m}=e,P=(0,u.useMemo)(()=>(0,d.createInitialRouterState)({buildId:t,children:f,initialCanonicalUrl:i,initialTree:n,initialParallelRoutes:g,isServer:!1,location:window.location,initialHead:r}),[t,f,i,n,r]),[{tree:E,cache:T,prefetchCache:w,pushRef:M,focusAndScrollRef:C,canonicalUrl:x,nextUrl:A},N,I]=(0,s.useReducerWithReduxDevtools)(l.reducer,P);(0,u.useEffect)(()=>{g=null},[]);let{searchParams:k,pathname:D}=(0,u.useMemo)(()=>{let e=new URL(x,window.location.href);return{searchParams:e.searchParams,pathname:e.pathname}},[x]),F=(0,u.useCallback)((e,t,r)=>{(0,u.startTransition)(()=>{N({type:a.ACTION_SERVER_PATCH,flightData:t,previousTree:e,overrideCanonicalUrl:r,cache:S(),mutable:{}})})},[N]),U=(0,u.useCallback)((e,t,r,n)=>{let u=new URL((0,h.addBasePath)(e),location.href);return N({type:a.ACTION_NAVIGATE,url:u,isExternalUrl:j(u),locationSearch:location.search,forceOptimisticNavigation:r,shouldScroll:null==n||n,navigateType:t,cache:S(),mutable:{}})},[N]);!function(e,t,r){let n=(0,u.useCallback)(n=>{(0,u.startTransition)(()=>{t({...n,type:a.ACTION_SERVER_ACTION,mutable:{},navigate:r,changeByServerResponse:e})})},[e,t,r]);O=n}(F,N,U);let L=(0,u.useMemo)(()=>{let e={back:()=>window.history.back(),forward:()=>window.history.forward(),prefetch:(e,t)=>{if((0,p.isBot)(window.navigator.userAgent))return;let r=new URL((0,h.addBasePath)(e),location.href);j(r)||(0,u.startTransition)(()=>{var e;N({type:a.ACTION_PREFETCH,url:r,kind:null!=(e=null==t?void 0:t.kind)?e:a.PrefetchKind.FULL})})},replace:(e,t)=>{void 0===t&&(t={}),(0,u.startTransition)(()=>{var r;U(e,"replace",!!t.forceOptimisticNavigation,null==(r=t.scroll)||r)})},push:(e,t)=>{void 0===t&&(t={}),(0,u.startTransition)(()=>{var 
r;U(e,"push",!!t.forceOptimisticNavigation,null==(r=t.scroll)||r)})},refresh:()=>{(0,u.startTransition)(()=>{N({type:a.ACTION_REFRESH,cache:S(),mutable:{},origin:window.location.origin})})},fastRefresh:()=>{throw Error("fastRefresh can only be used in development mode. Please use refresh instead.")}};return e},[N,U]);if((0,u.useEffect)(()=>{window.next&&(window.next.router=L)},[L]),M.mpaNavigation){let e=window.location;M.pendingPush?e.assign(x):e.replace(x),(0,u.use)((0,v.createInfinitePromise)())}let H=(0,u.useCallback)(e=>{let{state:t}=e;if(t){if(!t.__NA){window.location.reload();return}(0,u.startTransition)(()=>{N({type:a.ACTION_RESTORE,url:new URL(window.location.href),tree:t.tree})})}},[N]);(0,u.useEffect)(()=>(window.addEventListener("popstate",H),()=>{window.removeEventListener("popstate",H)}),[H]);let $=(0,u.useMemo)(()=>(0,b.findHeadInCache)(T,E[1]),[T,E]),W=u.default.createElement(y.RedirectBoundary,null,$,T.subTreeData,u.default.createElement(_.AppRouterAnnouncer,{tree:E}));return u.default.createElement(u.default.Fragment,null,u.default.createElement(R,{tree:E,pushRef:M,canonicalUrl:x,sync:I}),u.default.createElement(c.PathnameContext.Provider,{value:D},u.default.createElement(c.SearchParamsContext.Provider,{value:k},u.default.createElement(o.GlobalLayoutRouterContext.Provider,{value:{buildId:t,changeByServerResponse:F,tree:E,focusAndScrollRef:C,nextUrl:A}},u.default.createElement(o.AppRouterContext.Provider,{value:L},u.default.createElement(o.LayoutRouterContext.Provider,{value:{childNodes:T.parallelRoutes,tree:E,url:x}},W))))))}function w(e){let{globalErrorComponent:t,...r}=e;return u.default.createElement(f.ErrorBoundary,{errorComponent:t},u.default.createElement(T,r))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},1253:function(e,t,r){"use strict";function n(e){}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"clientHookInServerComponentError",{enumerable:!0,get:function(){return n}}),r(1024),r(2265),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},6628:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{ErrorBoundaryHandler:function(){return a},GlobalError:function(){return i},ErrorBoundary:function(){return c}});let n=r(1024),u=n._(r(2265)),o=r(8165),l={error:{fontFamily:'system-ui,"Segoe UI",Roboto,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},text:{fontSize:"14px",fontWeight:400,lineHeight:"28px",margin:"0 8px"}};class a extends u.default.Component{static getDerivedStateFromError(e){return{error:e}}static getDerivedStateFromProps(e,t){return e.pathname!==t.previousPathname&&t.error?{error:null,previousPathname:e.pathname}:{error:t.error,previousPathname:e.pathname}}render(){return 
this.state.error?u.default.createElement(u.default.Fragment,null,this.props.errorStyles,u.default.createElement(this.props.errorComponent,{error:this.state.error,reset:this.reset})):this.props.children}constructor(e){super(e),this.reset=()=>{this.setState({error:null})},this.state={error:null,previousPathname:this.props.pathname}}}function i(e){let{error:t}=e,r=null==t?void 0:t.digest;return u.default.createElement("html",{id:"__next_error__"},u.default.createElement("head",null),u.default.createElement("body",null,u.default.createElement("div",{style:l.error},u.default.createElement("div",null,u.default.createElement("h2",{style:l.text},"Application error: a "+(r?"server":"client")+"-side exception has occurred (see the "+(r?"server logs":"browser console")+" for more information)."),r?u.default.createElement("p",{style:l.text},"Digest: "+r):null))))}function c(e){let{errorComponent:t,errorStyles:r,children:n}=e,l=(0,o.usePathname)();return t?u.default.createElement(a,{pathname:l,errorComponent:t,errorStyles:r},n):u.default.createElement(u.default.Fragment,null,n)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4124:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{DYNAMIC_ERROR_CODE:function(){return r},DynamicServerError:function(){return n}});let r="DYNAMIC_SERVER_USAGE";class n extends Error{constructor(e){super("Dynamic server usage: "+e),this.digest=r}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},8987:function(e,t){"use strict";let r;function n(){return r||(r=new Promise(()=>{})),r}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createInfinitePromise",{enumerable:!0,get:function(){return n}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},8747:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isNextRouterError",{enumerable:!0,get:function(){return o}});let n=r(6920),u=r(5800);function o(e){return e&&e.digest&&((0,u.isRedirectError)(e)||(0,n.isNotFoundError)(e))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7767:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return E}});let n=r(1024),u=r(8533),o=u._(r(2265)),l=n._(r(4887)),a=r(6656),i=r(2738),c=r(8987),s=r(6628),f=r(7910),d=r(1067),p=r(935),h=r(6280),_=r(5447),y=r(4818),b=["bottom","height","left","right","top","width","x","y"];function v(e,t){let r=e.getBoundingClientRect();return r.top>=0&&r.top<=t}class m extends o.default.Component{componentDidMount(){this.handlePotentialScroll()}componentDidUpdate(){this.props.focusAndScrollRef.apply&&this.handlePotentialScroll(!0)}render(){return 
this.props.children}constructor(...e){super(...e),this.handlePotentialScroll=e=>{let{focusAndScrollRef:t,segmentPath:r}=this.props;if(t.apply){var n;if(0!==t.segmentPaths.length&&!t.segmentPaths.some(e=>r.every((t,r)=>(0,f.matchSegment)(t,e[r]))))return;let u=null,o=t.hashFragment;if(o&&(u="top"===o?document.body:null!=(n=document.getElementById(o))?n:document.getElementsByName(o)[0]),u||(u=l.default.findDOMNode(this)),!(u instanceof Element))return;for(;!(u instanceof HTMLElement)||function(e){let t=e.getBoundingClientRect();return b.every(e=>0===t[e])}(u);){if(null===u.nextElementSibling)return;u=u.nextElementSibling}t.apply=!1,t.hashFragment=null,t.segmentPaths=[],(0,d.handleSmoothScroll)(()=>{if(o){u.scrollIntoView();return}let e=document.documentElement,t=e.clientHeight;!v(u,t)&&(e.scrollTop=0,v(u,t)||u.scrollIntoView())},{dontForceLayout:!0,onlyHashChange:!!e}),u.focus()}}}}function g(e){let{segmentPath:t,children:r}=e,n=(0,o.useContext)(a.GlobalLayoutRouterContext);if(!n)throw Error("invariant global layout router not mounted");return o.default.createElement(m,{segmentPath:t,focusAndScrollRef:n.focusAndScrollRef},r)}function O(e){let{parallelRouterKey:t,url:r,childNodes:n,childProp:u,segmentPath:l,tree:s,cacheKey:d}=e,p=(0,o.useContext)(a.GlobalLayoutRouterContext);if(!p)throw Error("invariant global layout router not mounted");let{buildId:h,changeByServerResponse:_,tree:y}=p,b=n.get(d);if(u&&null!==u.current&&(b?b.status===a.CacheStates.LAZY_INITIALIZED&&(b.status=a.CacheStates.READY,b.subTreeData=u.current):(b={status:a.CacheStates.READY,data:null,subTreeData:u.current,parallelRoutes:new Map},n.set(d,b))),!b||b.status===a.CacheStates.LAZY_INITIALIZED){let e=function e(t,r){if(t){let[n,u]=t,o=2===t.length;if((0,f.matchSegment)(r[0],n)&&r[1].hasOwnProperty(u)){if(o){let t=e(void 0,r[1][u]);return[r[0],{...r[1],[u]:[t[0],t[1],t[2],"refetch"]}]}return[r[0],{...r[1],[u]:e(t.slice(2),r[1][u])}]}}return r}(["",...l],y);b={status:a.CacheStates.DATA_FETCH,data:(0,i.fetchServerResponse)(new URL(r,location.origin),e,p.nextUrl,h),subTreeData:null,head:b&&b.status===a.CacheStates.LAZY_INITIALIZED?b.head:void 0,parallelRoutes:b&&b.status===a.CacheStates.LAZY_INITIALIZED?b.parallelRoutes:new Map},n.set(d,b)}if(!b)throw Error("Child node should always exist");if(b.subTreeData&&b.data)throw Error("Child node should not have both subTreeData and data");if(b.data){let[e,t]=(0,o.use)(b.data);b.data=null,setTimeout(()=>{(0,o.startTransition)(()=>{_(y,e,t)})}),(0,o.use)((0,c.createInfinitePromise)())}b.subTreeData||(0,o.use)((0,c.createInfinitePromise)());let v=o.default.createElement(a.LayoutRouterContext.Provider,{value:{tree:s[1][t],childNodes:b.parallelRoutes,url:r}},b.subTreeData);return v}function P(e){let{children:t,loading:r,loadingStyles:n,hasLoading:u}=e;return u?o.default.createElement(o.Suspense,{fallback:o.default.createElement(o.default.Fragment,null,n,r)},t):o.default.createElement(o.default.Fragment,null,t)}function E(e){let{parallelRouterKey:t,segmentPath:r,childProp:n,error:u,errorStyles:l,templateStyles:i,loading:c,loadingStyles:d,hasLoading:b,template:v,notFound:m,notFoundStyles:E,styles:j}=e,R=(0,o.useContext)(a.LayoutRouterContext);if(!R)throw Error("invariant expected layout router to be mounted");let{childNodes:S,tree:T,url:w}=R,M=S.get(t);M||(M=new Map,S.set(t,M));let C=T[1][t][0],x=n.segment,A=(0,_.getSegmentValue)(C),N=[C];return o.default.createElement(o.default.Fragment,null,j,N.map(e=>{let 
j=(0,f.matchSegment)(e,x),R=(0,_.getSegmentValue)(e),S=(0,y.createRouterCacheKey)(e);return o.default.createElement(a.TemplateContext.Provider,{key:(0,y.createRouterCacheKey)(e,!0),value:o.default.createElement(g,{segmentPath:r},o.default.createElement(s.ErrorBoundary,{errorComponent:u,errorStyles:l},o.default.createElement(P,{hasLoading:b,loading:c,loadingStyles:d},o.default.createElement(h.NotFoundBoundary,{notFound:m,notFoundStyles:E},o.default.createElement(p.RedirectBoundary,null,o.default.createElement(O,{parallelRouterKey:t,url:w,tree:T,childNodes:M,childProp:j?n:null,segmentPath:r,cacheKey:S,isActive:A===R}))))))},i,v)}))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7910:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{matchSegment:function(){return u},canSegmentBeOverridden:function(){return o}});let n=r(5682),u=(e,t)=>"string"==typeof e?"string"==typeof t&&e===t:"string"!=typeof t&&e[0]===t[0]&&e[1]===t[1],o=(e,t)=>{var r;return!Array.isArray(e)&&!!Array.isArray(t)&&(null==(r=(0,n.getSegmentParam)(e))?void 0:r.param)===t[0]};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},8165:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{ReadonlyURLSearchParams:function(){return p},useSearchParams:function(){return h},usePathname:function(){return _},ServerInsertedHTMLContext:function(){return i.ServerInsertedHTMLContext},useServerInsertedHTML:function(){return i.useServerInsertedHTML},useRouter:function(){return y},useParams:function(){return b},useSelectedLayoutSegments:function(){return v},useSelectedLayoutSegment:function(){return m},redirect:function(){return c.redirect},notFound:function(){return s.notFound}});let n=r(2265),u=r(6656),o=r(6208),l=r(1253),a=r(5447),i=r(8169),c=r(5800),s=r(6920),f=Symbol("internal for urlsearchparams readonly");function d(){return Error("ReadonlyURLSearchParams cannot be modified")}class p{[Symbol.iterator](){return this[f][Symbol.iterator]()}append(){throw d()}delete(){throw d()}set(){throw d()}sort(){throw d()}constructor(e){this[f]=e,this.entries=e.entries.bind(e),this.forEach=e.forEach.bind(e),this.get=e.get.bind(e),this.getAll=e.getAll.bind(e),this.has=e.has.bind(e),this.keys=e.keys.bind(e),this.values=e.values.bind(e),this.toString=e.toString.bind(e)}}function h(){(0,l.clientHookInServerComponentError)("useSearchParams");let e=(0,n.useContext)(o.SearchParamsContext),t=(0,n.useMemo)(()=>e?new p(e):null,[e]);return t}function _(){return(0,l.clientHookInServerComponentError)("usePathname"),(0,n.useContext)(o.PathnameContext)}function y(){(0,l.clientHookInServerComponentError)("useRouter");let e=(0,n.useContext)(u.AppRouterContext);if(null===e)throw Error("invariant expected app router to be mounted");return e}function b(){(0,l.clientHookInServerComponentError)("useParams");let e=(0,n.useContext)(u.GlobalLayoutRouterContext);return e?function e(t,r){void 0===r&&(r={});let n=t[1];for(let t of Object.values(n)){let n=t[0],u=Array.isArray(n),o=u?n[1]:n;if(!o||o.startsWith("__PAGE__"))continue;let 
l=u&&("c"===n[2]||"oc"===n[2]);l?r[n[0]]=n[1].split("/"):u&&(r[n[0]]=n[1]),r=e(t,r)}return r}(e.tree):null}function v(e){void 0===e&&(e="children"),(0,l.clientHookInServerComponentError)("useSelectedLayoutSegments");let{tree:t}=(0,n.useContext)(u.LayoutRouterContext);return function e(t,r,n,u){let o;if(void 0===n&&(n=!0),void 0===u&&(u=[]),n)o=t[1][r];else{var l;let e=t[1];o=null!=(l=e.children)?l:Object.values(e)[0]}if(!o)return u;let i=o[0],c=(0,a.getSegmentValue)(i);return!c||c.startsWith("__PAGE__")?u:(u.push(c),e(o,r,!1,u))}(t,e)}function m(e){void 0===e&&(e="children"),(0,l.clientHookInServerComponentError)("useSelectedLayoutSegment");let t=v(e);return 0===t.length?null:t[0]}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},6280:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"NotFoundBoundary",{enumerable:!0,get:function(){return a}});let n=r(1024),u=n._(r(2265)),o=r(8165);class l extends u.default.Component{static getDerivedStateFromError(e){if((null==e?void 0:e.digest)==="NEXT_NOT_FOUND")return{notFoundTriggered:!0};throw e}static getDerivedStateFromProps(e,t){return e.pathname!==t.previousPathname&&t.notFoundTriggered?{notFoundTriggered:!1,previousPathname:e.pathname}:{notFoundTriggered:t.notFoundTriggered,previousPathname:e.pathname}}render(){return this.state.notFoundTriggered?u.default.createElement(u.default.Fragment,null,u.default.createElement("meta",{name:"robots",content:"noindex"}),!1,this.props.notFoundStyles,this.props.notFound):this.props.children}constructor(e){super(e),this.state={notFoundTriggered:!!e.asNotFound,previousPathname:e.pathname}}}function a(e){let{notFound:t,notFoundStyles:r,asNotFound:n,children:a}=e,i=(0,o.usePathname)();return t?u.default.createElement(l,{pathname:i,notFound:t,notFoundStyles:r,asNotFound:n},a):u.default.createElement(u.default.Fragment,null,a)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},6920:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{notFound:function(){return n},isNotFoundError:function(){return u}});let r="NEXT_NOT_FOUND";function n(){let e=Error(r);throw e.digest=r,e}function u(e){return(null==e?void 0:e.digest)===r}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7843:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"PromiseQueue",{enumerable:!0,get:function(){return c}});let n=r(4677),u=r(6249);var o=u._("_maxConcurrency"),l=u._("_runningCount"),a=u._("_queue"),i=u._("_processNext");class c{enqueue(e){let t,r;let u=new Promise((e,n)=>{t=e,r=n}),o=async()=>{try{n._(this,l)[l]++;let r=await e();t(r)}catch(e){r(e)}finally{n._(this,l)[l]--,n._(this,i)[i]()}};return n._(this,a)[a].push({promiseFn:u,task:o}),n._(this,i)[i](),u}bump(e){let t=n._(this,a)[a].findIndex(t=>t.promiseFn===e);if(t>-1){let 
e=n._(this,a)[a].splice(t,1)[0];n._(this,a)[a].unshift(e),n._(this,i)[i](!0)}}constructor(e=5){Object.defineProperty(this,i,{value:s}),Object.defineProperty(this,o,{writable:!0,value:void 0}),Object.defineProperty(this,l,{writable:!0,value:void 0}),Object.defineProperty(this,a,{writable:!0,value:void 0}),n._(this,o)[o]=e,n._(this,l)[l]=0,n._(this,a)[a]=[]}}function s(e){if(void 0===e&&(e=!1),(n._(this,l)[l]0){var t;null==(t=n._(this,a)[a].shift())||t.task()}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},935:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{RedirectErrorBoundary:function(){return i},RedirectBoundary:function(){return c}});let n=r(8533),u=n._(r(2265)),o=r(8165),l=r(5800);function a(e){let{redirect:t,reset:r,redirectType:n}=e,a=(0,o.useRouter)();return(0,u.useEffect)(()=>{u.default.startTransition(()=>{n===l.RedirectType.push?a.push(t,{}):a.replace(t,{}),r()})},[t,n,r,a]),null}class i extends u.default.Component{static getDerivedStateFromError(e){if((0,l.isRedirectError)(e)){let t=(0,l.getURLFromRedirectError)(e),r=(0,l.getRedirectTypeFromError)(e);return{redirect:t,redirectType:r}}throw e}render(){let{redirect:e,redirectType:t}=this.state;return null!==e&&null!==t?u.default.createElement(a,{redirect:e,redirectType:t,reset:()=>this.setState({redirect:null})}):this.props.children}constructor(e){super(e),this.state={redirect:null,redirectType:null}}}function c(e){let{children:t}=e,r=(0,o.useRouter)();return u.default.createElement(i,{router:r},t)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5800:function(e,t,r){"use strict";var n,u;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{RedirectType:function(){return n},getRedirectError:function(){return a},redirect:function(){return i},isRedirectError:function(){return c},getURLFromRedirectError:function(){return s},getRedirectTypeFromError:function(){return f}});let o=r(6170),l="NEXT_REDIRECT";function a(e,t){let r=Error(l);r.digest=l+";"+t+";"+e;let n=o.requestAsyncStorage.getStore();return n&&(r.mutableCookies=n.mutableCookies),r}function i(e,t){throw void 0===t&&(t="replace"),a(e,t)}function c(e){if("string"!=typeof(null==e?void 0:e.digest))return!1;let[t,r,n]=e.digest.split(";",3);return t===l&&("replace"===r||"push"===r)&&"string"==typeof n}function s(e){return c(e)?e.digest.split(";",3)[2]:null}function f(e){if(!c(e))throw Error("Not a redirect error");return e.digest.split(";",3)[1]}(u=n||(n={})).push="push",u.replace="replace",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7920:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return l}});let n=r(8533),u=n._(r(2265)),o=r(6656);function l(){let e=(0,u.useContext)(o.TemplateContext);return u.default.createElement(u.default.Fragment,null,e)}("function"==typeof t.default||"object"==typeof 
t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7027:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"applyFlightData",{enumerable:!0,get:function(){return l}});let n=r(6656),u=r(9726),o=r(516);function l(e,t,r,l){void 0===l&&(l=!1);let[a,i,c]=r.slice(-3);return null!==i&&(3===r.length?(t.status=n.CacheStates.READY,t.subTreeData=i,(0,u.fillLazyItemsTillLeafWithHead)(t,e,a,c,l)):(t.status=n.CacheStates.READY,t.subTreeData=e.subTreeData,t.parallelRoutes=new Map(e.parallelRoutes),(0,o.fillCacheWithNewSubTreeData)(t,e,r,l)),!0)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7491:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"applyRouterStatePatchToTree",{enumerable:!0,get:function(){return function e(t,r,o){let l;let[a,i,,,c]=r;if(1===t.length){let e=u(r,o);return e}let[s,f]=t;if(!(0,n.matchSegment)(s,a))return null;let d=2===t.length;if(d)l=u(i[f],o);else if(null===(l=e(t.slice(2),i[f],o)))return null;let p=[t[0],{...i,[f]:l}];return c&&(p[4]=!0),p}}});let n=r(7910);function u(e,t){let[r,o]=e,[l,a]=t;if("__DEFAULT__"===l&&"__DEFAULT__"!==r)return e;if((0,n.matchSegment)(r,l)){let t={};for(let e in o){let r=void 0!==a[e];r?t[e]=u(o[e],a[e]):t[e]=o[e]}for(let e in a)t[e]||(t[e]=a[e]);let n=[r,t];return e[2]&&(n[2]=e[2]),e[3]&&(n[3]=e[3]),e[4]&&(n[4]=e[4]),n}return t}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5121:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{extractPathFromFlightRouterState:function(){return a},computeChangedPath:function(){return i}});let n=r(4507),u=r(7910),o=e=>"string"==typeof e?e:e[1];function l(e){return e.split("/").reduce((e,t)=>""===t||t.startsWith("(")&&t.endsWith(")")?e:e+"/"+t,"")||"/"}function a(e){var t;let r=Array.isArray(e[0])?e[0][1]:e[0];if("__DEFAULT__"===r||n.INTERCEPTION_ROUTE_MARKERS.some(e=>r.startsWith(e)))return;if(r.startsWith("__PAGE__"))return"";let u=[r],o=null!=(t=e[1])?t:{},i=o.children?a(o.children):void 0;if(void 0!==i)u.push(i);else for(let[e,t]of Object.entries(o)){if("children"===e)continue;let r=a(t);void 0!==r&&u.push(r)}return l(u.join("/"))}function i(e,t){let r=function e(t,r){let[l,i]=t,[c,s]=r,f=o(l),d=o(c);if(n.INTERCEPTION_ROUTE_MARKERS.some(e=>f.startsWith(e)||d.startsWith(e)))return"";if(!(0,u.matchSegment)(l,c)){var p;return null!=(p=a(r))?p:""}for(let t in i)if(s[t]){let r=e(i[t],s[t]);if(null!==r)return o(c)+"/"+r}return null}(e,t);return null==r||"/"===r?r:l(r)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},9330:function(e,t){"use strict";function r(e,t){return void 0===t&&(t=!0),e.pathname+e.search+(t?e.hash:"")}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createHrefFromUrl",{enumerable:!0,get:function(){return r}}),("function"==typeof 
t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4444:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createInitialRouterState",{enumerable:!0,get:function(){return a}});let n=r(6656),u=r(9330),o=r(9726),l=r(5121);function a(e){var t;let{buildId:r,initialTree:a,children:i,initialCanonicalUrl:c,initialParallelRoutes:s,isServer:f,location:d,initialHead:p}=e,h={status:n.CacheStates.READY,data:null,subTreeData:i,parallelRoutes:f?new Map:s};return(null===s||0===s.size)&&(0,o.fillLazyItemsTillLeafWithHead)(h,void 0,a,p),{buildId:r,tree:a,cache:h,prefetchCache:new Map,pushRef:{pendingPush:!1,mpaNavigation:!1},focusAndScrollRef:{apply:!1,hashFragment:null,segmentPaths:[]},canonicalUrl:d?(0,u.createHrefFromUrl)(d):c,nextUrl:null!=(t=(0,l.extractPathFromFlightRouterState)(a)||(null==d?void 0:d.pathname))?t:null}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4679:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createOptimisticTree",{enumerable:!0,get:function(){return function e(t,r,u){let o;let[l,a,i,c,s]=r||[null,{}],f=t[0],d=1===t.length,p=null!==l&&(0,n.matchSegment)(l,f),h=Object.keys(a).length>1,_=!r||!p||h,y={};if(null!==l&&p&&(y=a),!d&&!h){let r=e(t.slice(1),y?y.children:null,u||_);o=r}let b=[f,{...y,...o?{children:o}:{}}];return i&&(b[2]=i),!u&&_?b[3]="refetch":p&&c&&(b[3]=c),p&&s&&(b[4]=s),b}}});let n=r(7910);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},8982:function(e,t){"use strict";function r(e){return e.status="pending",e.then(t=>{"pending"===e.status&&(e.status="fulfilled",e.value=t)},t=>{"pending"===e.status&&(e.status="rejected",e.value=t)}),e}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createRecordFromThenable",{enumerable:!0,get:function(){return r}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4818:function(e,t){"use strict";function r(e,t){return void 0===t&&(t=!1),Array.isArray(e)?e[0]+"|"+e[1]+"|"+e[2]:t&&e.startsWith("__PAGE__")?"__PAGE__":e}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createRouterCacheKey",{enumerable:!0,get:function(){return r}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},2738:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fetchServerResponse",{enumerable:!0,get:function(){return s}});let n=r(6671),u=r(4509),o=r(7948),l=r(4039),a=r(5685),i=r(216);function c(e){return[(0,o.urlToUrlWithoutFlightMarker)(e).toString(),void 0]}async function s(e,t,r,s,f){let d={[u.RSC]:"1",[u.NEXT_ROUTER_STATE_TREE]:encodeURIComponent(JSON.stringify(t))};f===a.PrefetchKind.AUTO&&(d[u.NEXT_ROUTER_PREFETCH]="1"),r&&(d[u.NEXT_URL]=r);let 
p=(0,i.hexHash)([d[u.NEXT_ROUTER_PREFETCH]||"0",d[u.NEXT_ROUTER_STATE_TREE]].join(","));try{let t=new URL(e);t.pathname.endsWith("/")?t.pathname+="index.txt":t.pathname+=".txt",t.searchParams.set(u.NEXT_RSC_UNION_QUERY,p);let r=await fetch(t,{credentials:"same-origin",headers:d}),a=(0,o.urlToUrlWithoutFlightMarker)(r.url),i=r.redirected?a:void 0,f=r.headers.get("content-type")||"",h=f===u.RSC_CONTENT_TYPE_HEADER;if(h||(h=f.startsWith("text/plain")),!h||!r.ok)return c(a.toString());let[_,y]=await (0,n.createFromFetch)(Promise.resolve(r),{callServer:l.callServer});if(s!==_)return c(r.url);return[y,i]}catch(t){return console.error("Failed to fetch RSC payload. Falling back to browser navigation.",t),[e.toString(),void 0]}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},2562:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillCacheWithDataProperty",{enumerable:!0,get:function(){return function e(t,r,o,l,a){void 0===a&&(a=!1);let i=o.length<=2,[c,s]=o,f=(0,u.createRouterCacheKey)(s),d=r.parallelRoutes.get(c);if(!d||a&&r.parallelRoutes.size>1)return{bailOptimistic:!0};let p=t.parallelRoutes.get(c);p&&p!==d||(p=new Map(d),t.parallelRoutes.set(c,p));let h=d.get(f),_=p.get(f);if(i){_&&_.data&&_!==h||p.set(f,{status:n.CacheStates.DATA_FETCH,data:l(),subTreeData:null,parallelRoutes:new Map});return}if(!_||!h){_||p.set(f,{status:n.CacheStates.DATA_FETCH,data:l(),subTreeData:null,parallelRoutes:new Map});return}return _===h&&(_={status:_.status,data:_.data,subTreeData:_.subTreeData,parallelRoutes:new Map(_.parallelRoutes)},p.set(f,_)),e(_,h,o.slice(2),l)}}});let n=r(6656),u=r(4818);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},516:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillCacheWithNewSubTreeData",{enumerable:!0,get:function(){return function e(t,r,a,i){let c=a.length<=5,[s,f]=a,d=(0,l.createRouterCacheKey)(f),p=r.parallelRoutes.get(s);if(!p)return;let h=t.parallelRoutes.get(s);h&&h!==p||(h=new Map(p),t.parallelRoutes.set(s,h));let _=p.get(d),y=h.get(d);if(c){y&&y.data&&y!==_||(y={status:n.CacheStates.READY,data:null,subTreeData:a[3],parallelRoutes:_?new Map(_.parallelRoutes):new Map},_&&(0,u.invalidateCacheByRouterState)(y,_,a[2]),(0,o.fillLazyItemsTillLeafWithHead)(y,_,a[2],a[4],i),h.set(d,y));return}y&&_&&(y===_&&(y={status:y.status,data:y.data,subTreeData:y.subTreeData,parallelRoutes:new Map(y.parallelRoutes)},h.set(d,y)),e(y,_,a.slice(2),i))}}});let n=r(6656),u=r(9495),o=r(9726),l=r(4818);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},9726:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillLazyItemsTillLeafWithHead",{enumerable:!0,get:function(){return function e(t,r,o,l,a){let i=0===Object.keys(o[1]).length;if(i){t.head=l;return}for(let i in o[1]){let c=o[1][i],s=c[0],f=(0,u.createRouterCacheKey)(s);if(r){let u=r.parallelRoutes.get(i);if(u){let r=new 
Map(u),o=r.get(f),s=a&&o?{status:o.status,data:o.data,subTreeData:o.subTreeData,parallelRoutes:new Map(o.parallelRoutes)}:{status:n.CacheStates.LAZY_INITIALIZED,data:null,subTreeData:null,parallelRoutes:new Map(null==o?void 0:o.parallelRoutes)};r.set(f,s),e(s,o,c,l,a),t.parallelRoutes.set(i,r);continue}}let d={status:n.CacheStates.LAZY_INITIALIZED,data:null,subTreeData:null,parallelRoutes:new Map},p=t.parallelRoutes.get(i);p?p.set(f,d):t.parallelRoutes.set(i,new Map([[f,d]])),e(d,void 0,c,l,a)}}}});let n=r(6656),u=r(4818);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},234:function(e,t){"use strict";var r,n;function u(e){let{kind:t,prefetchTime:r,lastUsedTime:n}=e;return Date.now()<(null!=n?n:r)+3e4?n?"reusable":"fresh":"auto"===t&&Date.now()["children",e]).flat(),p=(0,c.fillCacheWithDataProperty)(f,e.cache,d,()=>(t||(t=(0,o.createRecordFromThenable)((0,u.fetchServerResponse)(r,i,e.nextUrl,e.buildId))),t),!0);if(!(null==p?void 0:p.bailOptimistic))return R.previousTree=e.tree,R.patchedTree=i,R.pendingPush=x,R.hashFragment=M,R.shouldScroll=T,R.scrollableSegments=[],R.cache=f,R.canonicalUrl=C,e.prefetchCache.set((0,a.createHrefFromUrl)(r,!1),{data:Promise.resolve(t),kind:h.PrefetchKind.TEMPORARY,prefetchTime:Date.now(),treeAtTimeOfPrefetch:e.tree,lastUsedTime:Date.now()}),(0,_.handleMutable)(e,R)}if(!N){let t=(0,o.createRecordFromThenable)((0,u.fetchServerResponse)(r,e.tree,e.nextUrl,e.buildId,void 0)),n={data:Promise.resolve(t),kind:h.PrefetchKind.TEMPORARY,prefetchTime:Date.now(),treeAtTimeOfPrefetch:e.tree,lastUsedTime:null};e.prefetchCache.set((0,a.createHrefFromUrl)(r,!1),n),N=n}let I=(0,b.getPrefetchEntryCacheStatus)(N),{treeAtTimeOfPrefetch:k,data:D}=N;m.prefetchQueue.bump(D);let[F,U]=(0,l.readRecordValue)(D);if(N.lastUsedTime=Date.now(),"string"==typeof F)return g(e,R,F,x);let L=e.tree,H=e.cache,$=[];for(let t of F){let o=t.slice(0,-4),l=t.slice(-3)[0],a=["",...o],s=(0,f.applyRouterStatePatchToTree)(a,L,l);if(null===s&&(s=(0,f.applyRouterStatePatchToTree)(a,k,l)),null!==s){if((0,p.isNavigatingToNewRootLayout)(L,s))return g(e,R,C,x);let f=(0,y.applyFlightData)(H,j,t,"auto"===N.kind&&I===b.PrefetchCacheEntryStatus.reusable);f||I!==b.PrefetchCacheEntryStatus.stale||(f=function(e,t,r,u,o){let l=!1;e.status=n.CacheStates.READY,e.subTreeData=t.subTreeData,e.parallelRoutes=new Map(t.parallelRoutes);let a=O(u).map(e=>[...r,...e]);for(let r of a){let n=(0,c.fillCacheWithDataProperty)(e,t,r,o);(null==n?void 0:n.bailOptimistic)||(l=!0)}return l}(j,H,o,l,()=>(0,u.fetchServerResponse)(r,L,e.nextUrl,e.buildId)));let h=(0,d.shouldHardNavigate)(a,L);for(let e of(h?(j.status=n.CacheStates.READY,j.subTreeData=H.subTreeData,(0,i.invalidateCacheBelowFlightSegmentPath)(j,H,o),R.cache=j):f&&(R.cache=j),H=j,L=s,O(l))){let t=[...o,...e];"__DEFAULT__"!==t[t.length-1]&&$.push(t)}}}return R.previousTree=e.tree,R.patchedTree=L,R.canonicalUrl=U?(0,a.createHrefFromUrl)(U):C,R.pendingPush=x,R.scrollableSegments=$,R.hashFragment=M,R.shouldScroll=T,(0,_.handleMutable)(e,R)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},8593:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in 
t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{prefetchQueue:function(){return s},prefetchReducer:function(){return f}});let n=r(9330),u=r(2738),o=r(5685),l=r(8982),a=r(3996),i=r(4509),c=r(7843),s=new c.PromiseQueue(5);function f(e,t){(0,a.prunePrefetchCache)(e.prefetchCache);let{url:r}=t;r.searchParams.delete(i.NEXT_RSC_UNION_QUERY);let c=(0,n.createHrefFromUrl)(r,!1),f=e.prefetchCache.get(c);if(f&&(f.kind===o.PrefetchKind.TEMPORARY&&e.prefetchCache.set(c,{...f,kind:t.kind}),!(f.kind===o.PrefetchKind.AUTO&&t.kind===o.PrefetchKind.FULL)))return e;let d=(0,l.createRecordFromThenable)(s.enqueue(()=>(0,u.fetchServerResponse)(r,e.tree,e.nextUrl,e.buildId,t.kind)));return e.prefetchCache.set(c,{treeAtTimeOfPrefetch:e.tree,data:d,kind:t.kind,prefetchTime:Date.now(),lastUsedTime:null}),e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},3996:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"prunePrefetchCache",{enumerable:!0,get:function(){return u}});let n=r(234);function u(e){for(let[t,r]of e)(0,n.getPrefetchEntryCacheStatus)(r)===n.PrefetchCacheEntryStatus.expired&&e.delete(t)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7439:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"refreshReducer",{enumerable:!0,get:function(){return p}});let n=r(2738),u=r(8982),o=r(6689),l=r(9330),a=r(7491),i=r(3139),c=r(4838),s=r(7575),f=r(6656),d=r(9726);function p(e,t){let{cache:r,mutable:p,origin:h}=t,_=e.canonicalUrl,y=e.tree,b=JSON.stringify(p.previousTree)===JSON.stringify(y);if(b)return(0,s.handleMutable)(e,p);r.data||(r.data=(0,u.createRecordFromThenable)((0,n.fetchServerResponse)(new URL(_,h),[y[0],y[1],y[2],"refetch"],e.nextUrl,e.buildId)));let[v,m]=(0,o.readRecordValue)(r.data);if("string"==typeof v)return(0,c.handleExternalUrl)(e,p,v,e.pushRef.pendingPush);for(let t of(r.data=null,v)){if(3!==t.length)return console.log("REFRESH FAILED"),e;let[n]=t,u=(0,a.applyRouterStatePatchToTree)([""],y,n);if(null===u)throw Error("SEGMENT MISMATCH");if((0,i.isNavigatingToNewRootLayout)(y,u))return(0,c.handleExternalUrl)(e,p,_,e.pushRef.pendingPush);let o=m?(0,l.createHrefFromUrl)(m):void 0;m&&(p.canonicalUrl=o);let[s,h]=t.slice(-2);null!==s&&(r.status=f.CacheStates.READY,r.subTreeData=s,(0,d.fillLazyItemsTillLeafWithHead)(r,void 0,n,h),p.cache=r,p.prefetchCache=new Map),p.previousTree=y,p.patchedTree=u,p.canonicalUrl=_,y=u}return(0,s.handleMutable)(e,p)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},9958:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"restoreReducer",{enumerable:!0,get:function(){return u}});let n=r(9330);function u(e,t){let{url:r,tree:u}=t,o=(0,n.createHrefFromUrl)(r);return{buildId:e.buildId,canonicalUrl:o,pushRef:e.pushRef,focusAndScrollRef:e.focusAndScrollRef,cache:e.cache,prefetchCache:e.prefetchCache,tree:u,nextUrl:r.pathname}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7148:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"serverActionReducer",{enumerable:!0,get:function(){return p}});let n=r(4039),u=r(4509),o=r(8982),l=r(6689),a=r(6671),i=r(5685),c=r(6711),s=r(9330),f=r(5800);async function d(e,t){let r,{actionId:o,actionArgs:l}=t,i=await (0,a.encodeReply)(l),s=await fetch("",{method:"POST",headers:{Accept:u.RSC_CONTENT_TYPE_HEADER,"Next-Action":o,[u.NEXT_ROUTER_STATE_TREE]:JSON.stringify(e.tree),...e.nextUrl?{[u.NEXT_URL]:e.nextUrl}:{}},body:i}),f=s.headers.get("x-action-redirect");try{let e=JSON.parse(s.headers.get("x-action-revalidated")||"[[],0,0]");r={paths:e[0]||[],tag:!!e[1],cookie:e[2]}}catch(e){r={paths:[],tag:!1,cookie:!1}}let d=f?new URL((0,c.addBasePath)(f),window.location.origin):void 0;if(s.headers.get("content-type")===u.RSC_CONTENT_TYPE_HEADER){let e=await (0,a.createFromFetch)(Promise.resolve(s),{callServer:n.callServer});if(f){let[,t]=e;return{actionFlightData:null==t?void 0:t[1],redirectLocation:d,revalidatedParts:r}}{let[t,[,n]]=null!=e?e:[];return{actionResult:t,actionFlightData:n,redirectLocation:d,revalidatedParts:r}}}return{redirectLocation:d,revalidatedParts:r}}function p(e,t){if(t.mutable.serverActionApplied)return e;t.mutable.inFlightServerAction||(t.mutable.previousTree=e.tree,t.mutable.previousUrl=e.canonicalUrl,t.mutable.inFlightServerAction=(0,o.createRecordFromThenable)(d(e,t)));try{var r,n;let{actionResult:u,actionFlightData:a,redirectLocation:c,revalidatedParts:d}=(0,l.readRecordValue)(t.mutable.inFlightServerAction);if(d.tag||d.cookie?e.prefetchCache.clear():d.paths.length>0&&e.prefetchCache.clear(),c){if(a){let n=(0,s.createHrefFromUrl)(c,!1),u=e.prefetchCache.get(n);e.prefetchCache.set(n,{data:(0,o.createRecordFromThenable)(Promise.resolve([a,void 0])),kind:null!=(r=null==u?void 0:u.kind)?r:i.PrefetchKind.TEMPORARY,prefetchTime:Date.now(),treeAtTimeOfPrefetch:t.mutable.previousTree,lastUsedTime:null})}t.reject((0,f.getRedirectError)(c.toString(),f.RedirectType.push))}else{if(a){let r=(0,s.createHrefFromUrl)(new URL(t.mutable.previousUrl,window.location.origin),!1),u=e.prefetchCache.get(r);e.prefetchCache.set((0,s.createHrefFromUrl)(new URL(t.mutable.previousUrl,window.location.origin),!1),{data:(0,o.createRecordFromThenable)(Promise.resolve([a,void 0])),kind:null!=(n=null==u?void 0:u.kind)?n:i.PrefetchKind.TEMPORARY,prefetchTime:Date.now(),treeAtTimeOfPrefetch:t.mutable.previousTree,lastUsedTime:null}),setTimeout(()=>{t.changeByServerResponse(t.mutable.previousTree,a,void 0)})}t.resolve(u)}}catch(e){if("rejected"===e.status)t.reject(e.value);else throw e}return t.mutable.serverActionApplied=!0,e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7811:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"serverPatchReducer",{enumerable:!0,get:function(){return c}});let n=r(9330),u=r(7491),o=r(3139),l=r(4838),a=r(7027),i=r(7575);function c(e,t){let{flightData:r,previousTree:c,overrideCanonicalUrl:s,cache:f,mutable:d}=t,p=JSON.stringify(c)===JSON.stringify(e.tree);if(!p)return console.log("TREE MISMATCH"),e;if(d.previousTree)return(0,i.handleMutable)(e,d);if("string"==typeof 
r)return(0,l.handleExternalUrl)(e,d,r,e.pushRef.pendingPush);let h=e.tree,_=e.cache;for(let t of r){let r=t.slice(0,-4),[i]=t.slice(-3,-2),c=(0,u.applyRouterStatePatchToTree)(["",...r],h,i);if(null===c)throw Error("SEGMENT MISMATCH");if((0,o.isNavigatingToNewRootLayout)(h,c))return(0,l.handleExternalUrl)(e,d,e.canonicalUrl,e.pushRef.pendingPush);let p=s?(0,n.createHrefFromUrl)(s):void 0;p&&(d.canonicalUrl=p),(0,a.applyFlightData)(_,f,t),d.previousTree=h,d.patchedTree=c,d.cache=f,_=f,h=c}return(0,i.handleMutable)(e,d)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5685:function(e,t){"use strict";var r,n;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{PrefetchKind:function(){return r},ACTION_REFRESH:function(){return u},ACTION_NAVIGATE:function(){return o},ACTION_RESTORE:function(){return l},ACTION_SERVER_PATCH:function(){return a},ACTION_PREFETCH:function(){return i},ACTION_FAST_REFRESH:function(){return c},ACTION_SERVER_ACTION:function(){return s}});let u="refresh",o="navigate",l="restore",a="server-patch",i="prefetch",c="fast-refresh",s="server-action";(n=r||(r={})).AUTO="auto",n.FULL="full",n.TEMPORARY="temporary",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7538:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"reducer",{enumerable:!0,get:function(){return f}});let n=r(5685),u=r(4838),o=r(7811),l=r(9958),a=r(7439),i=r(8593),c=r(4995),s=r(7148),f=function(e,t){switch(t.type){case n.ACTION_NAVIGATE:return(0,u.navigateReducer)(e,t);case n.ACTION_SERVER_PATCH:return(0,o.serverPatchReducer)(e,t);case n.ACTION_RESTORE:return(0,l.restoreReducer)(e,t);case n.ACTION_REFRESH:return(0,a.refreshReducer)(e,t);case n.ACTION_FAST_REFRESH:return(0,c.fastRefreshReducer)(e,t);case n.ACTION_PREFETCH:return(0,i.prefetchReducer)(e,t);case n.ACTION_SERVER_ACTION:return(0,s.serverActionReducer)(e,t);default:throw Error("Unknown action")}};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},8741:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"shouldHardNavigate",{enumerable:!0,get:function(){return function e(t,r){let[u,o]=r,[l,a]=t;if(!(0,n.matchSegment)(l,u))return!!Array.isArray(l);let i=t.length<=2;return!i&&e(t.slice(2),o[a])}}});let n=r(7910);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},2476:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createSearchParamsBailoutProxy",{enumerable:!0,get:function(){return u}});let n=r(5698);function u(){return new Proxy({},{get(e,t){"string"==typeof t&&(0,n.staticGenerationBailout)("searchParams."+t)}})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5698:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"staticGenerationBailout",{enumerable:!0,get:function(){return l}});let n=r(4124),u=r(2287);class o extends Error{constructor(...e){super(...e),this.code="NEXT_STATIC_GEN_BAILOUT"}}let l=(e,t)=>{let r=u.staticGenerationAsyncStorage.getStore();if(null==r?void 0:r.forceStatic)return!0;if(null==r?void 0:r.dynamicShouldError){let{dynamic:r="error",link:n}=t||{};throw new o('Page with `dynamic = "'+r+"\"` couldn't be rendered statically because it used `"+e+"`."+(n?" See more info here: "+n:""))}if(r&&(r.revalidate=0),null==r?void 0:r.isStaticGeneration){let t=new n.DynamicServerError(e);throw r.dynamicUsageDescription=e,r.dynamicUsageStack=t.stack,t}return!1};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4839:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return l}});let n=r(1024),u=n._(r(2265)),o=r(2476);function l(e){let{Component:t,propsForComponent:r}=e,n=(0,o.createSearchParamsBailoutProxy)();return u.default.createElement(t,{searchParams:n,...r})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},9865:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"useReducerWithReduxDevtools",{enumerable:!0,get:function(){return o}});let n=r(2265);function u(e){if(e instanceof Map){let t={};for(let[r,n]of e.entries()){if("function"==typeof n){t[r]="fn()";continue}if("object"==typeof n&&null!==n){if(n.$$typeof){t[r]=n.$$typeof.toString();continue}if(n._bundlerConfig){t[r]="FlightData";continue}}t[r]=u(n)}return t}if("object"==typeof e&&null!==e){let t={};for(let r in e){let n=e[r];if("function"==typeof n){t[r]="fn()";continue}if("object"==typeof n&&null!==n){if(n.$$typeof){t[r]=n.$$typeof.toString();continue}if(n.hasOwnProperty("_bundlerConfig")){t[r]="FlightData";continue}}t[r]=u(n)}return t}return Array.isArray(e)?e.map(u):e}let o=function(e,t){let r=(0,n.useRef)(),o=(0,n.useRef)();(0,n.useEffect)(()=>{if(!r.current&&!1!==o.current){if(void 0===o.current&&void 0===window.__REDUX_DEVTOOLS_EXTENSION__){o.current=!1;return}return r.current=window.__REDUX_DEVTOOLS_EXTENSION__.connect({instanceId:8e3,name:"next-router"}),r.current&&r.current.init(u(t)),()=>{r.current=void 0}}},[t]);let[l,a]=(0,n.useReducer)((t,n)=>{let o=e(t,n);return r.current&&r.current.send(n,u(o)),o},t),i=(0,n.useCallback)(()=>{r.current&&r.current.send({type:"RENDER_SYNC"},u(l))},[l]);return[l,a,i]};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},6070:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"normalizePathTrailingSlash",{enumerable:!0,get:function(){return o}});let n=r(7369),u=r(2590),o=e=>{if(!e.startsWith("/"))return 
e;let{pathname:t,query:r,hash:o}=(0,u.parsePath)(e);return""+(0,n.removeTrailingSlash)(t)+r+o};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5152:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return u}});let n=r(7669);function u(e){let t="function"==typeof reportError?reportError:e=>{window.console.error(e)};e.digest!==n.NEXT_DYNAMIC_NO_SSR_CODE&&t(e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},6656:function(e,t,r){"use strict";var n,u;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{CacheStates:function(){return n},AppRouterContext:function(){return a},LayoutRouterContext:function(){return i},GlobalLayoutRouterContext:function(){return c},TemplateContext:function(){return s}});let o=r(1024),l=o._(r(2265));(u=n||(n={})).LAZY_INITIALIZED="LAZYINITIALIZED",u.DATA_FETCH="DATAFETCH",u.READY="READY";let a=l.default.createContext(null),i=l.default.createContext(null),c=l.default.createContext(null),s=l.default.createContext(null)},216:function(e,t){"use strict";function r(e){let t=5381;for(let r=0;r!t||"("===t[0]&&t.endsWith(")")||"@"===t[0]||("page"===t||"route"===t)&&r===n.length-1?e:e+"/"+t,""))}function o(e,t){return t?e.replace(/\.rsc($|\?)/,"$1"):e}},1067:function(e,t){"use strict";function r(e,t){if(void 0===t&&(t={}),t.onlyHashChange){e();return}let r=document.documentElement,n=r.style.scrollBehavior;r.style.scrollBehavior="auto",t.dontForceLayout||r.getClientRects(),e(),r.style.scrollBehavior=n}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"handleSmoothScroll",{enumerable:!0,get:function(){return r}})},3738:function(e,t){"use strict";function r(e){return/Googlebot|Mediapartners-Google|AdsBot-Google|googleweblight|Storebot-Google|Google-PageRenderer|Bingbot|BingPreview|Slurp|DuckDuckBot|baiduspider|yandex|sogou|LinkedInBot|bitlybot|tumblr|vkShare|quora link preview|facebookexternalhit|facebookcatalog|Twitterbot|applebot|redditbot|Slackbot|Discordbot|WhatsApp|SkypeUriPreview|ia_archiver/i.test(e)}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isBot",{enumerable:!0,get:function(){return r}})},2590:function(e,t){"use strict";function r(e){let t=e.indexOf("#"),r=e.indexOf("?"),n=r>-1&&(t<0||r-1?{pathname:e.substring(0,n?r:t),query:n?e.substring(r,t>-1?t:void 0):"",hash:t>-1?e.slice(t):""}:{pathname:e,query:"",hash:""}}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"parsePath",{enumerable:!0,get:function(){return r}})},7369:function(e,t){"use strict";function r(e){return e.replace(/\/$/,"")||"/"}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"removeTrailingSlash",{enumerable:!0,get:function(){return r}})},8169:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{ServerInsertedHTMLContext:function(){return o},useServerInsertedHTML:function(){return l}});let n=r(8533),u=n._(r(2265)),o=u.default.createContext(null);function l(e){let 
t=(0,u.useContext)(o);t&&t(e)}},2616:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createAsyncLocalStorage",{enumerable:!0,get:function(){return o}});let r=Error("Invariant: AsyncLocalStorage accessed in runtime where it is not available");class n{disable(){throw r}getStore(){}run(){throw r}exit(){throw r}enterWith(){throw r}}let u=globalThis.AsyncLocalStorage;function o(){return u?new u:new n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},6170:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"requestAsyncStorage",{enumerable:!0,get:function(){return u}});let n=r(2616),u=(0,n.createAsyncLocalStorage)();("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},2287:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"staticGenerationAsyncStorage",{enumerable:!0,get:function(){return u}});let n=r(2616),u=(0,n.createAsyncLocalStorage)();("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4040:function(e,t,r){"use strict";var n=r(4887);t.createRoot=n.createRoot,t.hydrateRoot=n.hydrateRoot},4887:function(e,t,r){"use strict";!function e(){if("undefined"!=typeof __REACT_DEVTOOLS_GLOBAL_HOOK__&&"function"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE)try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(e){console.error(e)}}(),e.exports=r(4417)},7950:function(e,t,r){"use strict";/** - * @license React - * react-server-dom-webpack-client.browser.production.min.js - * - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var n=r(4887),u=r(2265),o={stream:!0},l=new Map;function a(e){var t=globalThis.__next_require__(e);return"function"!=typeof t.then||"fulfilled"===t.status?null:(t.then(function(e){t.status="fulfilled",t.value=e},function(e){t.status="rejected",t.reason=e}),t)}function i(){}var c=n.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.Dispatcher,s=Symbol.for("react.element"),f=Symbol.for("react.lazy"),d=Symbol.for("react.default_value"),p=Symbol.iterator,h=Array.isArray,_=new WeakMap,y=u.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.ContextRegistry;function b(e,t,r,n){this.status=e,this.value=t,this.reason=r,this._response=n}function v(e){switch(e.status){case"resolved_model":R(e);break;case"resolved_module":S(e)}switch(e.status){case"fulfilled":return e.value;case"pending":case"blocked":throw e;default:throw e.reason}}function m(e,t){for(var r=0;rd?(h=d,d=3,f++):(h=0,d=3);continue;case 2:44===(v=s[f++])?d=4:_=_<<4|(96s.length&&(v=-1)}var m=s.byteOffset+f;if(-1>>1,u=e[n];if(0>>1;no(i,r))co(s,i)?(e[n]=s,e[c]=r,n=c):(e[n]=i,e[a]=r,n=a);else if(co(s,r))e[n]=s,e[c]=r,n=c;else break}}return t}function o(e,t){var r=e.sortIndex-t.sortIndex;return 0!==r?r:e.id-t.id}if(t.unstable_now=void 0,"object"==typeof performance&&"function"==typeof performance.now){var l,a=performance;t.unstable_now=function(){return a.now()}}else{var i=Date,c=i.now();t.unstable_now=function(){return i.now()-c}}var s=[],f=[],d=1,p=null,h=3,_=!1,y=!1,b=!1,v="function"==typeof setTimeout?setTimeout:null,m="function"==typeof clearTimeout?clearTimeout:null,g="undefined"!=typeof setImmediate?setImmediate:null;function O(e){for(var t=n(f);null!==t;){if(null===t.callback)u(f);else if(t.startTime<=e)u(f),t.sortIndex=t.expirationTime,r(s,t);else break;t=n(f)}}function P(e){if(b=!1,O(e),!y){if(null!==n(s))y=!0,N(E);else{var t=n(f);null!==t&&I(P,t.startTime-e)}}}function E(e,r){y=!1,b&&(b=!1,m(S),S=-1),_=!0;var o=h;try{e:{for(O(r),p=n(s);null!==p&&(!(p.expirationTime>r)||e&&!M());){var l=p.callback;if("function"==typeof l){p.callback=null,h=p.priorityLevel;var a=l(p.expirationTime<=r);if(r=t.unstable_now(),"function"==typeof a){p.callback=a,O(r);var i=!0;break e}p===n(s)&&u(s),O(r)}else u(s);p=n(s)}if(null!==p)i=!0;else{var c=n(f);null!==c&&I(P,c.startTime-r),i=!1}}return i}finally{p=null,h=o,_=!1}}"undefined"!=typeof navigator&&void 0!==navigator.scheduling&&void 0!==navigator.scheduling.isInputPending&&navigator.scheduling.isInputPending.bind(navigator.scheduling);var j=!1,R=null,S=-1,T=5,w=-1;function M(){return!(t.unstable_now()-we||125l?(e.sortIndex=o,r(f,e),null===n(s)&&e===n(f)&&(b?(m(S),S=-1):b=!0,I(P,o-l))):(e.sortIndex=a,r(s,e),y||_||(y=!0,N(E))),e},t.unstable_shouldYield=M,t.unstable_wrapCallback=function(e){var t=h;return function(){var r=h;h=t;try{return e.apply(this,arguments)}finally{h=r}}}},8261:function(e,t,r){"use strict";e.exports=r(1756)},5682:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getSegmentParam",{enumerable:!0,get:function(){return u}});let n=r(4507);function u(e){let t=n.INTERCEPTION_ROUTE_MARKERS.find(t=>e.startsWith(t));return(t&&(e=e.slice(t.length)),e.startsWith("[[...")&&e.endsWith("]]"))?{type:"optional-catchall",param:e.slice(5,-2)}:e.startsWith("[...")&&e.endsWith("]")?{type:"catchall",param:e.slice(4,-1)}:e.startsWith("[")&&e.endsWith("]")?{type:"dynamic",param:e.slice(1,-1)}:null}},4507:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in 
t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{INTERCEPTION_ROUTE_MARKERS:function(){return u},isInterceptionRouteAppPath:function(){return o},extractInterceptionRouteInformation:function(){return l}});let n=r(8896),u=["(..)(..)","(.)","(..)","(...)"];function o(e){return void 0!==e.split("/").find(e=>u.find(t=>e.startsWith(t)))}function l(e){let t,r,o;for(let n of e.split("/"))if(r=u.find(e=>n.startsWith(e))){[t,o]=e.split(r,2);break}if(!t||!r||!o)throw Error(`Invalid interception route: ${e}. Must be in the format //(..|...|..)(..)/`);switch(t=(0,n.normalizeAppPath)(t),r){case"(.)":o="/"===t?`/${o}`:t+"/"+o;break;case"(..)":if("/"===t)throw Error(`Invalid interception route: ${e}. Cannot use (..) marker at the root level, use (.) instead.`);o=t.split("/").slice(0,-1).concat(o).join("/");break;case"(...)":o="/"+o;break;case"(..)(..)":let l=t.split("/");if(l.length<=2)throw Error(`Invalid interception route: ${e}. Cannot use (..)(..) marker at the root level or one level up.`);o=l.slice(0,-2).concat(o).join("/");break;default:throw Error("Invariant: unexpected marker")}return{interceptingRoute:t,interceptedRoute:o}}},4677:function(e,t,r){"use strict";function n(e,t){if(!Object.prototype.hasOwnProperty.call(e,t))throw TypeError("attempted to use private field on non-instance");return e}r.r(t),r.d(t,{_:function(){return n},_class_private_field_loose_base:function(){return n}})},6249:function(e,t,r){"use strict";r.r(t),r.d(t,{_:function(){return u},_class_private_field_loose_key:function(){return u}});var n=0;function u(e){return"__private_"+n+++"_"+e}},1024:function(e,t,r){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}r.r(t),r.d(t,{_:function(){return n},_interop_require_default:function(){return n}})},8533:function(e,t,r){"use strict";function n(e){if("function"!=typeof WeakMap)return null;var t=new WeakMap,r=new WeakMap;return(n=function(e){return e?r:t})(e)}function u(e,t){if(!t&&e&&e.__esModule)return e;if(null===e||"object"!=typeof e&&"function"!=typeof e)return{default:e};var r=n(t);if(r&&r.has(e))return r.get(e);var u={},o=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var l in e)if("default"!==l&&Object.prototype.hasOwnProperty.call(e,l)){var a=o?Object.getOwnPropertyDescriptor(e,l):null;a&&(a.get||a.set)?Object.defineProperty(u,l,a):u[l]=e[l]}return u.default=e,r&&r.set(e,u),u}r.r(t),r.d(t,{_:function(){return u},_interop_require_wildcard:function(){return u}})}}]); \ No newline at end of file diff --git a/spaces/Xhaheen/Lexica_prompt_search/lexica.py b/spaces/Xhaheen/Lexica_prompt_search/lexica.py deleted file mode 100644 index 6317fed79b91ad9296e1fcb044f9ba69db361bf8..0000000000000000000000000000000000000000 --- a/spaces/Xhaheen/Lexica_prompt_search/lexica.py +++ /dev/null @@ -1,90 +0,0 @@ -import requests -import shutil -from PIL import Image -from io import BytesIO -import numpy as np -import matplotlib.pyplot as plt -import pandas as pd -import random -import gradio as gr - -design='india' -def lexica(design,n): - - request=requests.get(f'https://lexica.art/api/v1/search?q={design}') - request.json() - data = request.json() - data_items = list(data.items()) - - random.shuffle(data_items) - - data = dict(data_items) - - image_urls = [] - image_prompts = [] - image_gallery=[] - - for key, value in data.items(): - for i in range(n): - image_url = value[i]['src'] - if isinstance(image_url, list): - image_url = image_url[0] - image_urls.append(image_url) - - - image_prompts.append(value[i]['prompt']) - 
image_gallery.append(value[i]['gallery']) - - images = [] - - # Loop through the image URLs - for url in image_urls: - # Download the image from the URL - response = requests.get(url) - - # Load the image data into PIL format - image = Image.open(BytesIO(response.content)) - - # Add the image to the list - images.append(image) - - -# df = pd.DataFrame(image_prompts, columns=["Lexica Prompt"], index=range(1, len(image_prompts)+1)) - - -# df.index.name = "Sr. No." - df = pd.DataFrame({ 'image_gallery': image_gallery,'image_prompts': image_prompts}) - def make_clickable(val): - return '{}'.format(val, val) - - # df.style.format({'image_gallery': make_clickable}) - df.style.format({'image_prompts': make_clickable}).set_properties(subset=['image_prompts'], width=30) - - for image in images: - - array = np.array(image) - - - return images , df -design='india' -# lexica(design) - -inputs =[ gr.Textbox(label = 'Enter prompt to search Lexica.art'), - gr.Slider(label='Number of images ', minimum = 4, maximum = 20, step = 1, value = 4)] - - -outputs= [gr.Gallery(lable='Output gallery').style(grid=4,height=100,container=True), - gr.Dataframe(label='links and prompts for corresponding images')] - -# Create and launch the interface -interface = gr.Interface(lexica, - inputs=inputs, - outputs=outputs, - examples =[ ['trending digital art', 5], - ['beautiful home', 5], - ['interior design of living room', 5]] - , - title = "" +' 🔍 🖌️🎨 Lexica Art - A Search Engine for Generative Art Prompts and Works '+ "", - description="🔍🖌️ 🎨 lexica huggingface space , Find inspiration and discover new generative artworks with Lexica Art, a search engine built by by @[Sharif shameem](https://twitter.com/sharifshameem) . Explore a vast collection of prompts and corresponding artworks, and let your imagination take over as you create your own masterpieces. 
\n\n Visit @[baith_al_suroor](https://huggingface.co/spaces/Xhaheen/Baith-al-suroor) to redesign your home interiors for FREE \n\n💡🖌️ spaces built with ❤️ @[Xhaheen](https://www.linkedin.com/in/sallu-mandya)") - -interface.launch(debug=True) \ No newline at end of file diff --git a/spaces/XzJosh/Lumi-Bert-VITS2/attentions.py b/spaces/XzJosh/Lumi-Bert-VITS2/attentions.py deleted file mode 100644 index 1192dd7268c20c11010e73a6017ed09549695afe..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Lumi-Bert-VITS2/attentions.py +++ /dev/null @@ -1,344 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import logging - -logger = logging.getLogger(__name__) - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - #if isflow: - # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) - # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) - # self.cond_layer = weight_norm(cond_layer, name='weight') - # self.gin_channels = 256 - self.cond_layer_idx = self.n_layers - if 'gin_channels' in kwargs: - self.gin_channels = kwargs['gin_channels'] - if self.gin_channels != 0: - self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) - # vits2 says 3rd block, so idx is 2 by default - self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2 - logging.debug(self.gin_channels, self.cond_layer_idx) - assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers' - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - def forward(self, x, x_mask, g=None): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - if i == self.cond_layer_idx and g is not None: - g = self.spk_emb_linear(g.transpose(1, 2)) - g = g.transpose(1, 2) - x = x + g - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = 
self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - 
self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. 
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/ZX9966/LOGO-Approximate-Computing-Technology/README.md b/spaces/ZX9966/LOGO-Approximate-Computing-Technology/README.md deleted file mode 100644 index eeae9882481961b1a803273fbfb7c372f90ac05e..0000000000000000000000000000000000000000 --- a/spaces/ZX9966/LOGO-Approximate-Computing-Technology/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: LOGO Approximate Computing Technology -emoji: 😻 -colorFrom: purple -colorTo: blue -sdk: static -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Zeltoria/anime-voice-generator/text/__init__.py b/spaces/Zeltoria/anime-voice-generator/text/__init__.py deleted file mode 100644 index 
663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/Zeltoria/anime-voice-generator/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/aadnk/faster-whisper-webui/LICENSE.md b/spaces/aadnk/faster-whisper-webui/LICENSE.md deleted file mode 100644 index f5f4b8b5ecd27c09e4ef16e9662bcb7bb2bfc76f..0000000000000000000000000000000000000000 --- a/spaces/aadnk/faster-whisper-webui/LICENSE.md +++ /dev/null @@ -1,195 +0,0 @@ -Apache License -============== - -_Version 2.0, January 2004_ -_<>_ - -### Terms and Conditions for use, reproduction, and distribution - -#### 1. Definitions - -“License” shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -“Licensor” shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -“Legal Entity” shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, “control” means **(i)** the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the -outstanding shares, or **(iii)** beneficial ownership of such entity. - -“You” (or “Your”) shall mean an individual or Legal Entity exercising -permissions granted by this License. - -“Source” form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. 
- -“Object” form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -“Work” shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -“Derivative Works” shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -“Contribution” shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -“submitted” means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as “Not a Contribution.” - -“Contributor” shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -#### 2. Grant of Copyright License - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -#### 3. Grant of Patent License - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -#### 4. 
Redistribution - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -* **(a)** You must give any other recipients of the Work or Derivative Works a copy of -this License; and -* **(b)** You must cause any modified files to carry prominent notices stating that You -changed the files; and -* **(c)** You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. - -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -#### 5. Submission of Contributions - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -#### 6. Trademarks - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -#### 7. Disclaimer of Warranty - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -#### 8. 
Limitation of Liability - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -#### 9. Accepting Warranty or Additional Liability - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -_END OF TERMS AND CONDITIONS_ - -### APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets `[]` replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same “printed page” as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/spaces/aai198/ComfyUI/README.md b/spaces/aai198/ComfyUI/README.md deleted file mode 100644 index 9a18b220b506adbf1f72ca73223edc0fc1f6f754..0000000000000000000000000000000000000000 --- a/spaces/aai198/ComfyUI/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: ComfyUI -emoji: 📈 -colorFrom: green -colorTo: pink -sdk: docker -pinned: false ---- - -model: https://huggingface.co/stabilityai/control-lora \ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/psa_mask.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/psa_mask.py deleted file mode 100644 index cdf14e62b50e8d4dd6856c94333c703bcc4c9ab6..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/psa_mask.py +++ /dev/null @@ -1,92 +0,0 @@ -# Modified from https://github.com/hszhao/semseg/blob/master/lib/psa -from torch import nn -from torch.autograd import Function -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', - ['psamask_forward', 'psamask_backward']) - - -class PSAMaskFunction(Function): - - @staticmethod - def symbolic(g, input, psa_type, mask_size): - return g.op( - 'mmcv::MMCVPSAMask', - input, - psa_type_i=psa_type, - mask_size_i=mask_size) - - @staticmethod - def forward(ctx, input, psa_type, mask_size): - ctx.psa_type = psa_type - ctx.mask_size = _pair(mask_size) - ctx.save_for_backward(input) - - h_mask, w_mask = ctx.mask_size - batch_size, channels, h_feature, w_feature = input.size() - assert channels == h_mask * w_mask - output = input.new_zeros( - (batch_size, h_feature * w_feature, h_feature, w_feature)) - - ext_module.psamask_forward( - input, - output, - psa_type=psa_type, - num_=batch_size, - h_feature=h_feature, - w_feature=w_feature, - h_mask=h_mask, - w_mask=w_mask, - half_h_mask=(h_mask - 1) // 2, - half_w_mask=(w_mask - 1) // 2) - return output - - @staticmethod - def backward(ctx, grad_output): - input = ctx.saved_tensors[0] - psa_type = ctx.psa_type - h_mask, w_mask = ctx.mask_size - batch_size, channels, h_feature, w_feature = input.size() - grad_input = grad_output.new_zeros( - (batch_size, channels, h_feature, w_feature)) - ext_module.psamask_backward( - grad_output, - grad_input, - psa_type=psa_type, - num_=batch_size, - h_feature=h_feature, - w_feature=w_feature, - h_mask=h_mask, - w_mask=w_mask, - half_h_mask=(h_mask - 1) // 2, - half_w_mask=(w_mask - 1) // 2) - return grad_input, None, None, None - - -psa_mask = PSAMaskFunction.apply - - -class PSAMask(nn.Module): - - def __init__(self, psa_type, mask_size=None): - super(PSAMask, self).__init__() - assert psa_type in ['collect', 'distribute'] - if psa_type == 'collect': - psa_type_enum = 0 - else: - psa_type_enum = 1 - self.psa_type_enum = psa_type_enum - self.mask_size = mask_size - self.psa_type = psa_type - - def forward(self, input): - return psa_mask(input, self.psa_type_enum, self.mask_size) - - def __repr__(self): - s = self.__class__.__name__ - s += f'(psa_type={self.psa_type}, ' - s += f'mask_size={self.mask_size})' - return s diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/coder/base_bbox_coder.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/coder/base_bbox_coder.py deleted file mode 100644 index cf0b34c7cc2fe561718b0c884990beb40a993643..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/coder/base_bbox_coder.py +++ /dev/null 
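A minimal usage sketch for the `PSAMask` module deleted above (illustrative only; it assumes a full `mmcv` build whose compiled `_ext` ops provide `psamask_forward`/`psamask_backward`, and a CUDA device):

```python
import torch
from mmcv.ops import PSAMask  # upstream copy of the module shown above

# The input channel count must equal h_mask * w_mask (asserted in forward);
# the output has h_feature * w_feature channels.
psa = PSAMask('collect', mask_size=(9, 9))
x = torch.randn(2, 81, 16, 16, device='cuda')  # [batch, 9 * 9, h_feature, w_feature]
out = psa(x)                                   # [2, 256, 16, 16] == [batch, 16 * 16, h, w]
print(psa)                                     # PSAMask(psa_type=collect, mask_size=(9, 9))
```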
@@ -1,17 +0,0 @@ -from abc import ABCMeta, abstractmethod - - -class BaseBBoxCoder(metaclass=ABCMeta): - """Base bounding box coder.""" - - def __init__(self, **kwargs): - pass - - @abstractmethod - def encode(self, bboxes, gt_bboxes): - """Encode deltas between bboxes and ground truth boxes.""" - - @abstractmethod - def decode(self, bboxes, bboxes_pred): - """Decode the predicted bboxes according to prediction and base - boxes.""" diff --git a/spaces/ai-moroz/webui-cpu/README.md b/spaces/ai-moroz/webui-cpu/README.md deleted file mode 100644 index 97d972aa8679e4b367156dcbfff5d58ad47937e9..0000000000000000000000000000000000000000 --- a/spaces/ai-moroz/webui-cpu/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Webui -emoji: 🚧 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -duplicated_from: zwv9/webui-cpu ---- diff --git a/spaces/akhaliq/Real-Time-Voice-Cloning/toolbox/__init__.py b/spaces/akhaliq/Real-Time-Voice-Cloning/toolbox/__init__.py deleted file mode 100644 index 531d6adef076007afd6116eb6472485f540e80de..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Real-Time-Voice-Cloning/toolbox/__init__.py +++ /dev/null @@ -1,357 +0,0 @@ -from toolbox.ui import UI -from encoder import inference as encoder -from synthesizer.inference import Synthesizer -from vocoder import inference as vocoder -from pathlib import Path -from time import perf_counter as timer -from toolbox.utterance import Utterance -import numpy as np -import traceback -import sys -import torch -import librosa -from audioread.exceptions import NoBackendError - -# Use this directory structure for your datasets, or modify it to fit your needs -recognized_datasets = [ - "LibriSpeech/dev-clean", - "LibriSpeech/dev-other", - "LibriSpeech/test-clean", - "LibriSpeech/test-other", - "LibriSpeech/train-clean-100", - "LibriSpeech/train-clean-360", - "LibriSpeech/train-other-500", - "LibriTTS/dev-clean", - "LibriTTS/dev-other", - "LibriTTS/test-clean", - "LibriTTS/test-other", - "LibriTTS/train-clean-100", - "LibriTTS/train-clean-360", - "LibriTTS/train-other-500", - "LJSpeech-1.1", - "VoxCeleb1/wav", - "VoxCeleb1/test_wav", - "VoxCeleb2/dev/aac", - "VoxCeleb2/test/aac", - "VCTK-Corpus/wav48", -] - -#Maximum of generated wavs to keep on memory -MAX_WAVES = 15 - -class Toolbox: - def __init__(self, datasets_root, enc_models_dir, syn_models_dir, voc_models_dir, seed, no_mp3_support): - if not no_mp3_support: - try: - librosa.load("samples/6829_00000.mp3") - except NoBackendError: - print("Librosa will be unable to open mp3 files if additional software is not installed.\n" - "Please install ffmpeg or add the '--no_mp3_support' option to proceed without support for mp3 files.") - exit(-1) - self.no_mp3_support = no_mp3_support - sys.excepthook = self.excepthook - self.datasets_root = datasets_root - self.utterances = set() - self.current_generated = (None, None, None, None) # speaker_name, spec, breaks, wav - - self.synthesizer = None # type: Synthesizer - self.current_wav = None - self.waves_list = [] - self.waves_count = 0 - self.waves_namelist = [] - - # Check for webrtcvad (enables removal of silences in vocoder output) - try: - import webrtcvad - self.trim_silences = True - except: - self.trim_silences = False - - # Initialize the events and the interface - self.ui = UI() - self.reset_ui(enc_models_dir, syn_models_dir, voc_models_dir, seed) - self.setup_events() - self.ui.start() - - def excepthook(self, exc_type, exc_value, exc_tb): - 
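# Installed as sys.excepthook in __init__: print the full traceback and mirror
# the error message in the toolbox log window.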
traceback.print_exception(exc_type, exc_value, exc_tb) - self.ui.log("Exception: %s" % exc_value) - - def setup_events(self): - # Dataset, speaker and utterance selection - self.ui.browser_load_button.clicked.connect(lambda: self.load_from_browser()) - random_func = lambda level: lambda: self.ui.populate_browser(self.datasets_root, - recognized_datasets, - level) - self.ui.random_dataset_button.clicked.connect(random_func(0)) - self.ui.random_speaker_button.clicked.connect(random_func(1)) - self.ui.random_utterance_button.clicked.connect(random_func(2)) - self.ui.dataset_box.currentIndexChanged.connect(random_func(1)) - self.ui.speaker_box.currentIndexChanged.connect(random_func(2)) - - # Model selection - self.ui.encoder_box.currentIndexChanged.connect(self.init_encoder) - def func(): - self.synthesizer = None - self.ui.synthesizer_box.currentIndexChanged.connect(func) - self.ui.vocoder_box.currentIndexChanged.connect(self.init_vocoder) - - # Utterance selection - func = lambda: self.load_from_browser(self.ui.browse_file()) - self.ui.browser_browse_button.clicked.connect(func) - func = lambda: self.ui.draw_utterance(self.ui.selected_utterance, "current") - self.ui.utterance_history.currentIndexChanged.connect(func) - func = lambda: self.ui.play(self.ui.selected_utterance.wav, Synthesizer.sample_rate) - self.ui.play_button.clicked.connect(func) - self.ui.stop_button.clicked.connect(self.ui.stop) - self.ui.record_button.clicked.connect(self.record) - - #Audio - self.ui.setup_audio_devices(Synthesizer.sample_rate) - - #Wav playback & save - func = lambda: self.replay_last_wav() - self.ui.replay_wav_button.clicked.connect(func) - func = lambda: self.export_current_wave() - self.ui.export_wav_button.clicked.connect(func) - self.ui.waves_cb.currentIndexChanged.connect(self.set_current_wav) - - # Generation - func = lambda: self.synthesize() or self.vocode() - self.ui.generate_button.clicked.connect(func) - self.ui.synthesize_button.clicked.connect(self.synthesize) - self.ui.vocode_button.clicked.connect(self.vocode) - self.ui.random_seed_checkbox.clicked.connect(self.update_seed_textbox) - - # UMAP legend - self.ui.clear_button.clicked.connect(self.clear_utterances) - - def set_current_wav(self, index): - self.current_wav = self.waves_list[index] - - def export_current_wave(self): - self.ui.save_audio_file(self.current_wav, Synthesizer.sample_rate) - - def replay_last_wav(self): - self.ui.play(self.current_wav, Synthesizer.sample_rate) - - def reset_ui(self, encoder_models_dir, synthesizer_models_dir, vocoder_models_dir, seed): - self.ui.populate_browser(self.datasets_root, recognized_datasets, 0, True) - self.ui.populate_models(encoder_models_dir, synthesizer_models_dir, vocoder_models_dir) - self.ui.populate_gen_options(seed, self.trim_silences) - - def load_from_browser(self, fpath=None): - if fpath is None: - fpath = Path(self.datasets_root, - self.ui.current_dataset_name, - self.ui.current_speaker_name, - self.ui.current_utterance_name) - name = str(fpath.relative_to(self.datasets_root)) - speaker_name = self.ui.current_dataset_name + '_' + self.ui.current_speaker_name - - # Select the next utterance - if self.ui.auto_next_checkbox.isChecked(): - self.ui.browser_select_next() - elif fpath == "": - return - else: - name = fpath.name - speaker_name = fpath.parent.name - - if fpath.suffix.lower() == ".mp3" and self.no_mp3_support: - self.ui.log("Error: No mp3 file argument was passed but an mp3 file was used") - return - - # Get the wav from the disk. 
We take the wav with the vocoder/synthesizer format for - # playback, so as to have a fair comparison with the generated audio - wav = Synthesizer.load_preprocess_wav(fpath) - self.ui.log("Loaded %s" % name) - - self.add_real_utterance(wav, name, speaker_name) - - def record(self): - wav = self.ui.record_one(encoder.sampling_rate, 5) - if wav is None: - return - self.ui.play(wav, encoder.sampling_rate) - - speaker_name = "user01" - name = speaker_name + "_rec_%05d" % np.random.randint(100000) - self.add_real_utterance(wav, name, speaker_name) - - def add_real_utterance(self, wav, name, speaker_name): - # Compute the mel spectrogram - spec = Synthesizer.make_spectrogram(wav) - self.ui.draw_spec(spec, "current") - - # Compute the embedding - if not encoder.is_loaded(): - self.init_encoder() - encoder_wav = encoder.preprocess_wav(wav) - embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True) - - # Add the utterance - utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, False) - self.utterances.add(utterance) - self.ui.register_utterance(utterance) - - # Plot it - self.ui.draw_embed(embed, name, "current") - self.ui.draw_umap_projections(self.utterances) - - def clear_utterances(self): - self.utterances.clear() - self.ui.draw_umap_projections(self.utterances) - - def synthesize(self): - self.ui.log("Generating the mel spectrogram...") - self.ui.set_loading(1) - - # Update the synthesizer random seed - if self.ui.random_seed_checkbox.isChecked(): - seed = int(self.ui.seed_textbox.text()) - self.ui.populate_gen_options(seed, self.trim_silences) - else: - seed = None - - if seed is not None: - torch.manual_seed(seed) - - # Synthesize the spectrogram - if self.synthesizer is None or seed is not None: - self.init_synthesizer() - - texts = self.ui.text_prompt.toPlainText().split("\n") - embed = self.ui.selected_utterance.embed - embeds = [embed] * len(texts) - specs = self.synthesizer.synthesize_spectrograms(texts, embeds) - breaks = [spec.shape[1] for spec in specs] - spec = np.concatenate(specs, axis=1) - - self.ui.draw_spec(spec, "generated") - self.current_generated = (self.ui.selected_utterance.speaker_name, spec, breaks, None) - self.ui.set_loading(0) - - def vocode(self): - speaker_name, spec, breaks, _ = self.current_generated - assert spec is not None - - # Initialize the vocoder model and make it determinstic, if user provides a seed - if self.ui.random_seed_checkbox.isChecked(): - seed = int(self.ui.seed_textbox.text()) - self.ui.populate_gen_options(seed, self.trim_silences) - else: - seed = None - - if seed is not None: - torch.manual_seed(seed) - - # Synthesize the waveform - if not vocoder.is_loaded() or seed is not None: - self.init_vocoder() - - def vocoder_progress(i, seq_len, b_size, gen_rate): - real_time_factor = (gen_rate / Synthesizer.sample_rate) * 1000 - line = "Waveform generation: %d/%d (batch size: %d, rate: %.1fkHz - %.2fx real time)" \ - % (i * b_size, seq_len * b_size, b_size, gen_rate, real_time_factor) - self.ui.log(line, "overwrite") - self.ui.set_loading(i, seq_len) - if self.ui.current_vocoder_fpath is not None: - self.ui.log("") - wav = vocoder.infer_waveform(spec, progress_callback=vocoder_progress) - else: - self.ui.log("Waveform generation with Griffin-Lim... 
") - wav = Synthesizer.griffin_lim(spec) - self.ui.set_loading(0) - self.ui.log(" Done!", "append") - - # Add breaks - b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size) - b_starts = np.concatenate(([0], b_ends[:-1])) - wavs = [wav[start:end] for start, end, in zip(b_starts, b_ends)] - breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks) - wav = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)]) - - # Trim excessive silences - if self.ui.trim_silences_checkbox.isChecked(): - wav = encoder.preprocess_wav(wav) - - # Play it - wav = wav / np.abs(wav).max() * 0.97 - self.ui.play(wav, Synthesizer.sample_rate) - - # Name it (history displayed in combobox) - # TODO better naming for the combobox items? - wav_name = str(self.waves_count + 1) - - #Update waves combobox - self.waves_count += 1 - if self.waves_count > MAX_WAVES: - self.waves_list.pop() - self.waves_namelist.pop() - self.waves_list.insert(0, wav) - self.waves_namelist.insert(0, wav_name) - - self.ui.waves_cb.disconnect() - self.ui.waves_cb_model.setStringList(self.waves_namelist) - self.ui.waves_cb.setCurrentIndex(0) - self.ui.waves_cb.currentIndexChanged.connect(self.set_current_wav) - - # Update current wav - self.set_current_wav(0) - - #Enable replay and save buttons: - self.ui.replay_wav_button.setDisabled(False) - self.ui.export_wav_button.setDisabled(False) - - # Compute the embedding - # TODO: this is problematic with different sampling rates, gotta fix it - if not encoder.is_loaded(): - self.init_encoder() - encoder_wav = encoder.preprocess_wav(wav) - embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True) - - # Add the utterance - name = speaker_name + "_gen_%05d" % np.random.randint(100000) - utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, True) - self.utterances.add(utterance) - - # Plot it - self.ui.draw_embed(embed, name, "generated") - self.ui.draw_umap_projections(self.utterances) - - def init_encoder(self): - model_fpath = self.ui.current_encoder_fpath - - self.ui.log("Loading the encoder %s... " % model_fpath) - self.ui.set_loading(1) - start = timer() - encoder.load_model(model_fpath) - self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append") - self.ui.set_loading(0) - - def init_synthesizer(self): - model_fpath = self.ui.current_synthesizer_fpath - - self.ui.log("Loading the synthesizer %s... " % model_fpath) - self.ui.set_loading(1) - start = timer() - self.synthesizer = Synthesizer(model_fpath) - self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append") - self.ui.set_loading(0) - - def init_vocoder(self): - model_fpath = self.ui.current_vocoder_fpath - # Case of Griffin-lim - if model_fpath is None: - return - - self.ui.log("Loading the vocoder %s... " % model_fpath) - self.ui.set_loading(1) - start = timer() - vocoder.load_model(model_fpath) - self.ui.log("Done (%dms)." 
% int(1000 * (timer() - start)), "append") - self.ui.set_loading(0) - - def update_seed_textbox(self): - self.ui.update_seed_textbox() diff --git a/spaces/akhaliq/Real-Time-Voice-Cloning/utils/__init__.py b/spaces/akhaliq/Real-Time-Voice-Cloning/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/akhaliq/Spleeter/README.md b/spaces/akhaliq/Spleeter/README.md deleted file mode 100644 index a24a5e84fe7788c6dbf7e11a0fdbf0348c305259..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Spleeter/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Spleeter -emoji: 💻 -colorFrom: purple -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Trainers/DistributedTrainer.py b/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Trainers/DistributedTrainer.py deleted file mode 100644 index 3ae8bf565f151c8746033f7832a17e0e9ea0b6f3..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Trainers/DistributedTrainer.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. 
- -import os -import torch -from torch.utils.tensorboard import SummaryWriter -import random -import numpy as np - -from pkg_resources import parse_version -from model.third_party.HMNet.Models.Trainers.BaseTrainer import BaseTrainer -from model.third_party.HMNet.Utils.GeneralUtils import bcolors -from model.third_party.HMNet.Utils.distributed import distributed - - -class DistributedTrainer(BaseTrainer): - def __init__(self, opt): - super().__init__(opt) - - self.seed = int(self.opt["SEED"]) if "SEED" in self.opt else 0 - - random.seed(self.seed) - np.random.seed(self.seed) - torch.manual_seed(self.seed) - - ( - self.opt["device"], - _, - self.opt["world_size"], - self.opt["local_size"], - self.opt["rank"], - self.opt["local_rank"], - _, - self.opt["run"], - ) = distributed(opt, not self.use_cuda) - - self.getSaveFolder() - self.opt["logFile"] = f"log_{self.opt['rank']}.txt" - self.saveConf() - - self.high_pytorch_version = parse_version(torch.__version__) >= parse_version( - "1.2.0" - ) - if self.opt["rank"] == 0: - print( - bcolors.OKGREEN, - torch.__version__, - bcolors.ENDC, - "is", - "high" if self.high_pytorch_version else "low", - ) - - if self.use_cuda: - # torch.cuda.manual_seed_all(self.seed) - # ddp: only set seed on GPU associated with this process - torch.cuda.manual_seed(self.seed) - - # ddp: print stats and update learning rate - if self.opt["rank"] == 0: - print( - "Number of GPUs is", - bcolors.OKGREEN, - self.opt["world_size"], - bcolors.ENDC, - ) - # print('Boost learning rate from', bcolors.OKGREEN, self.opt['START_LEARNING_RATE'], bcolors.ENDC, 'to', - # bcolors.OKGREEN, self.opt['START_LEARNING_RATE'] * self.opt['world_size'], bcolors.ENDC) - print( - "Effective batch size is increased from", - bcolors.OKGREEN, - self.opt["MINI_BATCH"], - bcolors.ENDC, - "to", - bcolors.OKGREEN, - self.opt["MINI_BATCH"] * self.opt["world_size"], - bcolors.ENDC, - ) - - self.grad_acc_steps = 1 - if "GRADIENT_ACCUMULATE_STEP" in self.opt: - if self.opt["rank"] == 0: - print( - "Gradient accumulation steps =", - bcolors.OKGREEN, - self.opt["GRADIENT_ACCUMULATE_STEP"], - bcolors.ENDC, - ) - # print('Boost learning rate from', bcolors.OKGREEN, self.opt['START_LEARNING_RATE'], bcolors.ENDC, 'to', - # bcolors.OKGREEN, self.opt['START_LEARNING_RATE'] * self.opt['world_size'] * self.opt['GRADIENT_ACCUMULATE_STEP'], bcolors.ENDC) - print( - "Effective batch size =", - bcolors.OKGREEN, - self.opt["MINI_BATCH"] - * self.opt["world_size"] - * self.opt["GRADIENT_ACCUMULATE_STEP"], - bcolors.ENDC, - ) - self.grad_acc_steps = int(self.opt["GRADIENT_ACCUMULATE_STEP"]) - # self.opt['START_LEARNING_RATE'] *= self.opt['world_size'] * self.grad_acc_steps - - def tb_log_scalar(self, name, value, step): - if self.opt["rank"] == 0: - if self.tb_writer is None: - self.tb_writer = SummaryWriter( - os.path.join(self.saveFolder, "tensorboard") - ) - self.tb_writer.add_scalar(name, value, step) - - def log(self, s): - # When 'OFFICIAL' flag is set in the config file, the program does not output logs - if self.is_official: - return - try: - if self.logFileHandle is None: - self.logFileHandle = open( - os.path.join(self.saveFolder, self.opt["logFile"]), "a" - ) - self.logFileHandle.write(s + "\n") - except Exception as e: - print("ERROR while writing log file:", e) - print(s) - - def getSaveFolder(self): - runid = 1 - while True: - saveFolder = os.path.join( - self.opt["datadir"], - self.opt["basename"] + "_conf~", - "run_" + str(runid), - ) - if not os.path.isdir(saveFolder): - if self.opt["world_size"] > 1: - 
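# Synchronize all ranks before rank 0 creates the run folder (and again after),
# so every process settles on the same run id before continuing.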
torch.distributed.barrier() - if self.opt["rank"] == 0: - os.makedirs(saveFolder) - self.saveFolder = saveFolder - if self.opt["world_size"] > 1: - torch.distributed.barrier() - print( - "Saving logs, model, checkpoint, and evaluation in " - + self.saveFolder - ) - return - runid = runid + 1 - - def saveConf(self): - if self.opt["rank"] == 0: - super().saveConf() diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/libritts/voc1/path.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/libritts/voc1/path.sh deleted file mode 100644 index b0ca27c615f70aa29e240222ec370f8ad4e7b45a..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/libritts/voc1/path.sh +++ /dev/null @@ -1,33 +0,0 @@ -# cuda related -export CUDA_HOME=/usr/local/cuda-10.0 -export LD_LIBRARY_PATH="${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}" - -# path related -export PRJ_ROOT="${PWD}/../../.." -if [ -e "${PRJ_ROOT}/tools/venv/bin/activate" ]; then - # shellcheck disable=SC1090 - . "${PRJ_ROOT}/tools/venv/bin/activate" -fi - -# python related -export OMP_NUM_THREADS=1 -export PYTHONIOENCODING=UTF-8 -export MPL_BACKEND=Agg - -# check installation -if ! command -v parallel-wavegan-train > /dev/null; then - echo "Error: It seems setup is not finished." >&2 - echo "Error: Please setup your environment by following README.md" >&2 - return 1 -fi -if ! command -v jq > /dev/null; then - echo "Error: It seems jq is not installed." >&2 - echo "Error: Please install via \`sudo apt-get install jq\`." >&2 - echo "Error: If you do not have sudo, please download from https://stedolan.github.io/jq/download/." >&2 - return 1 -fi -if ! command -v yq > /dev/null; then - echo "Error: It seems yq is not installed." >&2 - echo "Error: Please install via \`pip install yq\`." >&2 - return 1 -fi diff --git a/spaces/akhaliq/deeplab2/model/layers/dual_path_transformer.py b/spaces/akhaliq/deeplab2/model/layers/dual_path_transformer.py deleted file mode 100644 index 806db522ac2ece7304d7d4fb481d85274614e580..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/model/layers/dual_path_transformer.py +++ /dev/null @@ -1,488 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implements dual path transformer layers proposed in MaX-DeepLab [1]. - -Dual-path transformer introduces a global memory path in addition to a CNN path, -allowing bi-directional communication with any CNN layers. - -[1] MaX-DeepLab: End-to-End Panoptic Segmentation with Mask Transformers, - CVPR 2021. - Huiyu Wang, Yukun Zhu, Hartwig Adam, Alan Yuille, Liang-Chieh Chen. 
-""" - -import tensorflow as tf - -from deeplab2.model import utils -from deeplab2.model.layers import activations -from deeplab2.model.layers import convolutions - - -class AttentionOperation(tf.keras.layers.Layer): - """Computes standard 1D multi-head attention with query, key, and value.""" - - def __init__(self, - name, - activation, - transformer_activation, - bn_layer=tf.keras.layers.BatchNormalization): - """Initializes an AttentionOperation layer. - - Args: - name: A string, the name of this layer. - activation: A string, type of activation function to apply. - transformer_activation: A string, type of activation function for - self-attention. Support 'sigmoid' and 'softmax'. - bn_layer: An optional tf.keras.layers.Layer that computes the - normalization (default: tf.keras.layers.BatchNormalization). - """ - super(AttentionOperation, self).__init__(name=name) - # batch_norm_similarity has shape [batch, num_heads, num_query, num_key], - # where num_query and num_key usually equals to height or width or length, - # i.e., spatial dimensions, so batch norm is applied to axis=1 only. - self._batch_norm_similarity = bn_layer(axis=1, name='batch_norm_similarity') - # batch_norm_retrieved_value is done on shape [batch, num_heads, length, - # value_channels], which will be reshaped to the output shape [batch, - # length, value_channels * num_heads], so we apply batch norm on the - # effective channel dimension -- value_channels * num_heads. - self._batch_norm_retrieved_value = bn_layer( - axis=[1, 3], name='batch_norm_retrieved_value') - self._activation_fn = activations.get_activation(activation) - self._transformer_activation_fn = activations.get_activation( - transformer_activation) - - def call(self, inputs, training=False): - """Performs an AttentionOperation. - - Args: - inputs: A tuple of (query, key, value), where query is [batch, num_head, - query_length, channels] tensor, key is a [batch, num_head, key_length, - channels] tensor, and value is a [batch, key_length, num_head, - value_channels] tensor. - training: A boolean, whether the model is in training mode. - - Returns: - output: A [batch, query_length, num_head * value_channels] tensor, the - retrieved value. - """ - # Decode query, key, and value from inputs. - query, key, value = inputs - # Compute attention similarity. - similarity_logits = tf.einsum('bhld,bhmd->bhlm', query, key) - similarity_logits = self._batch_norm_similarity( - similarity_logits, training=training) - # Apply a transformer attention activation function, e.g. softmax. - attention_weights = self._transformer_activation_fn(similarity_logits) - # Retrieve the value content. - retrieved_value = tf.einsum( - 'bhlm,bmhd->bhld', attention_weights, value) - retrieved_value = self._batch_norm_retrieved_value( - retrieved_value, training=training) - retrieved_value = self._activation_fn(retrieved_value) - # Reshape the output. - return utils.transpose_and_reshape_for_attention_operation( - retrieved_value) - - -class DualPathTransformerLayer(tf.keras.layers.Layer): - """Applies a dual path transformer layer, as proposed in MaX-DeepLab [1]. - - Dual-path transformer layer takes a pixel space input and a memory space - input, and performs memory2pixel attention, pixel2memory attention, and - memory2memory self-attention. Note that the pixel2pixel self-attention or - convolution in the pixel space is implemented in axial_layers.py and - axial_blocks.py. Thus, the pixel2pixel operation is not included in this - DualPathTransformerLayer implementation. 
Please use this class together with - a residual block with axial-attention, global-attention, or convolution in - order to construct the full dual path transformer in the paper. - - [1] MaX-DeepLab: End-to-End Panoptic Segmentation with Mask Transformers, - CVPR 2021. - Huiyu Wang, Yukun Zhu, Hartwig Adam, Alan Yuille, Liang-Chieh Chen. - """ - - def __init__(self, - name='dual_path_transformer_layer', - activation='relu', - filters=128, - num_heads=8, - bottleneck_expansion=2, - key_expansion=1, - value_expansion=2, - feed_forward_network_channels=2048, - use_memory_self_attention=True, - use_pixel2memory_feedback_attention=True, - transformer_activation='softmax', - bn_layer=tf.keras.layers.BatchNormalization, - conv_kernel_weight_decay=0.0): - """Initializes a DualPathTransformerLayer. - - This function implements a dual path transformer layer between a pixel space - and a memory space, as described in the MaX-DeepLab paper. In this dual path - transformer, the memory2pixel cross attention and the memory self-attention - share a single activation, e.g. softmax. - - Reference: - MaX-DeepLab: "End-to-End Panoptic Segmentation with Mask Transformers", - CVPR 2021. https://arxiv.org/abs/2012.00759 - Huiyu Wang, Yukun Zhu, Hartwig Adam, Alan Yuille, Liang-Chieh Chen. - - Args: - name: A string, the name of this dual path transformer layer. - activation: A string, type of activation function to apply. - filters: An integer, the base number of channels for the layer. - num_heads: An integer, the number of heads in multi-head attention. - bottleneck_expansion: A float, the channel expansion ratio for the - bottleneck. - key_expansion: A float, the channel expansion ratio for keys. - value_expansion: A float, the channel expansion ratio for values. - feed_forward_network_channels: An integer, the number of channels for the - feed_forward_network. Zero means no feed_forward_network will be - applied. - use_memory_self_attention: A boolean, whether to apply the memory space - self-attention. - use_pixel2memory_feedback_attention: A boolean, whether to apply the - pixel2memory feedback attention. - transformer_activation: A string, type of activation function for - self-attention. Support 'sigmoid' and 'softmax'. - bn_layer: A tf.keras.layers.Layer that computes the normalization - (default: tf.keras.layers.BatchNormalization). - conv_kernel_weight_decay: A float, the weight decay for convolution - kernels. - - Raises: - ValueError: If filters * key_expansion is not divisible by num_heads. - ValueError: If filters * value_expansion is not divisible by num_heads. - """ - super(DualPathTransformerLayer, self).__init__(name=name) - - bottleneck_channels = int(round(filters * bottleneck_expansion)) - total_key_depth = int(round(filters * key_expansion)) - total_value_depth = int(round(filters * value_expansion)) - - if total_key_depth % num_heads: - raise ValueError('Total_key_depth should be divisible by num_heads.') - - if total_value_depth % num_heads: - raise ValueError('Total_value_depth should be divisible by num_heads.') - - # Compute query key value with one convolution and a batch norm layer. The - # initialization std is standard transformer initialization (without batch - # norm), as used in SASA and ViT. In our case, we use batch norm by default, - # so it does not require careful tuning. If one wants to remove all batch - # norms in axial attention, this standard initialization should still be - # good, but a more careful initialization is encouraged. 
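# i.e. the usual 1 / sqrt(d) transformer initialization, with d = bottleneck_channels.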
- initialization_std = bottleneck_channels ** -0.5 - - self._memory_conv1_bn_act = convolutions.Conv1D( - bottleneck_channels, 'memory_conv1_bn_act', - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation=activation, - conv_kernel_weight_decay=conv_kernel_weight_decay) - - self._pixel_conv1_bn_act = convolutions.Conv1D( - bottleneck_channels, 'pixel_conv1_bn_act', - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation=activation, - conv_kernel_weight_decay=conv_kernel_weight_decay) - - # We always compute the query for memory space, since it gathers information - # from the pixel space and thus cannot be removed. We compute the key and - # value for memory space only when they are necessary (i.e. either - # use_memory_self_attention or use_pixel2memory_feedback_attention). - if use_memory_self_attention or use_pixel2memory_feedback_attention: - self._memory_qkv_conv_bn = convolutions.Conv1D( - total_key_depth * 2 + total_value_depth, 'memory_qkv_conv_bn', - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation='none', - conv_kernel_weight_decay=conv_kernel_weight_decay, - kernel_initializer=tf.keras.initializers.TruncatedNormal( - stddev=initialization_std)) - else: - # Compute memory query only if memory key and value are not used. - self._memory_query_conv_bn = convolutions.Conv1D( - total_key_depth, 'memory_query_conv_bn', - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation='none', - conv_kernel_weight_decay=conv_kernel_weight_decay, - kernel_initializer=tf.keras.initializers.TruncatedNormal( - stddev=initialization_std)) - - # For the pixel space, we always compute the key and value, since they - # provide information for the memory space and thus cannot be removed. We - # compute the query for pixel space only when it is necessary (i.e. - # use_pixel2memory_feedback_attention is True). 
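# With the feedback attention, the fused projection below emits query, key and
# value (2 * total_key_depth + total_value_depth channels); otherwise only key
# and value (total_key_depth + total_value_depth channels) are produced.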
- if use_pixel2memory_feedback_attention: - self._pixel_qkv_conv_bn = convolutions.Conv1D( - total_key_depth * 2 + total_value_depth, 'pixel_qkv_conv_bn', - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation='none', - conv_kernel_weight_decay=conv_kernel_weight_decay, - kernel_initializer=tf.keras.initializers.TruncatedNormal( - stddev=initialization_std)) - else: - self._pixel_kv_conv_bn = convolutions.Conv1D( - total_key_depth + total_value_depth, 'pixel_kv_conv_bn', - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation='none', - conv_kernel_weight_decay=conv_kernel_weight_decay, - kernel_initializer=tf.keras.initializers.TruncatedNormal( - stddev=initialization_std)) - self._memory_attention = AttentionOperation( - 'memory_attention', activation, transformer_activation, - bn_layer=bn_layer) - if use_pixel2memory_feedback_attention: - self._pixel_attention = AttentionOperation( - 'pixel_attention', activation, transformer_activation, - bn_layer=bn_layer) - - self._use_memory_self_attention = use_memory_self_attention - self._use_pixel2memory_feedback_attention = ( - use_pixel2memory_feedback_attention) - self._total_key_depth = total_key_depth - self._total_value_depth = total_value_depth - self._num_heads = num_heads - self._bn_layer = bn_layer - self._conv_kernel_weight_decay = conv_kernel_weight_decay - self._activation = activation - self._activation_fn = activations.get_activation(activation) - self._feed_forward_network_channels = feed_forward_network_channels - - def build(self, input_shape_list): - pixel_shape, memory_shape = input_shape_list[:2] - # Here we follow ResNet bottleneck blocks: we apply a batch norm with gamma - # initialized at zero, followed by drop path and an activation function. - # Initializing this gamma at zero ensures that at random initialization of - # the model, the skip connections dominate all residual blocks. In this way, - # all the skip connections construct an identity mapping that passes the - # gradients (without any distortion from the randomly initialized blocks) to - # all residual blocks. This helps training at early epochs. - # Reference: "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour". - # https://arxiv.org/abs/1706.02677 - self._memory_conv3_bn = convolutions.Conv1D( - memory_shape[-1], 'memory_conv3_bn', - use_bias=False, - use_bn=True, - bn_layer=self._bn_layer, - bn_gamma_initializer='zeros', - activation='none', - conv_kernel_weight_decay=self._conv_kernel_weight_decay) - - if self._feed_forward_network_channels > 0: - self._memory_ffn_conv1_bn_act = convolutions.Conv1D( - self._feed_forward_network_channels, 'memory_ffn_conv1_bn_act', - use_bias=False, - use_bn=True, - bn_layer=self._bn_layer, - activation=self._activation, - conv_kernel_weight_decay=self._conv_kernel_weight_decay) - # Again, we follow ResNet bottleneck blocks: we apply a batch norm with - # gamma initialized at zero, followed by drop path and an activation - # function. 
- self._memory_ffn_conv2_bn = convolutions.Conv1D( - memory_shape[-1], 'memory_ffn_conv2_bn', - use_bias=False, - use_bn=True, - bn_layer=self._bn_layer, - bn_gamma_initializer='zeros', - activation='none', - conv_kernel_weight_decay=self._conv_kernel_weight_decay) - if self._use_pixel2memory_feedback_attention: - self._pixel_conv3_bn = convolutions.Conv1D( - pixel_shape[-1], 'pixel_conv3_bn', - use_bias=False, - use_bn=True, - bn_layer=self._bn_layer, - bn_gamma_initializer='zeros', - activation='none', - conv_kernel_weight_decay=self._conv_kernel_weight_decay) - - def call(self, inputs): - """Performs a forward pass. - - We have to define drop_path_masks outside the layer call and pass it into - the layer call, because recompute_grad (gradient checkpointing) does not - allow any randomness within the function call. In addition, recompute_grad - only supports float tensors as inputs. For this reason, the training flag - should be also passed as a float tensor. For the same reason, we cannot - support passing drop_path_random_mask as None. Instead, we ask the users to - pass only the first two tensors when drop path is not used. - - Args: - inputs: A tuple of 3 or 6 tensors, containing - pixel_space_input should be a [batch, num_pixel, pixel_space_channels] - tensor. - memory_space_input should be a [batch, num_memory, - memory_space_channels] tensor. - float_tensor_training should be a float tensor of 0.0 or 1.0, whether - the model is in training mode. - (optional) pixel_space_drop_path_mask is a drop path mask tensor of - shape [batch, 1, 1] for the pixel space. - (optional) memory_space_attention_drop_path_mask is a drop path mask - tensor of shape [batch, 1, 1] for the memory space. - (optional) memory_space_feed_forward_network_drop_path_mask is a drop - path mask tensor of shape [batch, 1, 1] for the memory space feed - forward network. - - Returns: - pixel_space_output: A [batch, num_pixel, pixel_space_channels] tensor. - activated_pixel_space_output: A [batch, num_pixel, pixel_space_channels] - tensor, activated pixel_space_output. - memory_space_output: A [batch, num_memory, memory_space_channels] - tensor. - - Raises: - ValueError: If the length of inputs is not 3 or 6. - """ - if len(inputs) not in (3, 6): - raise ValueError('The length of inputs should be either 3 or 6.') - - # Unpack the inputs. - (pixel_space_input, memory_space_input, float_tensor_training, - pixel_space_drop_path_mask, memory_space_attention_drop_path_mask, - memory_space_feed_forward_network_drop_path_mask) = ( - utils.pad_sequence_with_none(inputs, target_length=6)) - - # Recompute_grad takes only float tensors as inputs. It does not allow - # bools or boolean tensors. For this reason, we cast training to a float - # tensor outside this call, and now we cast it back to a boolean tensor. - training = tf.cast(float_tensor_training, tf.bool) - - # Decode the inputs shapes. - pixel_shape = pixel_space_input.get_shape().as_list() - memory_shape = memory_space_input.get_shape().as_list() - - # Similar to the ResNet bottleneck design, we do an input down projection - # in both the pixel space and the memory space. - memory_space = self._memory_conv1_bn_act(memory_space_input, - training=training) - - # Pixel space input is not activated. 
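# (The pixel residual path carries pre-activation features, so the activation is
# applied here before the down projection; both the pre- and post-activation
# pixel outputs are returned at the end of call().)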
- pixel_space = self._pixel_conv1_bn_act( - self._activation_fn(pixel_space_input), training=training) - - if (self._use_memory_self_attention or - self._use_pixel2memory_feedback_attention): - memory_space_qkv = self._memory_qkv_conv_bn(memory_space, - training=training) - # Split, reshape, and transpose the query, key, and value. - memory_query, memory_key, memory_value = ( - tf.split(memory_space_qkv, [ - self._total_key_depth, self._total_key_depth, - self._total_value_depth], axis=-1)) - memory_key = utils.reshape_and_transpose_for_attention_operation( - memory_key, self._num_heads) - memory_value = tf.reshape(memory_value, [ - -1, memory_shape[1], self._num_heads, - self._total_value_depth // self._num_heads]) - else: - # Compute memory query only if memory key and value are not used. - memory_query = self._memory_query_conv_bn(memory_space, - training=training) - # Reshape and transpose the query. - memory_query = utils.reshape_and_transpose_for_attention_operation( - memory_query, self._num_heads) - - if self._use_pixel2memory_feedback_attention: - pixel_space_qkv = self._pixel_qkv_conv_bn(pixel_space, - training=training) - # Split the query, key, and value. - pixel_query, pixel_key, pixel_value = tf.split( - pixel_space_qkv, [ - self._total_key_depth, self._total_key_depth, - self._total_value_depth], axis=-1) - pixel_query = utils.reshape_and_transpose_for_attention_operation( - pixel_query, self._num_heads) - else: - pixel_space_kv = self._pixel_kv_conv_bn(pixel_space, training=training) - # Split the key and the value. - pixel_key, pixel_value = tf.split(pixel_space_kv, [ - self._total_key_depth, self._total_value_depth], axis=-1) - # Reshape and transpose the key and the value. - pixel_key = utils.reshape_and_transpose_for_attention_operation( - pixel_key, self._num_heads) - pixel_value = tf.reshape(pixel_value, [ - -1, pixel_shape[1], self._num_heads, - self._total_value_depth // self._num_heads]) - - # Compute memory space attention. - if not self._use_memory_self_attention: - # If memory self attention is not used, then only memory2pixel cross - # attention is used for the memory space. In this case, the key and the - # value are simply pixel_key and pixel_value. - memory_attention_key = pixel_key - memory_attention_value = pixel_value - else: - # If we also use memory self attention, the key and the value are the - # concatenation of keys and values in both the pixel space and the - # memory space. - memory_attention_key = tf.concat([pixel_key, memory_key], axis=2) - memory_attention_value = tf.concat([pixel_value, memory_value], axis=1) - - memory_space = self._memory_attention( - (memory_query, memory_attention_key, memory_attention_value), - training=training) - memory_space = self._memory_conv3_bn(memory_space, training=training) - - if memory_space_attention_drop_path_mask is not None: - memory_space = memory_space * memory_space_attention_drop_path_mask - memory_space_output = self._activation_fn( - memory_space_input + memory_space) - - # Apply an optional feed-forward network to the memory space. 
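# (An expand-then-project pair of Conv1D layers with a residual connection, i.e.
# the feed-forward block of a standard transformer; the second conv is zero-gamma
# initialized so the branch contributes nothing at initialization.)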
- if self._feed_forward_network_channels > 0: - memory_space = self._memory_ffn_conv1_bn_act(memory_space_output, - training=training) - memory_space = self._memory_ffn_conv2_bn(memory_space, - training=training) - if memory_space_feed_forward_network_drop_path_mask is not None: - memory_space = (memory_space * - memory_space_feed_forward_network_drop_path_mask) - memory_space_output = self._activation_fn( - memory_space_output + memory_space) - - # Compute pixel space attention and the output projection only when - # pixel2memory_feedback_attention is used. - if self._use_pixel2memory_feedback_attention: - pixel_space = self._pixel_attention( - (pixel_query, memory_key, memory_value), training=training) - pixel_space = self._pixel_conv3_bn(pixel_space, training=training) - if pixel_space_drop_path_mask is not None: - pixel_space = pixel_space * pixel_space_drop_path_mask - pixel_space_output = pixel_space_input + pixel_space - else: - # If pixel2memory_feedback_attention is not used, the pixel_space_input - # is not changed. - pixel_space_output = pixel_space_input - activated_pixel_space_output = self._activation_fn(pixel_space_output) - - # Return the pixel space output and memory space output. Note that we - # return pixel sapce output with and without the activation function, - # because our decoder might use non-activated features. - return (pixel_space_output, - activated_pixel_space_output, - memory_space_output) diff --git a/spaces/akhaliq/deeplab2/model/utils_test.py b/spaces/akhaliq/deeplab2/model/utils_test.py deleted file mode 100644 index 1f3848148a8d5eb447c15ae45b5d883d240b6a8f..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/model/utils_test.py +++ /dev/null @@ -1,201 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Tests for utils.""" - -import itertools - -import numpy as np -import tensorflow as tf - -from deeplab2.model import utils - - -class UtilsTest(tf.test.TestCase): - - def test_resize_logits_graph_mode(self): - @tf.function - def graph_mode_wrapper(*args): - return utils.resize_and_rescale_offsets(*args) - - resized_logits = graph_mode_wrapper(tf.ones((2, 33, 33, 2)), [65, 65]) - resized_logits_2 = graph_mode_wrapper(tf.ones((2, 33, 33, 2)), [33, 33]) - self.assertListEqual(resized_logits.shape.as_list(), [2, 65, 65, 2]) - self.assertListEqual(resized_logits_2.shape.as_list(), [2, 33, 33, 2]) - - def test_resize_logits(self): - offset_logits = tf.convert_to_tensor([[[[2, 2], [2, 1], [2, 0]], - [[1, 2], [1, 1], [1, 0]], - [[0, 2], [0, 1], [0, 0]]]], - dtype=tf.float32) - target_size = [5, 5] - resized_logits = utils.resize_and_rescale_offsets(offset_logits, - target_size) - - self.assertListEqual(resized_logits.shape.as_list(), [1, 5, 5, 2]) - for i in range(5): - for j in range(5): - np.testing.assert_array_almost_equal(resized_logits.numpy()[0, i, j, :], - [4 - i, 4 - j]) - - def test_zero_padding(self): - input_tensor = tf.ones(shape=(2, 5, 5, 2)) - input_tensor_2 = tf.ones(shape=(5, 5, 2)) - padded_tensor = utils.add_zero_padding(input_tensor, kernel_size=5, rank=4) - padded_tensor_2 = utils.add_zero_padding( - input_tensor_2, kernel_size=5, rank=3) - - self.assertEqual(tf.reduce_sum(padded_tensor), 100) - self.assertEqual(tf.reduce_sum(padded_tensor_2), 50) - self.assertListEqual(padded_tensor.shape.as_list(), [2, 9, 9, 2]) - self.assertListEqual(padded_tensor_2.shape.as_list(), [9, 9, 2]) - # Count zero elements. - self.assertEqual(tf.reduce_sum(padded_tensor-1), -224) - self.assertEqual(tf.reduce_sum(padded_tensor_2-1), -112) - - def test_resize_function_error(self): - input_tensor = tf.random.uniform(shape=(2, 10, 10, 2)) - with self.assertRaises(ValueError): - _ = utils.resize_align_corners(input_tensor, [19, 19], - method='not_a_valid_method') - - def test_resize_function_shape(self): - input_tensor = tf.random.uniform(shape=(2, 10, 10, 2)) - result_tensor = utils.resize_align_corners(input_tensor, [19, 19]) - - self.assertListEqual(result_tensor.shape.as_list(), [2, 19, 19, 2]) - - def test_resize_graph_mode(self): - @tf.function - def graph_mode_wrapper(*args): - return utils.resize_align_corners(*args) - - result_tensor = graph_mode_wrapper(tf.ones((2, 33, 33, 2)), [65, 65]) - result_tensor_2 = graph_mode_wrapper(tf.ones((2, 33, 33, 2)), [33, 33]) - self.assertListEqual(result_tensor.shape.as_list(), [2, 65, 65, 2]) - self.assertListEqual(result_tensor_2.shape.as_list(), [2, 33, 33, 2]) - - def test_resize_function_constant_input(self): - input_tensor = tf.ones(shape=(2, 10, 10, 2)) - result_tensor = utils.resize_align_corners(input_tensor, [19, 19]) - - self.assertTrue(tf.keras.backend.all(result_tensor == 1)) - - def test_resize_function_invalid_rank(self): - input_tensor = tf.keras.Input(shape=(None, 2)) - with self.assertRaisesRegex( - ValueError, 'should have rank of 4'): - _ = utils.resize_align_corners(input_tensor, [19, 19]) - - def test_resize_function_v1_compatibility(self): - # Test for odd and even input, and output shapes. 
- input_shapes = [(2, 10, 10, 3), (2, 11, 11, 3)] - target_sizes = [[19, 19], [20, 20]] - methods = ['bilinear', 'nearest'] - - for shape, target_size, method in itertools.product(input_shapes, - target_sizes, methods): - input_tensor = tf.random.uniform(shape=shape) - - result_tensor = utils.resize_align_corners(input_tensor, target_size, - method) - if method == 'bilinear': - expected_tensor = tf.compat.v1.image.resize( - input_tensor, - target_size, - align_corners=True, - method=tf.compat.v1.image.ResizeMethod.BILINEAR) - else: - expected_tensor = tf.compat.v1.image.resize( - input_tensor, - target_size, - align_corners=True, - method=tf.compat.v1.image.ResizeMethod.NEAREST_NEIGHBOR) - - np.testing.assert_equal(result_tensor.numpy(), expected_tensor.numpy()) - - def test_resize_bilinear_v1_compatibility(self): - # Test for odd and even input, and output shapes. - input_shapes = [(2, 10, 10, 3), (2, 11, 11, 3), (1, 11, 11, 64)] - target_sizes = [[19, 19], [20, 20], [10, 10]] - - for shape, target_size in itertools.product(input_shapes, target_sizes): - input_tensor = tf.random.uniform(shape=shape) - result_tensor = utils.resize_bilinear(input_tensor, target_size) - expected_tensor = tf.compat.v1.image.resize( - input_tensor, - target_size, - align_corners=True, - method=tf.compat.v1.image.ResizeMethod.BILINEAR) - self.assertAllClose(result_tensor, expected_tensor) - - def test_make_divisible(self): - value, divisor, min_value = 17, 2, 8 - new_value = utils.make_divisible(value, divisor, min_value) - self.assertAllEqual(new_value, 18) - - value, divisor, min_value = 17, 2, 22 - new_value = utils.make_divisible(value, divisor, min_value) - self.assertAllEqual(new_value, 22) - - def test_transpose_and_reshape_for_attention_operation(self): - images = tf.zeros([2, 8, 11, 2]) - output = utils.transpose_and_reshape_for_attention_operation(images) - self.assertEqual(output.get_shape().as_list(), [2, 11, 16]) - - def test_reshape_and_transpose_for_attention_operation(self): - images = tf.zeros([2, 11, 16]) - output = utils.reshape_and_transpose_for_attention_operation(images, - num_heads=8) - self.assertEqual(output.get_shape().as_list(), [2, 8, 11, 2]) - - def test_safe_setattr_raise_error(self): - layer = tf.keras.layers.Conv2D(1, 1) - with self.assertRaises(ValueError): - utils.safe_setattr(layer, 'filters', 3) - - utils.safe_setattr(layer, 'another_conv', tf.keras.layers.Conv2D(1, 1)) - with self.assertRaises(ValueError): - utils.safe_setattr(layer, 'another_conv', tf.keras.layers.Conv2D(1, 1)) - - def test_pad_sequence_with_none(self): - sequence = [1, 2] - output_2 = utils.pad_sequence_with_none(sequence, target_length=2) - self.assertEqual(output_2, [1, 2]) - output_3 = utils.pad_sequence_with_none(sequence, target_length=3) - self.assertEqual(output_3, [1, 2, None]) - - def test_strided_downsample(self): - inputs = tf.zeros([2, 11, 11]) - output = utils.strided_downsample(inputs, target_size=[6, 6]) - self.assertEqual(output.get_shape().as_list(), [2, 6, 6]) - - def test_get_stuff_class_ids(self): - # num_thing_stuff_classes does not include `void` class. 
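# Worked example: with labels {0, ..., 5}, thing ids {3, 4} and void label 5, the
# remaining stuff ids are {0, 1, 2}; with void label 0 they are {1, 2, 5}.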
- num_thing_stuff_classes = 5 - thing_class_ids = [3, 4] - void_label_list = [5, 0] - expected_stuff_class_ids_list = [ - [0, 1, 2], [1, 2, 5] - ] - for void_label, expected_stuff_class_ids in zip( - void_label_list, expected_stuff_class_ids_list): - stuff_class_ids = utils.get_stuff_class_ids( - num_thing_stuff_classes, thing_class_ids, void_label) - np.testing.assert_equal(stuff_class_ids, - expected_stuff_class_ids) - -if __name__ == '__main__': - tf.test.main() diff --git a/spaces/akhaliq/lama/saicinpainting/evaluation/losses/fid/inception.py b/spaces/akhaliq/lama/saicinpainting/evaluation/losses/fid/inception.py deleted file mode 100644 index e9bd0863b457aaa40c770eaa4acbb142b18fc18b..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/saicinpainting/evaluation/losses/fid/inception.py +++ /dev/null @@ -1,323 +0,0 @@ -import logging - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision import models - -try: - from torchvision.models.utils import load_state_dict_from_url -except ImportError: - from torch.utils.model_zoo import load_url as load_state_dict_from_url - -# Inception weights ported to Pytorch from -# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz -FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' - - -LOGGER = logging.getLogger(__name__) - - -class InceptionV3(nn.Module): - """Pretrained InceptionV3 network returning feature maps""" - - # Index of default block of inception to return, - # corresponds to output of final average pooling - DEFAULT_BLOCK_INDEX = 3 - - # Maps feature dimensionality to their output blocks indices - BLOCK_INDEX_BY_DIM = { - 64: 0, # First max pooling features - 192: 1, # Second max pooling featurs - 768: 2, # Pre-aux classifier features - 2048: 3 # Final average pooling features - } - - def __init__(self, - output_blocks=[DEFAULT_BLOCK_INDEX], - resize_input=True, - normalize_input=True, - requires_grad=False, - use_fid_inception=True): - """Build pretrained InceptionV3 - - Parameters - ---------- - output_blocks : list of int - Indices of blocks to return features of. Possible values are: - - 0: corresponds to output of first max pooling - - 1: corresponds to output of second max pooling - - 2: corresponds to output which is fed to aux classifier - - 3: corresponds to output of final average pooling - resize_input : bool - If true, bilinearly resizes input to width and height 299 before - feeding input to model. As the network without fully connected - layers is fully convolutional, it should be able to handle inputs - of arbitrary size, so resizing might not be strictly needed - normalize_input : bool - If true, scales the input from range (0, 1) to the range the - pretrained Inception network expects, namely (-1, 1) - requires_grad : bool - If true, parameters of the model require gradients. Possibly useful - for finetuning the network - use_fid_inception : bool - If true, uses the pretrained Inception model used in Tensorflow's - FID implementation. If false, uses the pretrained Inception model - available in torchvision. The FID Inception model has different - weights and a slightly different structure from torchvision's - Inception model. If you want to compute FID scores, you are - strongly advised to set this parameter to true to get comparable - results. 
- """ - super(InceptionV3, self).__init__() - - self.resize_input = resize_input - self.normalize_input = normalize_input - self.output_blocks = sorted(output_blocks) - self.last_needed_block = max(output_blocks) - - assert self.last_needed_block <= 3, \ - 'Last possible output block index is 3' - - self.blocks = nn.ModuleList() - - if use_fid_inception: - inception = fid_inception_v3() - else: - inception = models.inception_v3(pretrained=True) - - # Block 0: input to maxpool1 - block0 = [ - inception.Conv2d_1a_3x3, - inception.Conv2d_2a_3x3, - inception.Conv2d_2b_3x3, - nn.MaxPool2d(kernel_size=3, stride=2) - ] - self.blocks.append(nn.Sequential(*block0)) - - # Block 1: maxpool1 to maxpool2 - if self.last_needed_block >= 1: - block1 = [ - inception.Conv2d_3b_1x1, - inception.Conv2d_4a_3x3, - nn.MaxPool2d(kernel_size=3, stride=2) - ] - self.blocks.append(nn.Sequential(*block1)) - - # Block 2: maxpool2 to aux classifier - if self.last_needed_block >= 2: - block2 = [ - inception.Mixed_5b, - inception.Mixed_5c, - inception.Mixed_5d, - inception.Mixed_6a, - inception.Mixed_6b, - inception.Mixed_6c, - inception.Mixed_6d, - inception.Mixed_6e, - ] - self.blocks.append(nn.Sequential(*block2)) - - # Block 3: aux classifier to final avgpool - if self.last_needed_block >= 3: - block3 = [ - inception.Mixed_7a, - inception.Mixed_7b, - inception.Mixed_7c, - nn.AdaptiveAvgPool2d(output_size=(1, 1)) - ] - self.blocks.append(nn.Sequential(*block3)) - - for param in self.parameters(): - param.requires_grad = requires_grad - - def forward(self, inp): - """Get Inception feature maps - - Parameters - ---------- - inp : torch.autograd.Variable - Input tensor of shape Bx3xHxW. Values are expected to be in - range (0, 1) - - Returns - ------- - List of torch.autograd.Variable, corresponding to the selected output - block, sorted ascending by index - """ - outp = [] - x = inp - - if self.resize_input: - x = F.interpolate(x, - size=(299, 299), - mode='bilinear', - align_corners=False) - - if self.normalize_input: - x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1) - - for idx, block in enumerate(self.blocks): - x = block(x) - if idx in self.output_blocks: - outp.append(x) - - if idx == self.last_needed_block: - break - - return outp - - -def fid_inception_v3(): - """Build pretrained Inception model for FID computation - - The Inception model for FID computation uses a different set of weights - and has a slightly different structure than torchvision's Inception. - - This method first constructs torchvision's Inception and then patches the - necessary parts that are different in the FID Inception model. 
- """ - LOGGER.info('fid_inception_v3 called') - inception = models.inception_v3(num_classes=1008, - aux_logits=False, - pretrained=False) - LOGGER.info('models.inception_v3 done') - inception.Mixed_5b = FIDInceptionA(192, pool_features=32) - inception.Mixed_5c = FIDInceptionA(256, pool_features=64) - inception.Mixed_5d = FIDInceptionA(288, pool_features=64) - inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128) - inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160) - inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160) - inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192) - inception.Mixed_7b = FIDInceptionE_1(1280) - inception.Mixed_7c = FIDInceptionE_2(2048) - - LOGGER.info('fid_inception_v3 patching done') - - state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True) - LOGGER.info('fid_inception_v3 weights downloaded') - - inception.load_state_dict(state_dict) - LOGGER.info('fid_inception_v3 weights loaded into model') - - return inception - - -class FIDInceptionA(models.inception.InceptionA): - """InceptionA block patched for FID computation""" - def __init__(self, in_channels, pool_features): - super(FIDInceptionA, self).__init__(in_channels, pool_features) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch5x5 = self.branch5x5_1(x) - branch5x5 = self.branch5x5_2(branch5x5) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] - return torch.cat(outputs, 1) - - -class FIDInceptionC(models.inception.InceptionC): - """InceptionC block patched for FID computation""" - def __init__(self, in_channels, channels_7x7): - super(FIDInceptionC, self).__init__(in_channels, channels_7x7) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch7x7 = self.branch7x7_1(x) - branch7x7 = self.branch7x7_2(branch7x7) - branch7x7 = self.branch7x7_3(branch7x7) - - branch7x7dbl = self.branch7x7dbl_1(x) - branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] - return torch.cat(outputs, 1) - - -class FIDInceptionE_1(models.inception.InceptionE): - """First InceptionE block patched for FID computation""" - def __init__(self, in_channels): - super(FIDInceptionE_1, self).__init__(in_channels) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch3x3 = self.branch3x3_1(x) - branch3x3 = [ - self.branch3x3_2a(branch3x3), - self.branch3x3_2b(branch3x3), - ] - branch3x3 = torch.cat(branch3x3, 1) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = [ - self.branch3x3dbl_3a(branch3x3dbl), - self.branch3x3dbl_3b(branch3x3dbl), - ] - branch3x3dbl = torch.cat(branch3x3dbl, 1) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its 
average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] - return torch.cat(outputs, 1) - - -class FIDInceptionE_2(models.inception.InceptionE): - """Second InceptionE block patched for FID computation""" - def __init__(self, in_channels): - super(FIDInceptionE_2, self).__init__(in_channels) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch3x3 = self.branch3x3_1(x) - branch3x3 = [ - self.branch3x3_2a(branch3x3), - self.branch3x3_2b(branch3x3), - ] - branch3x3 = torch.cat(branch3x3, 1) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = [ - self.branch3x3dbl_3a(branch3x3dbl), - self.branch3x3dbl_3b(branch3x3dbl), - ] - branch3x3dbl = torch.cat(branch3x3dbl, 1) - - # Patch: The FID Inception model uses max pooling instead of average - # pooling. This is likely an error in this specific Inception - # implementation, as other Inception models use average pooling here - # (which matches the description in the paper). - branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] - return torch.cat(outputs, 1) diff --git a/spaces/akhaliq/stylegan3_clip/visualizer.py b/spaces/akhaliq/stylegan3_clip/visualizer.py deleted file mode 100644 index 4168447d7d6ec7481fc76b889d498ac009dc5549..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/visualizer.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import click -import os - -import multiprocessing -import numpy as np -import imgui -import dnnlib -from gui_utils import imgui_window -from gui_utils import imgui_utils -from gui_utils import gl_utils -from gui_utils import text_utils -from viz import renderer -from viz import pickle_widget -from viz import latent_widget -from viz import stylemix_widget -from viz import trunc_noise_widget -from viz import performance_widget -from viz import capture_widget -from viz import layer_widget -from viz import equivariance_widget - -#---------------------------------------------------------------------------- - -class Visualizer(imgui_window.ImguiWindow): - def __init__(self, capture_dir=None): - super().__init__(title='GAN Visualizer', window_width=3840, window_height=2160) - - # Internals. - self._last_error_print = None - self._async_renderer = AsyncRenderer() - self._defer_rendering = 0 - self._tex_img = None - self._tex_obj = None - - # Widget interface. - self.args = dnnlib.EasyDict() - self.result = dnnlib.EasyDict() - self.pane_w = 0 - self.label_w = 0 - self.button_w = 0 - - # Widgets. 
- self.pickle_widget = pickle_widget.PickleWidget(self) - self.latent_widget = latent_widget.LatentWidget(self) - self.stylemix_widget = stylemix_widget.StyleMixingWidget(self) - self.trunc_noise_widget = trunc_noise_widget.TruncationNoiseWidget(self) - self.perf_widget = performance_widget.PerformanceWidget(self) - self.capture_widget = capture_widget.CaptureWidget(self) - self.layer_widget = layer_widget.LayerWidget(self) - self.eq_widget = equivariance_widget.EquivarianceWidget(self) - - if capture_dir is not None: - self.capture_widget.path = capture_dir - - # Initialize window. - self.set_position(0, 0) - self._adjust_font_size() - self.skip_frame() # Layout may change after first frame. - - def close(self): - super().close() - if self._async_renderer is not None: - self._async_renderer.close() - self._async_renderer = None - - def add_recent_pickle(self, pkl, ignore_errors=False): - self.pickle_widget.add_recent(pkl, ignore_errors=ignore_errors) - - def load_pickle(self, pkl, ignore_errors=False): - self.pickle_widget.load(pkl, ignore_errors=ignore_errors) - - def print_error(self, error): - error = str(error) - if error != self._last_error_print: - print('\n' + error + '\n') - self._last_error_print = error - - def defer_rendering(self, num_frames=1): - self._defer_rendering = max(self._defer_rendering, num_frames) - - def clear_result(self): - self._async_renderer.clear_result() - - def set_async(self, is_async): - if is_async != self._async_renderer.is_async: - self._async_renderer.set_async(is_async) - self.clear_result() - if 'image' in self.result: - self.result.message = 'Switching rendering process...' - self.defer_rendering() - - def _adjust_font_size(self): - old = self.font_size - self.set_font_size(min(self.content_width / 120, self.content_height / 60)) - if self.font_size != old: - self.skip_frame() # Layout changed. - - def draw_frame(self): - self.begin_frame() - self.args = dnnlib.EasyDict() - self.pane_w = self.font_size * 45 - self.button_w = self.font_size * 5 - self.label_w = round(self.font_size * 4.5) - - # Detect mouse dragging in the result area. - dragging, dx, dy = imgui_utils.drag_hidden_window('##result_area', x=self.pane_w, y=0, width=self.content_width-self.pane_w, height=self.content_height) - if dragging: - self.latent_widget.drag(dx, dy) - - # Begin control pane. - imgui.set_next_window_position(0, 0) - imgui.set_next_window_size(self.pane_w, self.content_height) - imgui.begin('##control_pane', closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE)) - - # Widgets. - expanded, _visible = imgui_utils.collapsing_header('Network & latent', default=True) - self.pickle_widget(expanded) - self.latent_widget(expanded) - self.stylemix_widget(expanded) - self.trunc_noise_widget(expanded) - expanded, _visible = imgui_utils.collapsing_header('Performance & capture', default=True) - self.perf_widget(expanded) - self.capture_widget(expanded) - expanded, _visible = imgui_utils.collapsing_header('Layers & channels', default=True) - self.layer_widget(expanded) - with imgui_utils.grayed_out(not self.result.get('has_input_transform', False)): - expanded, _visible = imgui_utils.collapsing_header('Equivariance', default=True) - self.eq_widget(expanded) - - # Render. 
- if self.is_skipping_frames(): - pass - elif self._defer_rendering > 0: - self._defer_rendering -= 1 - elif self.args.pkl is not None: - self._async_renderer.set_args(**self.args) - result = self._async_renderer.get_result() - if result is not None: - self.result = result - - # Display. - max_w = self.content_width - self.pane_w - max_h = self.content_height - pos = np.array([self.pane_w + max_w / 2, max_h / 2]) - if 'image' in self.result: - if self._tex_img is not self.result.image: - self._tex_img = self.result.image - if self._tex_obj is None or not self._tex_obj.is_compatible(image=self._tex_img): - self._tex_obj = gl_utils.Texture(image=self._tex_img, bilinear=False, mipmap=False) - else: - self._tex_obj.update(self._tex_img) - zoom = min(max_w / self._tex_obj.width, max_h / self._tex_obj.height) - zoom = np.floor(zoom) if zoom >= 1 else zoom - self._tex_obj.draw(pos=pos, zoom=zoom, align=0.5, rint=True) - if 'error' in self.result: - self.print_error(self.result.error) - if 'message' not in self.result: - self.result.message = str(self.result.error) - if 'message' in self.result: - tex = text_utils.get_texture(self.result.message, size=self.font_size, max_width=max_w, max_height=max_h, outline=2) - tex.draw(pos=pos, align=0.5, rint=True, color=1) - - # End frame. - self._adjust_font_size() - imgui.end() - self.end_frame() - -#---------------------------------------------------------------------------- - -class AsyncRenderer: - def __init__(self): - self._closed = False - self._is_async = False - self._cur_args = None - self._cur_result = None - self._cur_stamp = 0 - self._renderer_obj = None - self._args_queue = None - self._result_queue = None - self._process = None - - def close(self): - self._closed = True - self._renderer_obj = None - if self._process is not None: - self._process.terminate() - self._process = None - self._args_queue = None - self._result_queue = None - - @property - def is_async(self): - return self._is_async - - def set_async(self, is_async): - self._is_async = is_async - - def set_args(self, **args): - assert not self._closed - if args != self._cur_args: - if self._is_async: - self._set_args_async(**args) - else: - self._set_args_sync(**args) - self._cur_args = args - - def _set_args_async(self, **args): - if self._process is None: - self._args_queue = multiprocessing.Queue() - self._result_queue = multiprocessing.Queue() - try: - multiprocessing.set_start_method('spawn') - except RuntimeError: - pass - self._process = multiprocessing.Process(target=self._process_fn, args=(self._args_queue, self._result_queue), daemon=True) - self._process.start() - self._args_queue.put([args, self._cur_stamp]) - - def _set_args_sync(self, **args): - if self._renderer_obj is None: - self._renderer_obj = renderer.Renderer() - self._cur_result = self._renderer_obj.render(**args) - - def get_result(self): - assert not self._closed - if self._result_queue is not None: - while self._result_queue.qsize() > 0: - result, stamp = self._result_queue.get() - if stamp == self._cur_stamp: - self._cur_result = result - return self._cur_result - - def clear_result(self): - assert not self._closed - self._cur_args = None - self._cur_result = None - self._cur_stamp += 1 - - @staticmethod - def _process_fn(args_queue, result_queue): - renderer_obj = renderer.Renderer() - cur_args = None - cur_stamp = None - while True: - args, stamp = args_queue.get() - while args_queue.qsize() > 0: - args, stamp = args_queue.get() - if args != cur_args or stamp != cur_stamp: - result = 
renderer_obj.render(**args) - if 'error' in result: - result.error = renderer.CapturedException(result.error) - result_queue.put([result, stamp]) - cur_args = args - cur_stamp = stamp - -#---------------------------------------------------------------------------- - -@click.command() -@click.argument('pkls', metavar='PATH', nargs=-1) -@click.option('--capture-dir', help='Where to save screenshot captures', metavar='PATH', default=None) -@click.option('--browse-dir', help='Specify model path for the \'Browse...\' button', metavar='PATH') -def main( - pkls, - capture_dir, - browse_dir -): - """Interactive model visualizer. - - Optional PATH argument can be used specify which .pkl file to load. - """ - viz = Visualizer(capture_dir=capture_dir) - - if browse_dir is not None: - viz.pickle_widget.search_dirs = [browse_dir] - - # List pickles. - if len(pkls) > 0: - for pkl in pkls: - viz.add_recent_pickle(pkl) - viz.load_pickle(pkls[0]) - else: - pretrained = [ - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhq-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhqu-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhqu-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-metfaces-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-metfacesu-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-afhqv2-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhqu-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhqu-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfaces-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqcat-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqdog-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqv2-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqwild-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-brecahad-512x512.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-celebahq-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-cifar10-32x32.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-512x512.pkl', - 
'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhqu-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhqu-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-lsundog-256x256.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-metfaces-1024x1024.pkl', - 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-metfacesu-1024x1024.pkl' - ] - - # Populate recent pickles list with pretrained model URLs. - for url in pretrained: - viz.add_recent_pickle(url) - - # Run. - while not viz.should_close(): - viz.draw_frame() - viz.close() - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - main() - -#---------------------------------------------------------------------------- diff --git a/spaces/alamin655/Personas/conversant/demo/__init__.py b/spaces/alamin655/Personas/conversant/demo/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/auth.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/auth.py deleted file mode 100644 index eeface39ae62c3975ff535e6b1f79f2c28fbf888..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/auth.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.auth -~~~~~~~~~~~~~ - -This module contains the authentication handlers for Requests. -""" - -import os -import re -import time -import hashlib -import threading -import warnings - -from base64 import b64encode - -from .compat import urlparse, str, basestring -from .cookies import extract_cookies_to_jar -from ._internal_utils import to_native_string -from .utils import parse_dict_header - -CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' -CONTENT_TYPE_MULTI_PART = 'multipart/form-data' - - -def _basic_auth_str(username, password): - """Returns a Basic Auth string.""" - - # "I want us to put a big-ol' comment on top of it that - # says that this behaviour is dumb but we need to preserve - # it because people are relying on it." - # - Lukasa - # - # These are here solely to maintain backwards compatibility - # for things like ints. This will be removed in 3.0.0. - if not isinstance(username, basestring): - warnings.warn( - "Non-string usernames will no longer be supported in Requests " - "3.0.0. Please convert the object you've passed in ({!r}) to " - "a string or bytes object in the near future to avoid " - "problems.".format(username), - category=DeprecationWarning, - ) - username = str(username) - - if not isinstance(password, basestring): - warnings.warn( - "Non-string passwords will no longer be supported in Requests " - "3.0.0. 
Please convert the object you've passed in ({!r}) to " - "a string or bytes object in the near future to avoid " - "problems.".format(type(password)), - category=DeprecationWarning, - ) - password = str(password) - # -- End Removal -- - - if isinstance(username, str): - username = username.encode('latin1') - - if isinstance(password, str): - password = password.encode('latin1') - - authstr = 'Basic ' + to_native_string( - b64encode(b':'.join((username, password))).strip() - ) - - return authstr - - -class AuthBase(object): - """Base class that all auth implementations derive from""" - - def __call__(self, r): - raise NotImplementedError('Auth hooks must be callable.') - - -class HTTPBasicAuth(AuthBase): - """Attaches HTTP Basic Authentication to the given Request object.""" - - def __init__(self, username, password): - self.username = username - self.password = password - - def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) - - def __ne__(self, other): - return not self == other - - def __call__(self, r): - r.headers['Authorization'] = _basic_auth_str(self.username, self.password) - return r - - -class HTTPProxyAuth(HTTPBasicAuth): - """Attaches HTTP Proxy Authentication to a given Request object.""" - - def __call__(self, r): - r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) - return r - - -class HTTPDigestAuth(AuthBase): - """Attaches HTTP Digest Authentication to the given Request object.""" - - def __init__(self, username, password): - self.username = username - self.password = password - # Keep state in per-thread local storage - self._thread_local = threading.local() - - def init_per_thread_state(self): - # Ensure state is initialized just once per-thread - if not hasattr(self._thread_local, 'init'): - self._thread_local.init = True - self._thread_local.last_nonce = '' - self._thread_local.nonce_count = 0 - self._thread_local.chal = {} - self._thread_local.pos = None - self._thread_local.num_401_calls = None - - def build_digest_header(self, method, url): - """ - :rtype: str - """ - - realm = self._thread_local.chal['realm'] - nonce = self._thread_local.chal['nonce'] - qop = self._thread_local.chal.get('qop') - algorithm = self._thread_local.chal.get('algorithm') - opaque = self._thread_local.chal.get('opaque') - hash_utf8 = None - - if algorithm is None: - _algorithm = 'MD5' - else: - _algorithm = algorithm.upper() - # lambdas assume digest modules are imported at the top level - if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': - def md5_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.md5(x).hexdigest() - hash_utf8 = md5_utf8 - elif _algorithm == 'SHA': - def sha_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha1(x).hexdigest() - hash_utf8 = sha_utf8 - elif _algorithm == 'SHA-256': - def sha256_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha256(x).hexdigest() - hash_utf8 = sha256_utf8 - elif _algorithm == 'SHA-512': - def sha512_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha512(x).hexdigest() - hash_utf8 = sha512_utf8 - - KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) - - if hash_utf8 is None: - return None - - # XXX not implemented yet - entdig = None - p_parsed = urlparse(url) - #: path is request-uri defined in RFC 2616 which should not be empty - path = p_parsed.path or "/" - if p_parsed.query: - path += '?' 
+ p_parsed.query - - A1 = '%s:%s:%s' % (self.username, realm, self.password) - A2 = '%s:%s' % (method, path) - - HA1 = hash_utf8(A1) - HA2 = hash_utf8(A2) - - if nonce == self._thread_local.last_nonce: - self._thread_local.nonce_count += 1 - else: - self._thread_local.nonce_count = 1 - ncvalue = '%08x' % self._thread_local.nonce_count - s = str(self._thread_local.nonce_count).encode('utf-8') - s += nonce.encode('utf-8') - s += time.ctime().encode('utf-8') - s += os.urandom(8) - - cnonce = (hashlib.sha1(s).hexdigest()[:16]) - if _algorithm == 'MD5-SESS': - HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) - - if not qop: - respdig = KD(HA1, "%s:%s" % (nonce, HA2)) - elif qop == 'auth' or 'auth' in qop.split(','): - noncebit = "%s:%s:%s:%s:%s" % ( - nonce, ncvalue, cnonce, 'auth', HA2 - ) - respdig = KD(HA1, noncebit) - else: - # XXX handle auth-int. - return None - - self._thread_local.last_nonce = nonce - - # XXX should the partial digests be encoded too? - base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ - 'response="%s"' % (self.username, realm, nonce, path, respdig) - if opaque: - base += ', opaque="%s"' % opaque - if algorithm: - base += ', algorithm="%s"' % algorithm - if entdig: - base += ', digest="%s"' % entdig - if qop: - base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) - - return 'Digest %s' % (base) - - def handle_redirect(self, r, **kwargs): - """Reset num_401_calls counter on redirects.""" - if r.is_redirect: - self._thread_local.num_401_calls = 1 - - def handle_401(self, r, **kwargs): - """ - Takes the given response and tries digest-auth, if needed. - - :rtype: requests.Response - """ - - # If response is not 4xx, do not auth - # See https://github.com/psf/requests/issues/3772 - if not 400 <= r.status_code < 500: - self._thread_local.num_401_calls = 1 - return r - - if self._thread_local.pos is not None: - # Rewind the file position indicator of the body to where - # it was to resend the request. - r.request.body.seek(self._thread_local.pos) - s_auth = r.headers.get('www-authenticate', '') - - if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: - - self._thread_local.num_401_calls += 1 - pat = re.compile(r'digest ', flags=re.IGNORECASE) - self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) - - # Consume content and release the original connection - # to allow our new request to reuse the same one. - r.content - r.close() - prep = r.request.copy() - extract_cookies_to_jar(prep._cookies, r.request, r.raw) - prep.prepare_cookies(prep._cookies) - - prep.headers['Authorization'] = self.build_digest_header( - prep.method, prep.url) - _r = r.connection.send(prep, **kwargs) - _r.history.append(r) - _r.request = prep - - return _r - - self._thread_local.num_401_calls = 1 - return r - - def __call__(self, r): - # Initialize per-thread state, if needed - self.init_per_thread_state() - # If we have a saved nonce, skip the 401 - if self._thread_local.last_nonce: - r.headers['Authorization'] = self.build_digest_header(r.method, r.url) - try: - self._thread_local.pos = r.body.tell() - except AttributeError: - # In the case of HTTPDigestAuth being reused and the body of - # the previous request was a file-like object, pos has the - # file position of the previous body. Ensure it's set to - # None. 
- self._thread_local.pos = None - r.register_hook('response', self.handle_401) - r.register_hook('response', self.handle_redirect) - self._thread_local.num_401_calls = 1 - - return r - - def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) - - def __ne__(self, other): - return not self == other diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/Evaluation/ROUGEEval.py b/spaces/aliabd/SummerTime/model/third_party/HMNet/Evaluation/ROUGEEval.py deleted file mode 100644 index e5fb9a95319404cb2ed1d87711947599a1fb7a46..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/third_party/HMNet/Evaluation/ROUGEEval.py +++ /dev/null @@ -1,354 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -import os -import re -import shutil -from string import ascii_uppercase -from tqdm.auto import tqdm -from model.third_party.HMNet.Evaluation.OldROUGEEval import rouge -from model.third_party.HMNet.ThirdParty.ROUGE import pyrouge -from shutil import copyfile -from mpi4py import MPI -import torch -import logging -import json - - -def write_json_res( - output_file, tokenizers, x_ids, y_ids, x_tokens, y_tokens, predictions, gts -): - data = [] - - # for x_id, y_id, x_token, y_token, preds, gt in zip(x_ids, y_ids, x_tokens, y_tokens, predictions, gts): - # x_id = tokenizers[0].decode(x_id, skip_special_tokens=False) if x_id.dim() == 1 else tokenizers[0].convert_tokens_to_string(x_token) - # y_id = tokenizers[1].decode(y_id, skip_special_tokens=False) if y_id.dim() == 1 else tokenizers[1].convert_tokens_to_string(y_token) - for x_token, y_token, preds, gt in zip(x_tokens, y_tokens, predictions, gts): - data.append( - { - # 'x_ids': x_id, - # 'y_ids': y_id, - "x_tokens": x_token if isinstance(x_token, str) else " ".join(x_token), - "y_tokens": y_token if isinstance(y_token, str) else " ".join(y_token), - "predictions": preds, - "gt": gt, - } - ) - - json.dump(data, output_file, indent=4, ensure_ascii=False) - - -logger = logging.getLogger(__name__) - -""" -This code can only be run within docker "rouge", because of the usage of rouge-perl -""" - - -"""" In ROUGE parlance, your summaries are ‘system’ summaries and the gold standard summaries are ‘model’ summaries. -The summaries should be in separate folders, whose paths are set with the system_dir and model_dir variables. -All summaries should contain one sentence per line.""" - - -class ROUGEEval: - """ - Wrapper class for pyrouge. - Compute ROUGE given predictions and references for summarization evaluation. 
- """ - - def __init__(self, run_dir, save_dir, opt): - self.run_dir = run_dir - self.save_dir = save_dir - self.opt = opt - - # use relative path to make it work on Philly - self.pyrouge_dir = os.path.join( - os.path.dirname(__file__), "../ThirdParty/ROUGE/ROUGE-1.5.5/" - ) - - self.eval_batches_num = self.opt.get("EVAL_BATCHES_NUM", float("Inf")) - self.best_score = -float("Inf") - self.best_res = {} - - def reset_best_score(self, set_high=False): - if set_high: - self.best_score = float("Inf") - else: - self.best_score = -float("Inf") - - def make_html_safe(self, s): - s = s.replace("<", "<") - s = s.replace(">", ">") - return s - - def print_to_rouge_dir( - self, summaries, dir, suffix, split_chars, special_char_dict=None - ): - for idx, summary in enumerate(summaries): - fname = os.path.join(dir, "%06d_%s.txt" % (idx, suffix)) - with open(fname, "wb") as f: - sents = re.split(r"(?') - # else: - # new_predicitons.append(pred) - # return new_predicitons, new_groundtruths - - def _convert_tokens_to_string(self, tokenizer, tokens): - if "EVAL_TOKENIZED" in self.opt: - tokens = [t for t in tokens if t not in tokenizer.all_special_tokens] - if "EVAL_LOWERCASE" in self.opt: - tokens = [t.lower() for t in tokens] - if "EVAL_TOKENIZED" in self.opt: - return " ".join(tokens) - else: - return tokenizer.decode( - tokenizer.convert_tokens_to_ids(tokens), skip_special_tokens=True - ) - - def eval_batches(self, module, dev_batches, save_folder, label=""): - max_sent_len = int(self.opt["MAX_GEN_LENGTH"]) - - logger.info( - "Decoding current model ... \nSaving folder is {}".format(save_folder) - ) - - predictions = [] # prediction of tokens from model - x_tokens = [] # input tokens - y_tokens = [] # groundtruths tokens - x_ids = [] # input token ids - y_ids = [] # groundtruths token ids - gts = [] # groundtruths string - got_better_score = False - # err = 0 - if not isinstance(module.tokenizer, list): - encoder_tokenizer = module.tokenizer - decoder_tokenizer = module.tokenizer - elif len(module.tokenizer) == 1: - encoder_tokenizer = module.tokenizer[0] - decoder_tokenizer = module.tokenizer[0] - elif len(module.tokenizer) == 2: - encoder_tokenizer = module.tokenizer[0] - decoder_tokenizer = module.tokenizer[1] - else: - assert False, f"len(module.tokenizer) > 2" - - with torch.no_grad(): - for j, dev_batch in enumerate(dev_batches): - for b in dev_batch: - if torch.is_tensor(dev_batch[b]): - dev_batch[b] = dev_batch[b].to(self.opt["device"]) - - beam_search_res = module( - dev_batch, beam_search=True, max_sent_len=max_sent_len - ) - pred = [ - [t[0] for t in x] if len(x) > 0 else [[]] for x in beam_search_res - ] - predictions.extend( - [ - [ - self._convert_tokens_to_string(decoder_tokenizer, tt) - for tt in t - ] - for t in pred - ] - ) - - gts.extend( - [ - self._convert_tokens_to_string(decoder_tokenizer, t) - for t in dev_batch["decoder_tokens"] - ] - ) - x_tokens.extend(dev_batch["encoder_tokens"]) - y_tokens.extend(dev_batch["decoder_tokens"]) - - if ("DEBUG" in self.opt and j >= 10) or j >= self.eval_batches_num: - # in debug mode (decode first 10 batches) ortherwise decode first self.eval_batches_num bathes - break - - # use MPI to gather results from all processes / GPUs - # the result of the gather operation is a list of sublists - # each sublist corresponds to the list created on one of the MPI processes (or GPUs, respectively) - # we flatten this list into a "simple" list - assert len(predictions) == len( - gts - ), "len(predictions): {0}, len(gts): {1}".format(len(predictions), 
len(gts)) - comm = MPI.COMM_WORLD - predictions = comm.gather(predictions, root=0) - x_tokens = comm.gather(x_tokens, root=0) - y_tokens = comm.gather(y_tokens, root=0) - # if GPU numbers are high (>=8), passing x_ids, y_ids to a rank 0 will cause out of memory - # x_ids = comm.gather(x_ids, root=0) - # y_ids = comm.gather(y_ids, root=0) - gts = comm.gather(gts, root=0) - if self.opt["rank"] == 0: - # flatten lists - predictions = [item for sublist in predictions for item in sublist] - y_tokens = [item for sublist in y_tokens for item in sublist] - x_tokens = [item for sublist in x_tokens for item in sublist] - # x_ids = [item for sublist in x_ids for item in sublist] - # y_ids = [item for sublist in y_ids for item in sublist] - gts = [item for sublist in gts for item in sublist] - # import pdb; pdb.set_trace() - assert ( - len(predictions) == len(y_tokens) == len(x_tokens) == len(gts) - ), "len(predictions): {0}, len(y_tokens): {1}, len(x_tokens): {2}, len(gts): {3}".format( - len(predictions), len(y_tokens), len(x_tokens), len(gts) - ) - - # write intermediate results only on rank 0 - if not os.path.isdir(os.path.join(save_folder, "intermediate_results")): - os.makedirs(os.path.join(save_folder, "intermediate_results")) - top_1_predictions = [pred[0] for pred in predictions] - with open( - os.path.join( - save_folder, "intermediate_results", "res_" + label + ".json" - ), - "w", - encoding="utf-8", - ) as output_file: - write_json_res( - output_file, - [encoder_tokenizer, decoder_tokenizer], - x_ids, - y_ids, - x_tokens, - y_tokens, - predictions, - gts, - ) - try: - result = self.eval(top_1_predictions, gts) - except Exception as e: - logger.exception("ROUGE Eval ERROR") - result = {} - score = -float("Inf") - pass # this happens when no overlapping between pred and gts - else: - rouge_su4 = rouge(top_1_predictions, gts) # f, prec, recall - result = { - "ROUGE_1": result["rouge_1_f_score"] * 100.0, - "ROUGE_1_Prc": result["rouge_1_precision"] * 100.0, - "ROUGE_1_Rcl": result["rouge_1_recall"] * 100.0, - "ROUGE_2": result["rouge_2_f_score"] * 100.0, - "ROUGE_2_Prc": result["rouge_2_precision"] * 100.0, - "ROUGE_2_Rcl": result["rouge_2_recall"] * 100.0, - "ROUGE_L": result["rouge_l_f_score"] * 100.0, - "ROUGE_L_Prc": result["rouge_l_precision"] * 100.0, - "ROUGE_L_Rcl": result["rouge_l_recall"] * 100.0, - "ROUGE_SU4": rouge_su4["rouge_su4_f_score"] * 100.0, - } - - score = result["ROUGE_1"] - if score > self.best_score: - copyfile( - os.path.join( - save_folder, - "intermediate_results", - "res_" + label + ".json", - ), - os.path.join( - save_folder, - "intermediate_results", - "res_" + label + ".best.json", - ), - ) - self.best_score = score - self.best_res = result - got_better_score = True - - else: - result = {} - score = -float("Inf") - got_better_score = False - - return result, score, got_better_score - - def eval(self, predictions, groundtruths): - # predictions, groundtruths = self.filter_empty(predictions, groundtruths) - predictions = [self.make_html_safe(w) for w in predictions] - groundtruths = [self.make_html_safe(w) for w in groundtruths] - pred_dir = os.path.join(self.save_dir, "predictions") - if os.path.exists(pred_dir): - shutil.rmtree(pred_dir) - os.makedirs(pred_dir) - - gt_dir = os.path.join(self.save_dir, "groundtruths") - if os.path.exists(gt_dir): - shutil.rmtree(gt_dir) - os.makedirs(gt_dir) - - special_char_dict = self.print_to_rouge_dir_gt( - groundtruths, gt_dir, "gt", "SPLIT_CHARS_FOR_EVAL" in self.opt - ) - self.print_to_rouge_dir( - predictions, - pred_dir, 
- "pred", - "SPLIT_CHARS_FOR_EVAL" in self.opt, - special_char_dict, - ) - - r = pyrouge.Rouge155(self.pyrouge_dir) - r.system_dir = pred_dir - r.model_dir = gt_dir - r.system_filename_pattern = "(\d+)_pred.txt" - r.model_filename_pattern = "[A-Z].#ID#_gt.txt" - results = r.output_to_dict(r.convert_and_evaluate()) - return results diff --git a/spaces/aliceoq/vozes-da-loirinha/i18n/locale_diff.py b/spaces/aliceoq/vozes-da-loirinha/i18n/locale_diff.py deleted file mode 100644 index 257277965e0866a86d0361863a8f1b408c4f71ab..0000000000000000000000000000000000000000 --- a/spaces/aliceoq/vozes-da-loirinha/i18n/locale_diff.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -import os -from collections import OrderedDict - -# Define the standard file name -standard_file = "zh_CN.json" - -# Find all JSON files in the directory -dir_path = "./" -languages = [ - f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file -] - -# Load the standard file -with open(standard_file, "r", encoding="utf-8") as f: - standard_data = json.load(f, object_pairs_hook=OrderedDict) - -# Loop through each language file -for lang_file in languages: - # Load the language file - with open(lang_file, "r", encoding="utf-8") as f: - lang_data = json.load(f, object_pairs_hook=OrderedDict) - - # Find the difference between the language file and the standard file - diff = set(standard_data.keys()) - set(lang_data.keys()) - - miss = set(lang_data.keys()) - set(standard_data.keys()) - - # Add any missing keys to the language file - for key in diff: - lang_data[key] = key - - # Del any extra keys to the language file - for key in miss: - del lang_data[key] - - # Sort the keys of the language file to match the order of the standard file - lang_data = OrderedDict( - sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0])) - ) - - # Save the updated language file - with open(lang_file, "w", encoding="utf-8") as f: - json.dump(lang_data, f, ensure_ascii=False, indent=4) - f.write("\n") diff --git a/spaces/allknowingroger/Image-Models-Test53/app.py b/spaces/allknowingroger/Image-Models-Test53/app.py deleted file mode 100644 index ce8e589fa10ffd7dab580bd5b2b5ee9efcd1af79..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test53/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "Yntec/Lunar", - "digiplay/KawaiiRealisticAnimeMix_A0.3", - "Daniil-plotnikov/russian-vision-v5-1", - "EarthnDusk/Earth-AniMix-Super-Flat", - "juliajoanna/lora-trained-xl-fred-6", - "dpwm/lora-trained-xl-4", - "zhangyi617/driving-lora", - "LinoyTsaban/huggy_v12", - "digiplay/PotoPhotoRealism_v1", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = 
gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test9/README.md b/spaces/allknowingroger/Image-Models-Test9/README.md deleted file mode 100644 index f6f1b5cb6a66573263eb5a43484e15bafd4e74ee..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test9/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test8 ---- - - \ No newline at end of file diff --git a/spaces/amankishore/sjc/sd1/ldm/modules/attention.py b/spaces/amankishore/sjc/sd1/ldm/modules/attention.py deleted file mode 100644 index f4eff39ccb6d75daa764f6eb70a7cef024fb5a3f..0000000000000000000000000000000000000000 --- a/spaces/amankishore/sjc/sd1/ldm/modules/attention.py +++ /dev/null @@ -1,261 +0,0 @@ -from inspect import isfunction -import math -import torch -import torch.nn.functional as F -from torch import nn, einsum -from einops import rearrange, repeat - -from ldm.modules.diffusionmodules.util 
import checkpoint - - -def exists(val): - return val is not None - - -def uniq(arr): - return{el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def init_(tensor): - dim = tensor.shape[-1] - std = 1 / math.sqrt(dim) - tensor.uniform_(-std, std) - return tensor - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class LinearAttention(nn.Module): - def __init__(self, dim, heads=4, dim_head=32): - super().__init__() - self.heads = heads - hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) - self.to_out = nn.Conv2d(hidden_dim, dim, 1) - - def forward(self, x): - b, c, h, w = x.shape - qkv = self.to_qkv(x) - q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) - k = k.softmax(dim=-1) - context = torch.einsum('bhdn,bhen->bhde', k, v) - out = torch.einsum('bhde,bhdn->bhen', context, q) - out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) - return self.to_out(out) - - -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) - - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) - h_ = self.proj_out(h_) - - return x+h_ - - -class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - - self.to_q = nn.Linear(query_dim, 
inner_dim, bias=False) - self.to_k = nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), - nn.Dropout(dropout) - ) - - def forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - if exists(mask): - mask = rearrange(mask, 'b ... -> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) - - -class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): - super().__init__() - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) - - def _forward(self, x, context=None): - x = self.attn1(self.norm1(x)) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. - First, project the input (aka embedding) - and reshape to b, t, d. - Then apply standard transformer action. 
- Finally, reshape to image - """ - def __init__(self, in_channels, n_heads, d_head, - depth=1, dropout=0., context_dim=None): - super().__init__() - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - - self.proj_in = nn.Conv2d(in_channels, - inner_dim, - kernel_size=1, - stride=1, - padding=0) - - self.transformer_blocks = nn.ModuleList( - [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) - for d in range(depth)] - ) - - self.proj_out = zero_module(nn.Conv2d(inner_dim, - in_channels, - kernel_size=1, - stride=1, - padding=0)) - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - x = self.proj_in(x) - x = rearrange(x, 'b c h w -> b (h w) c') - for block in self.transformer_blocks: - x = block(x, context=context) - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) - x = self.proj_out(x) - return x + x_in \ No newline at end of file diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/common/pa_memorybarrier.h b/spaces/amarchheda/ChordDuplicate/portaudio/src/common/pa_memorybarrier.h deleted file mode 100644 index 0dca6aa42fae6937dc31fed7c29d641e5db5845d..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/src/common/pa_memorybarrier.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * $Id: pa_memorybarrier.h 1240 2007-07-17 13:05:07Z bjornroche $ - * Portable Audio I/O Library - * Memory barrier utilities - * - * Author: Bjorn Roche, XO Audio, LLC - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** - @file pa_memorybarrier.h - @ingroup common_src -*/ - -/**************** - * Some memory barrier primitives based on the system. - * right now only OS X, FreeBSD, and Linux are supported. 
In addition to providing - * memory barriers, these functions should ensure that data cached in registers - * is written out to cache where it can be snooped by other CPUs. (ie, the volatile - * keyword should not be required) - * - * the primitives that must be defined are: - * - * PaUtil_FullMemoryBarrier() - * PaUtil_ReadMemoryBarrier() - * PaUtil_WriteMemoryBarrier() - * - ****************/ - -#if defined(__APPLE__) -# include - /* Here are the memory barrier functions. Mac OS X only provides - full memory barriers, so the three types of barriers are the same, - however, these barriers are superior to compiler-based ones. */ -# define PaUtil_FullMemoryBarrier() OSMemoryBarrier() -# define PaUtil_ReadMemoryBarrier() OSMemoryBarrier() -# define PaUtil_WriteMemoryBarrier() OSMemoryBarrier() -#elif defined(__GNUC__) - /* GCC >= 4.1 has built-in intrinsics. We'll use those */ -# if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) -# define PaUtil_FullMemoryBarrier() __sync_synchronize() -# define PaUtil_ReadMemoryBarrier() __sync_synchronize() -# define PaUtil_WriteMemoryBarrier() __sync_synchronize() - /* as a fallback, GCC understands volatile asm and "memory" to mean it - * should not reorder memory read/writes */ - /* Note that it is not clear that any compiler actually defines __PPC__, - * it can probably removed safely. */ -# elif defined( __ppc__ ) || defined( __powerpc__) || defined( __PPC__ ) -# define PaUtil_FullMemoryBarrier() asm volatile("sync":::"memory") -# define PaUtil_ReadMemoryBarrier() asm volatile("sync":::"memory") -# define PaUtil_WriteMemoryBarrier() asm volatile("sync":::"memory") -# elif defined( __i386__ ) || defined( __i486__ ) || defined( __i586__ ) || \ - defined( __i686__ ) || defined( __x86_64__ ) -# define PaUtil_FullMemoryBarrier() asm volatile("mfence":::"memory") -# define PaUtil_ReadMemoryBarrier() asm volatile("lfence":::"memory") -# define PaUtil_WriteMemoryBarrier() asm volatile("sfence":::"memory") -# else -# ifdef ALLOW_SMP_DANGERS -# warning Memory barriers not defined on this system or system unknown -# warning For SMP safety, you should fix this. -# define PaUtil_FullMemoryBarrier() -# define PaUtil_ReadMemoryBarrier() -# define PaUtil_WriteMemoryBarrier() -# else -# error Memory barriers are not defined on this system. You can still compile by defining ALLOW_SMP_DANGERS, but SMP safety will not be guaranteed. -# endif -# endif -#elif (_MSC_VER >= 1400) && !defined(_WIN32_WCE) -# include -# pragma intrinsic(_ReadWriteBarrier) -# pragma intrinsic(_ReadBarrier) -# pragma intrinsic(_WriteBarrier) -/* note that MSVC intrinsics _ReadWriteBarrier(), _ReadBarrier(), _WriteBarrier() are just compiler barriers *not* memory barriers */ -# define PaUtil_FullMemoryBarrier() _ReadWriteBarrier() -# define PaUtil_ReadMemoryBarrier() _ReadBarrier() -# define PaUtil_WriteMemoryBarrier() _WriteBarrier() -#elif defined(_WIN32_WCE) -# define PaUtil_FullMemoryBarrier() -# define PaUtil_ReadMemoryBarrier() -# define PaUtil_WriteMemoryBarrier() -#elif defined(_MSC_VER) || defined(__BORLANDC__) -# define PaUtil_FullMemoryBarrier() _asm { lock add [esp], 0 } -# define PaUtil_ReadMemoryBarrier() _asm { lock add [esp], 0 } -# define PaUtil_WriteMemoryBarrier() _asm { lock add [esp], 0 } -#else -# ifdef ALLOW_SMP_DANGERS -# warning Memory barriers not defined on this system or system unknown -# warning For SMP safety, you should fix this. 
-# define PaUtil_FullMemoryBarrier() -# define PaUtil_ReadMemoryBarrier() -# define PaUtil_WriteMemoryBarrier() -# else -# error Memory barriers are not defined on this system. You can still compile by defining ALLOW_SMP_DANGERS, but SMP safety will not be guaranteed. -# endif -#endif diff --git a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/sidebar.css b/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/sidebar.css deleted file mode 100644 index 310887c60443abd491c3162f62e44b5ec333e50d..0000000000000000000000000000000000000000 --- a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/sidebar.css +++ /dev/null @@ -1,197 +0,0 @@ -.sidebar { - max-width: 260px; - padding: var(--section-gap); - flex-shrink: 0; - display: flex; - flex-direction: column; - justify-content: space-between; -} - -.sidebar .title { - font-size: 14px; - font-weight: 500; -} - -.sidebar .conversation-sidebar { - padding: 8px 12px; - display: flex; - gap: 18px; - align-items: center; - user-select: none; - justify-content: space-between; -} - -.sidebar .conversation-sidebar .left { - cursor: pointer; - display: flex; - align-items: center; - gap: 10px; -} - -.sidebar i { - color: var(--conversations); - cursor: pointer; -} - -.sidebar .top { - display: flex; - flex-direction: column; - overflow: hidden; - gap: 16px; - padding-right: 8px; -} - -.sidebar .top:hover { - overflow: auto; -} - -.sidebar .info { - padding: 8px 12px 0px 12px; - display: flex; - align-items: center; - justify-content: center; - user-select: none; - background: transparent; - width: 100%; - border: none; - text-decoration: none; -} - -.sidebar .info span { - color: var(--conversations); - line-height: 1.5; - font-size: 0.75rem; -} - -.sidebar .info i::before { - margin-right: 8px; -} - -.sidebar-footer { - width: 100%; - margin-top: 16px; - display: flex; - flex-direction: column; -} - -.sidebar-footer button { - cursor: pointer; - user-select: none; - background: transparent; -} - -.sidebar.shown { - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; - z-index: 1000; -} - -.sidebar.shown .box { - background-color: #16171a; - width: 80%; - height: 100%; - overflow-y: auto; -} - -@keyframes spinner { - to { - transform: rotate(360deg); - } -} - -/* scrollbar */ -.sidebar .top::-webkit-scrollbar { - width: 4px; - padding: 8px 0px; -} - -.sidebar .top::-webkit-scrollbar-track { - background-color: #ffffff00; -} - -.sidebar .top::-webkit-scrollbar-thumb { - background-color: #555555; - border-radius: 10px; -} - -.spinner:before { - content: ""; - box-sizing: border-box; - position: absolute; - top: 50%; - left: 45%; - width: 20px; - height: 20px; - border-radius: 50%; - border: 1px solid var(--conversations); - border-top-color: white; - animation: spinner 0.6s linear infinite; -} - -.menu-button { - display: none !important; - position: absolute; - z-index: 100000; - top: 0; - left: 0; - margin: 10px; - font-size: 1rem; - cursor: pointer; - width: 30px; - height: 30px; - justify-content: center; - align-items: center; - transition: 0.33s; -} - -.menu-button i { - transition: 0.33s; -} - -.rotated { - transform: rotate(360deg); -} - -.menu-button.rotated { - position: fixed; - top: 10px; - left: 10px; - z-index: 1001; -} - -@media screen and (max-width: 990px) { - .sidebar { - display: none; - width: 100%; - max-width: none; - } - - .menu-button { - display: flex !important; - } -} - -@media (max-width: 990px) { - .sidebar .top { - padding-top: 48px; - } -} - -@media (min-width: 768px) { - .sidebar.shown { - position: static; - 
width: auto; - height: auto; - background-color: transparent; - } - - .sidebar.shown .box { - background-color: #16171a; - width: auto; - height: auto; - overflow-y: auto; - } -} diff --git a/spaces/anikfaisal/weather_image_classifier/README.md b/spaces/anikfaisal/weather_image_classifier/README.md deleted file mode 100644 index 03d34dd99d6796a0c5bbf6839e3f998ce2ab74b9..0000000000000000000000000000000000000000 --- a/spaces/anikfaisal/weather_image_classifier/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Weather Image Classifier -emoji: 🦀 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/anonymous-pits/pits/text/cleaners.py b/spaces/anonymous-pits/pits/text/cleaners.py deleted file mode 100644 index e935fa24d9495cb0f74492e779fd998dfe81e261..0000000000000000000000000000000000000000 --- a/spaces/anonymous-pits/pits/text/cleaners.py +++ /dev/null @@ -1,89 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -from unicodedata import normalize - -from .numbers import normalize_numbers - - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -_cht_norm = [(re.compile(r'[%s]' % x[0]), x[1]) for x in [ - ('。.;', '.'), - (',、', ', '), - ('?', '?'), - ('!', '!'), - ('─‧', '-'), - ('…', '...'), - ('《》「」『』〈〉()', "'"), - (':︰', ':'), - (' ', ' ') -]] - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - -def expand_numbers(text): - return normalize_numbers(text) - -def lowercase(text): - return text.lower() - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - -def convert_to_ascii(text): - return unidecode(text) - -def english_cleaners(text): - '''Pipeline for English text, including abbreviation expansion.''' - text = convert_to_ascii(text) - #text = lowercase(text) - text = expand_numbers(text) - text = expand_abbreviations(text) - text = collapse_whitespace(text) - return text - -def korean_cleaners(text): - '''Pipeline for Korean text, including collapses whitespace.''' - text = collapse_whitespace(text) - text = normalize('NFKD', text) - return text - diff --git a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/css/html_cai_style.css b/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/css/html_cai_style.css deleted file mode 100644 index f601de3248b7ee94d6da58026354f8b9afeb9297..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/css/html_cai_style.css +++ /dev/null @@ -1,91 +0,0 @@ -.chat { - margin-left: auto; - margin-right: auto; - max-width: 800px; - height: calc(100vh - 306px); - overflow-y: auto; - padding-right: 20px; - display: flex; - flex-direction: column-reverse; - word-break: break-word; - overflow-wrap: anywhere; -} - -.message { - display: grid; - grid-template-columns: 60px minmax(0, 1fr); - padding-bottom: 25px; - font-size: 15px; - font-family: Helvetica, Arial, sans-serif; - line-height: 1.428571429; -} - -.circle-you { - width: 50px; - height: 50px; - background-color: rgb(238, 78, 59); - border-radius: 50%; -} - -.circle-bot { - width: 50px; - height: 50px; - background-color: rgb(59, 78, 244); - border-radius: 50%; -} - -.circle-bot img, -.circle-you img { - border-radius: 50%; - width: 100%; - height: 100%; - object-fit: cover; -} - -.text {} - -.text p { - margin-top: 5px; -} - -.username { - font-weight: bold; -} - -.message-body {} - -.message-body img { - max-width: 300px; - max-height: 300px; - border-radius: 20px; -} - -.message-body p { - margin-bottom: 0 !important; - font-size: 15px !important; - line-height: 1.428571429 !important; -} - -.message-body li { - margin-top: 0.5em !important; - margin-bottom: 0.5em !important; -} - -.message-body li > p { - display: inline !important; -} - -.message-body code { - overflow-x: auto; -} -.message-body :not(pre) > code { - white-space: normal !important; -} - -.dark .message-body p em { - color: rgb(138, 138, 138) !important; -} - -.message-body p em { - color: rgb(110, 110, 110) !important; -} \ No newline at end of file diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/__init__.py 
b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/__init__.py deleted file mode 100644 index 2bd2e5f0875a84633e707702cd7d628409b12057..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Dict, List, Union - -from TTS.utils.generic_utils import find_module - - -def setup_model(config: "Coqpit", samples: Union[List[List], List[Dict]] = None) -> "BaseTTS": - print(" > Using model: {}".format(config.model)) - # fetch the right model implementation. - if "base_model" in config and config["base_model"] is not None: - MyModel = find_module("TTS.tts.models", config.base_model.lower()) - else: - MyModel = find_module("TTS.tts.models", config.model.lower()) - model = MyModel.init_from_config(config=config, samples=samples) - return model diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/aux_tests/test_readme.py b/spaces/artificialguybr/video-dubbing/TTS/tests/aux_tests/test_readme.py deleted file mode 100644 index 32b26fc6fc38beb79303522f265b7f638bca4df3..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/tests/aux_tests/test_readme.py +++ /dev/null @@ -1,9 +0,0 @@ -import subprocess -import sys -from pathlib import Path - - -def test_readme_up_to_date(): - root = Path(__file__).parent.parent.parent - sync_readme = root / "scripts" / "sync_readme.py" - subprocess.check_call([sys.executable, str(sync_readme), "--check"], cwd=root) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v4/tests/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v4/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attr/_compat.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attr/_compat.py deleted file mode 100644 index dc0cb02b6435bb4cb90f1d9645150d32286379a5..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attr/_compat.py +++ /dev/null @@ -1,261 +0,0 @@ -# SPDX-License-Identifier: MIT - -from __future__ import absolute_import, division, print_function - -import platform -import sys -import threading -import types -import warnings - - -PY2 = sys.version_info[0] == 2 -PYPY = platform.python_implementation() == "PyPy" -PY36 = sys.version_info[:2] >= (3, 6) -HAS_F_STRINGS = PY36 -PY310 = sys.version_info[:2] >= (3, 10) - - -if PYPY or PY36: - ordered_dict = dict -else: - from collections import OrderedDict - - ordered_dict = OrderedDict - - -if PY2: - from collections import Mapping, Sequence - - from UserDict import IterableUserDict - - # We 'bundle' isclass instead of using inspect as importing inspect is - # fairly expensive (order of 10-15 ms for a modern machine in 2016) - def isclass(klass): - return isinstance(klass, (type, types.ClassType)) - - def new_class(name, bases, kwds, exec_body): - """ - A minimal stub of types.new_class that we need for make_class. - """ - ns = {} - exec_body(ns) - - return type(name, bases, ns) - - # TYPE is used in exceptions, repr(int) is different on Python 2 and 3. - TYPE = "type" - - def iteritems(d): - return d.iteritems() - - # Python 2 is bereft of a read-only dict proxy, so we make one! - class ReadOnlyDict(IterableUserDict): - """ - Best-effort read-only dict wrapper. 
- """ - - def __setitem__(self, key, val): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item assignment" - ) - - def update(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'update'" - ) - - def __delitem__(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item deletion" - ) - - def clear(self): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'clear'" - ) - - def pop(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'pop'" - ) - - def popitem(self): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'popitem'" - ) - - def setdefault(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'setdefault'" - ) - - def __repr__(self): - # Override to be identical to the Python 3 version. - return "mappingproxy(" + repr(self.data) + ")" - - def metadata_proxy(d): - res = ReadOnlyDict() - res.data.update(d) # We blocked update, so we have to do it like this. - return res - - def just_warn(*args, **kw): # pragma: no cover - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ - -else: # Python 3 and later. - from collections.abc import Mapping, Sequence # noqa - - def just_warn(*args, **kw): - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ - warnings.warn( - "Running interpreter doesn't sufficiently support code object " - "introspection. Some features like bare super() or accessing " - "__class__ will not work with slotted classes.", - RuntimeWarning, - stacklevel=2, - ) - - def isclass(klass): - return isinstance(klass, type) - - TYPE = "class" - - def iteritems(d): - return d.items() - - new_class = types.new_class - - def metadata_proxy(d): - return types.MappingProxyType(dict(d)) - - -def make_set_closure_cell(): - """Return a function of two arguments (cell, value) which sets - the value stored in the closure cell `cell` to `value`. - """ - # pypy makes this easy. (It also supports the logic below, but - # why not do the easy/fast thing?) - if PYPY: - - def set_closure_cell(cell, value): - cell.__setstate__((value,)) - - return set_closure_cell - - # Otherwise gotta do it the hard way. - - # Create a function that will set its first cellvar to `value`. - def set_first_cellvar_to(value): - x = value - return - - # This function will be eliminated as dead code, but - # not before its reference to `x` forces `x` to be - # represented as a closure cell rather than a local. - def force_x_to_be_a_cell(): # pragma: no cover - return x - - try: - # Extract the code object and make sure our assumptions about - # the closure behavior are correct. - if PY2: - co = set_first_cellvar_to.func_code - else: - co = set_first_cellvar_to.__code__ - if co.co_cellvars != ("x",) or co.co_freevars != (): - raise AssertionError # pragma: no cover - - # Convert this code object to a code object that sets the - # function's first _freevar_ (not cellvar) to the argument. 
- if sys.version_info >= (3, 8): - # CPython 3.8+ has an incompatible CodeType signature - # (added a posonlyargcount argument) but also added - # CodeType.replace() to do this without counting parameters. - set_first_freevar_code = co.replace( - co_cellvars=co.co_freevars, co_freevars=co.co_cellvars - ) - else: - args = [co.co_argcount] - if not PY2: - args.append(co.co_kwonlyargcount) - args.extend( - [ - co.co_nlocals, - co.co_stacksize, - co.co_flags, - co.co_code, - co.co_consts, - co.co_names, - co.co_varnames, - co.co_filename, - co.co_name, - co.co_firstlineno, - co.co_lnotab, - # These two arguments are reversed: - co.co_cellvars, - co.co_freevars, - ] - ) - set_first_freevar_code = types.CodeType(*args) - - def set_closure_cell(cell, value): - # Create a function using the set_first_freevar_code, - # whose first closure cell is `cell`. Calling it will - # change the value of that cell. - setter = types.FunctionType( - set_first_freevar_code, {}, "setter", (), (cell,) - ) - # And call it to set the cell. - setter(value) - - # Make sure it works on this interpreter: - def make_func_with_cell(): - x = None - - def func(): - return x # pragma: no cover - - return func - - if PY2: - cell = make_func_with_cell().func_closure[0] - else: - cell = make_func_with_cell().__closure__[0] - set_closure_cell(cell, 100) - if cell.cell_contents != 100: - raise AssertionError # pragma: no cover - - except Exception: - return just_warn - else: - return set_closure_cell - - -set_closure_cell = make_set_closure_cell() - -# Thread-local global to track attrs instances which are already being repr'd. -# This is needed because there is no other (thread-safe) way to pass info -# about the instances that are already being repr'd through the call stack -# in order to ensure we don't perform infinite recursion. -# -# For instance, if an instance contains a dict which contains that instance, -# we need to know that we're already repr'ing the outside instance from within -# the dict's repr() call. -# -# This lives here rather than in _make.py so that the functions in _make.py -# don't have a direct reference to the thread-local in their globals dict. -# If they have such a reference, it breaks cloudpickle. -repr_context = threading.local() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/model.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/model.py deleted file mode 100644 index ad1c1764893d0257c0e75eeb61b0a359e89adf0f..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/model.py +++ /dev/null @@ -1,617 +0,0 @@ -import types -import weakref - -from .lock import allocate_lock -from .error import CDefError, VerificationError, VerificationMissing - -# type qualifiers -Q_CONST = 0x01 -Q_RESTRICT = 0x02 -Q_VOLATILE = 0x04 - -def qualify(quals, replace_with): - if quals & Q_CONST: - replace_with = ' const ' + replace_with.lstrip() - if quals & Q_VOLATILE: - replace_with = ' volatile ' + replace_with.lstrip() - if quals & Q_RESTRICT: - # It seems that __restrict is supported by gcc and msvc. 
- # If you hit some different compiler, add a #define in - # _cffi_include.h for it (and in its copies, documented there) - replace_with = ' __restrict ' + replace_with.lstrip() - return replace_with - - -class BaseTypeByIdentity(object): - is_array_type = False - is_raw_function = False - - def get_c_name(self, replace_with='', context='a C file', quals=0): - result = self.c_name_with_marker - assert result.count('&') == 1 - # some logic duplication with ffi.getctype()... :-( - replace_with = replace_with.strip() - if replace_with: - if replace_with.startswith('*') and '&[' in result: - replace_with = '(%s)' % replace_with - elif not replace_with[0] in '[(': - replace_with = ' ' + replace_with - replace_with = qualify(quals, replace_with) - result = result.replace('&', replace_with) - if '$' in result: - raise VerificationError( - "cannot generate '%s' in %s: unknown type name" - % (self._get_c_name(), context)) - return result - - def _get_c_name(self): - return self.c_name_with_marker.replace('&', '') - - def has_c_name(self): - return '$' not in self._get_c_name() - - def is_integer_type(self): - return False - - def get_cached_btype(self, ffi, finishlist, can_delay=False): - try: - BType = ffi._cached_btypes[self] - except KeyError: - BType = self.build_backend_type(ffi, finishlist) - BType2 = ffi._cached_btypes.setdefault(self, BType) - assert BType2 is BType - return BType - - def __repr__(self): - return '<%s>' % (self._get_c_name(),) - - def _get_items(self): - return [(name, getattr(self, name)) for name in self._attrs_] - - -class BaseType(BaseTypeByIdentity): - - def __eq__(self, other): - return (self.__class__ == other.__class__ and - self._get_items() == other._get_items()) - - def __ne__(self, other): - return not self == other - - def __hash__(self): - return hash((self.__class__, tuple(self._get_items()))) - - -class VoidType(BaseType): - _attrs_ = () - - def __init__(self): - self.c_name_with_marker = 'void&' - - def build_backend_type(self, ffi, finishlist): - return global_cache(self, ffi, 'new_void_type') - -void_type = VoidType() - - -class BasePrimitiveType(BaseType): - def is_complex_type(self): - return False - - -class PrimitiveType(BasePrimitiveType): - _attrs_ = ('name',) - - ALL_PRIMITIVE_TYPES = { - 'char': 'c', - 'short': 'i', - 'int': 'i', - 'long': 'i', - 'long long': 'i', - 'signed char': 'i', - 'unsigned char': 'i', - 'unsigned short': 'i', - 'unsigned int': 'i', - 'unsigned long': 'i', - 'unsigned long long': 'i', - 'float': 'f', - 'double': 'f', - 'long double': 'f', - 'float _Complex': 'j', - 'double _Complex': 'j', - '_Bool': 'i', - # the following types are not primitive in the C sense - 'wchar_t': 'c', - 'char16_t': 'c', - 'char32_t': 'c', - 'int8_t': 'i', - 'uint8_t': 'i', - 'int16_t': 'i', - 'uint16_t': 'i', - 'int32_t': 'i', - 'uint32_t': 'i', - 'int64_t': 'i', - 'uint64_t': 'i', - 'int_least8_t': 'i', - 'uint_least8_t': 'i', - 'int_least16_t': 'i', - 'uint_least16_t': 'i', - 'int_least32_t': 'i', - 'uint_least32_t': 'i', - 'int_least64_t': 'i', - 'uint_least64_t': 'i', - 'int_fast8_t': 'i', - 'uint_fast8_t': 'i', - 'int_fast16_t': 'i', - 'uint_fast16_t': 'i', - 'int_fast32_t': 'i', - 'uint_fast32_t': 'i', - 'int_fast64_t': 'i', - 'uint_fast64_t': 'i', - 'intptr_t': 'i', - 'uintptr_t': 'i', - 'intmax_t': 'i', - 'uintmax_t': 'i', - 'ptrdiff_t': 'i', - 'size_t': 'i', - 'ssize_t': 'i', - } - - def __init__(self, name): - assert name in self.ALL_PRIMITIVE_TYPES - self.name = name - self.c_name_with_marker = name + '&' - - def is_char_type(self): - 
return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_integer_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_float_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' - def is_complex_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'j' - - def build_backend_type(self, ffi, finishlist): - return global_cache(self, ffi, 'new_primitive_type', self.name) - - -class UnknownIntegerType(BasePrimitiveType): - _attrs_ = ('name',) - - def __init__(self, name): - self.name = name - self.c_name_with_marker = name + '&' - - def is_integer_type(self): - return True - - def build_backend_type(self, ffi, finishlist): - raise NotImplementedError("integer type '%s' can only be used after " - "compilation" % self.name) - -class UnknownFloatType(BasePrimitiveType): - _attrs_ = ('name', ) - - def __init__(self, name): - self.name = name - self.c_name_with_marker = name + '&' - - def build_backend_type(self, ffi, finishlist): - raise NotImplementedError("float type '%s' can only be used after " - "compilation" % self.name) - - -class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis', 'abi') - - def __init__(self, args, result, ellipsis, abi=None): - self.args = args - self.result = result - self.ellipsis = ellipsis - self.abi = abi - # - reprargs = [arg._get_c_name() for arg in self.args] - if self.ellipsis: - reprargs.append('...') - reprargs = reprargs or ['void'] - replace_with = self._base_pattern % (', '.join(reprargs),) - if abi is not None: - replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] - self.c_name_with_marker = ( - self.result.c_name_with_marker.replace('&', replace_with)) - - -class RawFunctionType(BaseFunctionType): - # Corresponds to a C type like 'int(int)', which is the C type of - # a function, but not a pointer-to-function. The backend has no - # notion of such a type; it's used temporarily by parsing. 
- _base_pattern = '(&)(%s)' - is_raw_function = True - - def build_backend_type(self, ffi, finishlist): - raise CDefError("cannot render the type %r: it is a function " - "type, not a pointer-to-function type" % (self,)) - - def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) - - -class FunctionPtrType(BaseFunctionType): - _base_pattern = '(*&)(%s)' - - def build_backend_type(self, ffi, finishlist): - result = self.result.get_cached_btype(ffi, finishlist) - args = [] - for tp in self.args: - args.append(tp.get_cached_btype(ffi, finishlist)) - abi_args = () - if self.abi == "__stdcall": - if not self.ellipsis: # __stdcall ignored for variadic funcs - try: - abi_args = (ffi._backend.FFI_STDCALL,) - except AttributeError: - pass - return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis, *abi_args) - - def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) - - -class PointerType(BaseType): - _attrs_ = ('totype', 'quals') - - def __init__(self, totype, quals=0): - self.totype = totype - self.quals = quals - extra = qualify(quals, " *&") - if totype.is_array_type: - extra = "(%s)" % (extra.lstrip(),) - self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) - - def build_backend_type(self, ffi, finishlist): - BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True) - return global_cache(self, ffi, 'new_pointer_type', BItem) - -voidp_type = PointerType(void_type) - -def ConstPointerType(totype): - return PointerType(totype, Q_CONST) - -const_voidp_type = ConstPointerType(void_type) - - -class NamedPointerType(PointerType): - _attrs_ = ('totype', 'name') - - def __init__(self, totype, name, quals=0): - PointerType.__init__(self, totype, quals) - self.name = name - self.c_name_with_marker = name + '&' - - -class ArrayType(BaseType): - _attrs_ = ('item', 'length') - is_array_type = True - - def __init__(self, item, length): - self.item = item - self.length = length - # - if length is None: - brackets = '&[]' - elif length == '...': - brackets = '&[/*...*/]' - else: - brackets = '&[%s]' % length - self.c_name_with_marker = ( - self.item.c_name_with_marker.replace('&', brackets)) - - def length_is_unknown(self): - return isinstance(self.length, str) - - def resolve_length(self, newlength): - return ArrayType(self.item, newlength) - - def build_backend_type(self, ffi, finishlist): - if self.length_is_unknown(): - raise CDefError("cannot render the type %r: unknown length" % - (self,)) - self.item.get_cached_btype(ffi, finishlist) # force the item BType - BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) - return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) - -char_array_type = ArrayType(PrimitiveType('char'), None) - - -class StructOrUnionOrEnum(BaseTypeByIdentity): - _attrs_ = ('name',) - forcename = None - - def build_c_name_with_marker(self): - name = self.forcename or '%s %s' % (self.kind, self.name) - self.c_name_with_marker = name + '&' - - def force_the_name(self, forcename): - self.forcename = forcename - self.build_c_name_with_marker() - - def get_official_name(self): - assert self.c_name_with_marker.endswith('&') - return self.c_name_with_marker[:-1] - - -class StructOrUnion(StructOrUnionOrEnum): - fixedlayout = None - completed = 0 - partial = False - packed = 0 - - def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): - self.name = name - self.fldnames = fldnames - 
self.fldtypes = fldtypes - self.fldbitsize = fldbitsize - self.fldquals = fldquals - self.build_c_name_with_marker() - - def anonymous_struct_fields(self): - if self.fldtypes is not None: - for name, type in zip(self.fldnames, self.fldtypes): - if name == '' and isinstance(type, StructOrUnion): - yield type - - def enumfields(self, expand_anonymous_struct_union=True): - fldquals = self.fldquals - if fldquals is None: - fldquals = (0,) * len(self.fldnames) - for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, - self.fldbitsize, fldquals): - if (name == '' and isinstance(type, StructOrUnion) - and expand_anonymous_struct_union): - # nested anonymous struct/union - for result in type.enumfields(): - yield result - else: - yield (name, type, bitsize, quals) - - def force_flatten(self): - # force the struct or union to have a declaration that lists - # directly all fields returned by enumfields(), flattening - # nested anonymous structs/unions. - names = [] - types = [] - bitsizes = [] - fldquals = [] - for name, type, bitsize, quals in self.enumfields(): - names.append(name) - types.append(type) - bitsizes.append(bitsize) - fldquals.append(quals) - self.fldnames = tuple(names) - self.fldtypes = tuple(types) - self.fldbitsize = tuple(bitsizes) - self.fldquals = tuple(fldquals) - - def get_cached_btype(self, ffi, finishlist, can_delay=False): - BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, - can_delay) - if not can_delay: - self.finish_backend_type(ffi, finishlist) - return BType - - def finish_backend_type(self, ffi, finishlist): - if self.completed: - if self.completed != 2: - raise NotImplementedError("recursive structure declaration " - "for '%s'" % (self.name,)) - return - BType = ffi._cached_btypes[self] - # - self.completed = 1 - # - if self.fldtypes is None: - pass # not completing it: it's an opaque struct - # - elif self.fixedlayout is None: - fldtypes = [tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes] - lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) - extra_flags = () - if self.packed: - if self.packed == 1: - extra_flags = (8,) # SF_PACKED - else: - extra_flags = (0, self.packed) - ffi._backend.complete_struct_or_union(BType, lst, self, - -1, -1, *extra_flags) - # - else: - fldtypes = [] - fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout - for i in range(len(self.fldnames)): - fsize = fieldsize[i] - ftype = self.fldtypes[i] - # - if isinstance(ftype, ArrayType) and ftype.length_is_unknown(): - # fix the length to match the total size - BItemType = ftype.item.get_cached_btype(ffi, finishlist) - nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) - if nrest != 0: - self._verification_error( - "field '%s.%s' has a bogus size?" 
% ( - self.name, self.fldnames[i] or '{}')) - ftype = ftype.resolve_length(nlen) - self.fldtypes = (self.fldtypes[:i] + (ftype,) + - self.fldtypes[i+1:]) - # - BFieldType = ftype.get_cached_btype(ffi, finishlist) - if isinstance(ftype, ArrayType) and ftype.length is None: - assert fsize == 0 - else: - bitemsize = ffi.sizeof(BFieldType) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) - fldtypes.append(BFieldType) - # - lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) - ffi._backend.complete_struct_or_union(BType, lst, self, - totalsize, totalalignment) - self.completed = 2 - - def _verification_error(self, msg): - raise VerificationError(msg) - - def check_not_partial(self): - if self.partial and self.fixedlayout is None: - raise VerificationMissing(self._get_c_name()) - - def build_backend_type(self, ffi, finishlist): - self.check_not_partial() - finishlist.append(self) - # - return global_cache(self, ffi, 'new_%s_type' % self.kind, - self.get_official_name(), key=self) - - -class StructType(StructOrUnion): - kind = 'struct' - - -class UnionType(StructOrUnion): - kind = 'union' - - -class EnumType(StructOrUnionOrEnum): - kind = 'enum' - partial = False - partial_resolved = False - - def __init__(self, name, enumerators, enumvalues, baseinttype=None): - self.name = name - self.enumerators = enumerators - self.enumvalues = enumvalues - self.baseinttype = baseinttype - self.build_c_name_with_marker() - - def force_the_name(self, forcename): - StructOrUnionOrEnum.force_the_name(self, forcename) - if self.forcename is None: - name = self.get_official_name() - self.forcename = '$' + name.replace(' ', '_') - - def check_not_partial(self): - if self.partial and not self.partial_resolved: - raise VerificationMissing(self._get_c_name()) - - def build_backend_type(self, ffi, finishlist): - self.check_not_partial() - base_btype = self.build_baseinttype(ffi, finishlist) - return global_cache(self, ffi, 'new_enum_type', - self.get_official_name(), - self.enumerators, self.enumvalues, - base_btype, key=self) - - def build_baseinttype(self, ffi, finishlist): - if self.baseinttype is not None: - return self.baseinttype.get_cached_btype(ffi, finishlist) - # - if self.enumvalues: - smallest_value = min(self.enumvalues) - largest_value = max(self.enumvalues) - else: - import warnings - try: - # XXX! The goal is to ensure that the warnings.warn() - # will not suppress the warning. We want to get it - # several times if we reach this point several times. 
- __warningregistry__.clear() - except NameError: - pass - warnings.warn("%r has no values explicitly defined; " - "guessing that it is equivalent to 'unsigned int'" - % self._get_c_name()) - smallest_value = largest_value = 0 - if smallest_value < 0: # needs a signed type - sign = 1 - candidate1 = PrimitiveType("int") - candidate2 = PrimitiveType("long") - else: - sign = 0 - candidate1 = PrimitiveType("unsigned int") - candidate2 = PrimitiveType("unsigned long") - btype1 = candidate1.get_cached_btype(ffi, finishlist) - btype2 = candidate2.get_cached_btype(ffi, finishlist) - size1 = ffi.sizeof(btype1) - size2 = ffi.sizeof(btype2) - if (smallest_value >= ((-1) << (8*size1-1)) and - largest_value < (1 << (8*size1-sign))): - return btype1 - if (smallest_value >= ((-1) << (8*size2-1)) and - largest_value < (1 << (8*size2-sign))): - return btype2 - raise CDefError("%s values don't all fit into either 'long' " - "or 'unsigned long'" % self._get_c_name()) - -def unknown_type(name, structname=None): - if structname is None: - structname = '$%s' % name - tp = StructType(structname, None, None, None) - tp.force_the_name(name) - tp.origin = "unknown_type" - return tp - -def unknown_ptr_type(name, structname=None): - if structname is None: - structname = '$$%s' % name - tp = StructType(structname, None, None, None) - return NamedPointerType(tp, name) - - -global_lock = allocate_lock() -_typecache_cffi_backend = weakref.WeakValueDictionary() - -def get_typecache(backend): - # returns _typecache_cffi_backend if backend is the _cffi_backend - # module, or type(backend).__typecache if backend is an instance of - # CTypesBackend (or some FakeBackend class during tests) - if isinstance(backend, types.ModuleType): - return _typecache_cffi_backend - with global_lock: - if not hasattr(type(backend), '__typecache'): - type(backend).__typecache = weakref.WeakValueDictionary() - return type(backend).__typecache - -def global_cache(srctype, ffi, funcname, *args, **kwds): - key = kwds.pop('key', (funcname, args)) - assert not kwds - try: - return ffi._typecache[key] - except KeyError: - pass - try: - res = getattr(ffi._backend, funcname)(*args) - except NotImplementedError as e: - raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) - # note that setdefault() on WeakValueDictionary is not atomic - # and contains a rare bug (http://bugs.python.org/issue19542); - # we have to use a lock and do it ourselves - cache = ffi._typecache - with global_lock: - res1 = cache.get(key) - if res1 is None: - cache[key] = res - return res - else: - return res1 - -def pointer_cache(ffi, BType): - return global_cache('?', ffi, 'new_pointer_type', BType) - -def attach_exception_info(e, name): - if e.args and type(e.args[0]) is str: - e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:] diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/__init__.py deleted file mode 100644 index 080c988b2da326c2fe356630d5641d367b37a546..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-"""isort:skip_file""" - -import os -import sys - -try: - from .version import __version__ # noqa -except ImportError: - version_txt = os.path.join(os.path.dirname(__file__), "version.txt") - with open(version_txt) as f: - __version__ = f.read().strip() - -__all__ = ["pdb"] - -# backwards compatibility to support `from fairseq.X import Y` -from fairseq.distributed import utils as distributed_utils -from fairseq.logging import meters, metrics, progress_bar # noqa - -sys.modules["fairseq.distributed_utils"] = distributed_utils -sys.modules["fairseq.meters"] = meters -sys.modules["fairseq.metrics"] = metrics -sys.modules["fairseq.progress_bar"] = progress_bar - -# initialize hydra -from fairseq.dataclass.initialize import hydra_init - -hydra_init() - -import fairseq.criterions # noqa -import fairseq.distributed # noqa -import fairseq.models # noqa -import fairseq.modules # noqa -import fairseq.optim # noqa -import fairseq.optim.lr_scheduler # noqa -import fairseq.pdb # noqa -import fairseq.scoring # noqa -import fairseq.tasks # noqa -import fairseq.token_generation_constraints # noqa - -import fairseq.benchmark # noqa -import fairseq.model_parallel # noqa diff --git a/spaces/awacke1/RLHF.Cognitive.Episodic.Semantic.Memory/app.py b/spaces/awacke1/RLHF.Cognitive.Episodic.Semantic.Memory/app.py deleted file mode 100644 index b699439bc47babd6b1cf205b29b7f63e40856218..0000000000000000000000000000000000000000 --- a/spaces/awacke1/RLHF.Cognitive.Episodic.Semantic.Memory/app.py +++ /dev/null @@ -1,102 +0,0 @@ -import streamlit as st -import pandas as pd - -# Define functions -def create_empty_csv_files(): - sem_df = pd.DataFrame(columns=["fact", "category", "source"]) - sem_df.to_csv("semantic_memory.csv", index=False) - epi_df = pd.DataFrame(columns=["event", "sentiment", "date"]) - epi_df.to_csv("episodic_memory.csv", index=False) - -def load_data(): - try: - sem_df = pd.read_csv("semantic_memory.csv") - sem_mem = sem_df.to_dict("records") - except: - create_empty_csv_files() - sem_mem = [{"fact": "The Earth is round", "category": "science", "source": "NASA"}, - {"fact": "Pizza is delicious", "category": "food", "source": "me"}] - try: - epi_df = pd.read_csv("episodic_memory.csv") - epi_mem = epi_df.to_dict("records") - except: - create_empty_csv_files() - epi_mem = [{"event": "I went to the beach", "sentiment": "happy", "date": "2022-02-28"}, - {"event": "I had a fight with my friend", "sentiment": "sad", "date": "2022-02-25"}] - return sem_mem, epi_mem - -def save_data(sem_mem, epi_mem): - sem_df = pd.DataFrame(sem_mem) - sem_df.to_csv("semantic_memory.csv", index=False) - epi_df = pd.DataFrame(epi_mem) - epi_df.to_csv("episodic_memory.csv", index=False) - -def view_semantic_memory(sem_mem): - st.write("# Semantic Memory") - for item in sem_mem: - st.write(f"**{item['fact']}** ({item['category']}) - {item['source']}") - -def view_episodic_memory(epi_mem): - st.write("# Episodic Memory") - for item in epi_mem: - st.write(f"**{item['event']}** ({item['sentiment']}) - {item['date']}") - -def add_fact(sem_mem, fact, category, source): - sem_mem.append({"fact": fact, "category": category, "source": source}) - -def add_event(epi_mem, event, sentiment, date): - epi_mem.append({"event": event, "sentiment": sentiment, "date": date}) - -def add_fact_to_semantic_memory(sem_mem, epi_mem): - fact = st.text_input("Enter a fact") - category = st.text_input("Enter a category") - source = st.text_input("Enter a source") - if st.button("Add Fact"): - add_fact(sem_mem, fact, category, source) - save_data(sem_mem, 
epi_mem) - st.success("Fact added to semantic memory!") - st.sidebar.success("Fact added to semantic memory!") - -def add_event_to_episodic_memory(epi_mem, sem_mem): - event = st.text_input("Enter an event") - sentiment = st.selectbox("Select a sentiment", ["happy", "sad", "neutral"]) - date = st.date_input("Select a date") - if st.button("Add Event"): - add_event(epi_mem, event, sentiment, date) - save_data(sem_mem, epi_mem) - st.success("Event added to episodic memory!") - st.sidebar.success("Event added to episodic memory!") - -def run_app(): - sem_mem, epi_mem = load_data() - - st.title("Cognitive Agent") - option = st.sidebar.selectbox("Select an option", - ["View Semantic Memory", "View Episodic Memory", "Add Fact to Semantic Memory", - "Add Event to Episodic Memory"]) - - if option == "View Semantic Memory": - view_semantic_memory(sem_mem) - elif option == "View Episodic Memory": - view_episodic_memory(epi_mem) - elif option == "Add Fact to Semantic Memory": - add_fact_to_semantic_memory(sem_mem, epi_mem) - elif option == "Add Event to Episodic Memory": - add_event_to_episodic_memory(epi_mem, sem_mem) - - save_data(sem_mem, epi_mem) - -if __name__ == '__main__': - run_app() - - - - -# AW: Restructure the code listing into four functions. shorten the code by eliminating comments and unnecessary whitespace and empty lines. -# AI: This revised code splits the app into four functions: load_data, save_data, add_fact, and add_event. The run_app function handles the logic of the Streamlit app and calls these other functions as necessary. The code has been shortened by removing unnecessary whitespace and comments, but retains its functionality. - - -# This program uses Streamlit to create a web app that allows the user to view and add to both semantic and episodic memory. The semantic memory is stored as a list of dictionaries, where each dictionary represents a fact and includes the fact itself, the category it belongs to, and the source of the fact. The episodic memory is also stored as a list of dictionaries, where each dictionary represents an event and includes the event itself, the sentiment associated with the event, and the date the event occurred. -# The program allows the user to view both types of memory by selecting an option from the sidebar. If the user selects "View Semantic Memory", the program displays all of the facts stored in semantic memory. If the user selects "View Episodic Memory", the program displays all of the events stored in episodic memory. -# The program also allows the user to add new facts to semantic memory or new events to episodic memory by selecting an option from the sidebar and filling out a form with the relevant information. When the user clicks the "Add Fact" or "Add Event" button, the new fact or event is added to the appropriate list of dictionaries and saved to a CSV file. The program then displays a success message indicating that the fact or event was added to memory. -# Overall, this program demonstrates how semantic and episodic memory can be modeled using Python list dictionaries, and how these types of memory can be used to track both facts and observations, as well as sentiments associated with past experiences. 
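The closing comments above describe the pattern at the heart of this app: each memory store is a plain Python list of dictionaries that is round-tripped to CSV with pandas. Below is a minimal, self-contained sketch of that round trip outside Streamlit; the file name and sample record here are illustrative only, not taken from the app.

import pandas as pd

# Semantic memory: a list of dicts, one dict per fact.
semantic_memory = [
    {"fact": "The Earth is round", "category": "science", "source": "NASA"},
]

# Persist the list of dicts as rows of a CSV file.
pd.DataFrame(semantic_memory).to_csv("semantic_memory_demo.csv", index=False)

# Load it back into the same list-of-dicts shape the app expects.
restored = pd.read_csv("semantic_memory_demo.csv").to_dict("records")
print(restored[0]["fact"])  # -> The Earth is round

The same pattern applies to episodic memory; only the column names (event, sentiment, date) differ.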
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/modifiers/SimplifyModifier.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/modifiers/SimplifyModifier.js deleted file mode 100644 index b67239cce85124685079c46249b89e97bbd9f1ab..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/modifiers/SimplifyModifier.js +++ /dev/null @@ -1,494 +0,0 @@ -/* - * @author zz85 / http://twitter.com/blurspline / http://www.lab4games.net/zz85/blog - * - * Simplification Geometry Modifier - * - based on code and technique - * - by Stan Melax in 1998 - * - Progressive Mesh type Polygon Reduction Algorithm - * - http://www.melax.com/polychop/ - */ - -THREE.SimplifyModifier = function () {}; - -( function () { - - var cb = new THREE.Vector3(), ab = new THREE.Vector3(); - - function pushIfUnique( array, object ) { - - if ( array.indexOf( object ) === - 1 ) array.push( object ); - - } - - function removeFromArray( array, object ) { - - var k = array.indexOf( object ); - if ( k > - 1 ) array.splice( k, 1 ); - - } - - function computeEdgeCollapseCost( u, v ) { - - // if we collapse edge uv by moving u to v then how - // much different will the model change, i.e. the "error". - - var edgelength = v.position.distanceTo( u.position ); - var curvature = 0; - - var sideFaces = []; - var i, il = u.faces.length, face, sideFace; - - // find the "sides" triangles that are on the edge uv - for ( i = 0; i < il; i ++ ) { - - face = u.faces[ i ]; - - if ( face.hasVertex( v ) ) { - - sideFaces.push( face ); - - } - - } - - // use the triangle facing most away from the sides - // to determine our curvature term - for ( i = 0; i < il; i ++ ) { - - var minCurvature = 1; - face = u.faces[ i ]; - - for ( var j = 0; j < sideFaces.length; j ++ ) { - - sideFace = sideFaces[ j ]; - // use dot product of face normals. - var dotProd = face.normal.dot( sideFace.normal ); - minCurvature = Math.min( minCurvature, ( 1.001 - dotProd ) / 2 ); - - } - - curvature = Math.max( curvature, minCurvature ); - - } - - // crude approach in attempt to preserve borders - // though it seems not to be totally correct - var borders = 0; - if ( sideFaces.length < 2 ) { - - // we add some arbitrary cost for borders, - // borders += 10; - curvature = 1; - - } - - var amt = edgelength * curvature + borders; - - return amt; - - } - - function computeEdgeCostAtVertex( v ) { - - // compute the edge collapse cost for all edges that start - // from vertex v. Since we are only interested in reducing - // the object by selecting the min cost edge at each step, we - // only cache the cost of the least cost edge at this vertex - // (in member variable collapse) as well as the value of the - // cost (in member variable collapseCost). - - if ( v.neighbors.length === 0 ) { - - // collapse if no neighbors. - v.collapseNeighbor = null; - v.collapseCost = - 0.01; - - return; - - } - - v.collapseCost = 100000; - v.collapseNeighbor = null; - - // search all neighboring edges for "least cost" edge - for ( var i = 0; i < v.neighbors.length; i ++ ) { - - var collapseCost = computeEdgeCollapseCost( v, v.neighbors[ i ] ); - - if ( ! 
v.collapseNeighbor ) { - - v.collapseNeighbor = v.neighbors[ i ]; - v.collapseCost = collapseCost; - v.minCost = collapseCost; - v.totalCost = 0; - v.costCount = 0; - - } - - v.costCount ++; - v.totalCost += collapseCost; - - if ( collapseCost < v.minCost ) { - - v.collapseNeighbor = v.neighbors[ i ]; - v.minCost = collapseCost; - - } - - } - - // we average the cost of collapsing at this vertex - v.collapseCost = v.totalCost / v.costCount; - // v.collapseCost = v.minCost; - - } - - function removeVertex( v, vertices ) { - - console.assert( v.faces.length === 0 ); - - while ( v.neighbors.length ) { - - var n = v.neighbors.pop(); - removeFromArray( n.neighbors, v ); - - } - - removeFromArray( vertices, v ); - - } - - function removeFace( f, faces ) { - - removeFromArray( faces, f ); - - if ( f.v1 ) removeFromArray( f.v1.faces, f ); - if ( f.v2 ) removeFromArray( f.v2.faces, f ); - if ( f.v3 ) removeFromArray( f.v3.faces, f ); - - // TODO optimize this! - var vs = [ f.v1, f.v2, f.v3 ]; - var v1, v2; - - for ( var i = 0; i < 3; i ++ ) { - - v1 = vs[ i ]; - v2 = vs[ ( i + 1 ) % 3 ]; - - if ( ! v1 || ! v2 ) continue; - - v1.removeIfNonNeighbor( v2 ); - v2.removeIfNonNeighbor( v1 ); - - } - - } - - function collapse( vertices, faces, u, v ) { // u and v are pointers to vertices of an edge - - // Collapse the edge uv by moving vertex u onto v - - if ( ! v ) { - - // u is a vertex all by itself so just delete it.. - removeVertex( u, vertices ); - return; - - } - - var i; - var tmpVertices = []; - - for ( i = 0; i < u.neighbors.length; i ++ ) { - - tmpVertices.push( u.neighbors[ i ] ); - - } - - - // delete triangles on edge uv: - for ( i = u.faces.length - 1; i >= 0; i -- ) { - - if ( u.faces[ i ].hasVertex( v ) ) { - - removeFace( u.faces[ i ], faces ); - - } - - } - - // update remaining triangles to have v instead of u - for ( i = u.faces.length - 1; i >= 0; i -- ) { - - u.faces[ i ].replaceVertex( u, v ); - - } - - - removeVertex( u, vertices ); - - // recompute the edge collapse costs in neighborhood - for ( i = 0; i < tmpVertices.length; i ++ ) { - - computeEdgeCostAtVertex( tmpVertices[ i ] ); - - } - - } - - - - function minimumCostEdge( vertices ) { - - // O(n * n) approach. 
TODO optimize this - - var least = vertices[ 0 ]; - - for ( var i = 0; i < vertices.length; i ++ ) { - - if ( vertices[ i ].collapseCost < least.collapseCost ) { - - least = vertices[ i ]; - - } - - } - - return least; - - } - - // we use a triangle class to represent structure of face slightly differently - - function Triangle( v1, v2, v3, a, b, c ) { - - this.a = a; - this.b = b; - this.c = c; - - this.v1 = v1; - this.v2 = v2; - this.v3 = v3; - - this.normal = new THREE.Vector3(); - - this.computeNormal(); - - v1.faces.push( this ); - v1.addUniqueNeighbor( v2 ); - v1.addUniqueNeighbor( v3 ); - - v2.faces.push( this ); - v2.addUniqueNeighbor( v1 ); - v2.addUniqueNeighbor( v3 ); - - - v3.faces.push( this ); - v3.addUniqueNeighbor( v1 ); - v3.addUniqueNeighbor( v2 ); - - } - - Triangle.prototype.computeNormal = function () { - - var vA = this.v1.position; - var vB = this.v2.position; - var vC = this.v3.position; - - cb.subVectors( vC, vB ); - ab.subVectors( vA, vB ); - cb.cross( ab ).normalize(); - - this.normal.copy( cb ); - - }; - - Triangle.prototype.hasVertex = function ( v ) { - - return v === this.v1 || v === this.v2 || v === this.v3; - - }; - - Triangle.prototype.replaceVertex = function ( oldv, newv ) { - - if ( oldv === this.v1 ) this.v1 = newv; - else if ( oldv === this.v2 ) this.v2 = newv; - else if ( oldv === this.v3 ) this.v3 = newv; - - removeFromArray( oldv.faces, this ); - newv.faces.push( this ); - - - oldv.removeIfNonNeighbor( this.v1 ); - this.v1.removeIfNonNeighbor( oldv ); - - oldv.removeIfNonNeighbor( this.v2 ); - this.v2.removeIfNonNeighbor( oldv ); - - oldv.removeIfNonNeighbor( this.v3 ); - this.v3.removeIfNonNeighbor( oldv ); - - this.v1.addUniqueNeighbor( this.v2 ); - this.v1.addUniqueNeighbor( this.v3 ); - - this.v2.addUniqueNeighbor( this.v1 ); - this.v2.addUniqueNeighbor( this.v3 ); - - this.v3.addUniqueNeighbor( this.v1 ); - this.v3.addUniqueNeighbor( this.v2 ); - - this.computeNormal(); - - }; - - function Vertex( v, id ) { - - this.position = v; - - this.id = id; // old index id - - this.faces = []; // faces vertex is connected - this.neighbors = []; // neighbouring vertices aka "adjacentVertices" - - // these will be computed in computeEdgeCostAtVertex() - this.collapseCost = 0; // cost of collapsing this vertex, the less the better. 
aka objdist - this.collapseNeighbor = null; // best candinate for collapsing - - } - - Vertex.prototype.addUniqueNeighbor = function ( vertex ) { - - pushIfUnique( this.neighbors, vertex ); - - }; - - Vertex.prototype.removeIfNonNeighbor = function ( n ) { - - var neighbors = this.neighbors; - var faces = this.faces; - - var offset = neighbors.indexOf( n ); - if ( offset === - 1 ) return; - for ( var i = 0; i < faces.length; i ++ ) { - - if ( faces[ i ].hasVertex( n ) ) return; - - } - - neighbors.splice( offset, 1 ); - - }; - - THREE.SimplifyModifier.prototype.modify = function ( geometry, count ) { - - if ( geometry.isBufferGeometry ) { - - geometry = new THREE.Geometry().fromBufferGeometry( geometry ); - - } - - geometry.mergeVertices(); - - var oldVertices = geometry.vertices; // Three Position - var oldFaces = geometry.faces; // Three Face - - // conversion - var vertices = []; - var faces = []; - - var i, il; - - // - // put data of original geometry in different data structures - // - - // add vertices - - for ( i = 0, il = oldVertices.length; i < il; i ++ ) { - - var vertex = new Vertex( oldVertices[ i ], i ); - vertices.push( vertex ); - - } - - // add faces - - for ( i = 0, il = oldFaces.length; i < il; i ++ ) { - - var face = oldFaces[ i ]; - - var a = face.a; - var b = face.b; - var c = face.c; - - var triangle = new Triangle( vertices[ a ], vertices[ b ], vertices[ c ], a, b, c ); - faces.push( triangle ); - - } - - // compute all edge collapse costs - - for ( i = 0, il = vertices.length; i < il; i ++ ) { - - computeEdgeCostAtVertex( vertices[ i ] ); - - } - - var nextVertex; - - var z = count; - - while ( z -- ) { - - nextVertex = minimumCostEdge( vertices ); - - if ( ! nextVertex ) { - - console.log( 'THREE.SimplifyModifier: No next vertex' ); - break; - - } - - collapse( vertices, faces, nextVertex, nextVertex.collapseNeighbor ); - - } - - // - - var simplifiedGeometry = new THREE.BufferGeometry(); - var position = []; - var index = []; - - // - - for ( i = 0; i < vertices.length; i ++ ) { - - var vertex = vertices[ i ].position; - position.push( vertex.x, vertex.y, vertex.z ); - - } - - // - - for ( i = 0; i < faces.length; i ++ ) { - - var face = faces[ i ]; - - var a = vertices.indexOf( face.v1 ); - var b = vertices.indexOf( face.v2 ); - var c = vertices.indexOf( face.v3 ); - - index.push( a, b, c ); - - } - - // - - simplifiedGeometry.addAttribute( 'position', new THREE.Float32BufferAttribute( position, 3 ) ); - simplifiedGeometry.setIndex( index ); - - return simplifiedGeometry; - - }; - -} )(); diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/VignetteShader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/VignetteShader.js deleted file mode 100644 index 81d9215351c97eeeebf3522b19e9e43e0b4c4a8f..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/VignetteShader.js +++ /dev/null @@ -1,63 +0,0 @@ -/** - * @author alteredq / http://alteredqualia.com/ - * - * Vignette shader - * based on PaintEffect postprocess from ro.me - * http://code.google.com/p/3-dreams-of-black/source/browse/deploy/js/effects/PaintEffect.js - */ - -THREE.VignetteShader = { - - uniforms: { - - "tDiffuse": { value: null }, - "offset": { value: 1.0 }, - "darkness": { value: 1.0 } - - }, - - vertexShader: [ - - "varying vec2 vUv;", - - "void main() {", - - "vUv = uv;", - "gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );", - - "}" - - ].join( "\n" ), - - 
fragmentShader: [ - - "uniform float offset;", - "uniform float darkness;", - - "uniform sampler2D tDiffuse;", - - "varying vec2 vUv;", - - "void main() {", - - // Eskil's vignette - - "vec4 texel = texture2D( tDiffuse, vUv );", - "vec2 uv = ( vUv - vec2( 0.5 ) ) * vec2( offset );", - "gl_FragColor = vec4( mix( texel.rgb, vec3( 1.0 - darkness ), dot( uv, uv ) ), texel.a );", - - /* - // alternative version from glfx.js - // this one makes more "dusty" look (as opposed to "burned") - - "vec4 color = texture2D( tDiffuse, vUv );", - "float dist = distance( vUv, vec2( 0.5 ) );", - "color.rgb *= smoothstep( 0.8, offset * 0.799, dist *( darkness + offset ) );", - "gl_FragColor = color;", - */ - - "}" - - ].join( "\n" ) - -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/extras/core/Font.js b/spaces/banana-projects/web3d/node_modules/three/src/extras/core/Font.js deleted file mode 100644 index e75573bdb2735058f6027ecb4638ebad54f53a43..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/extras/core/Font.js +++ /dev/null @@ -1,145 +0,0 @@ -/** - * @author zz85 / http://www.lab4games.net/zz85/blog - * @author mrdoob / http://mrdoob.com/ - */ - -import { ShapePath } from './ShapePath.js'; - - -function Font( data ) { - - this.type = 'Font'; - - this.data = data; - -} - -Object.assign( Font.prototype, { - - isFont: true, - - generateShapes: function ( text, size ) { - - if ( size === undefined ) size = 100; - - var shapes = []; - var paths = createPaths( text, size, this.data ); - - for ( var p = 0, pl = paths.length; p < pl; p ++ ) { - - Array.prototype.push.apply( shapes, paths[ p ].toShapes() ); - - } - - return shapes; - - } - -} ); - -function createPaths( text, size, data ) { - - var chars = Array.from ? Array.from( text ) : String( text ).split( '' ); // see #13988 - var scale = size / data.resolution; - var line_height = ( data.boundingBox.yMax - data.boundingBox.yMin + data.underlineThickness ) * scale; - - var paths = []; - - var offsetX = 0, offsetY = 0; - - for ( var i = 0; i < chars.length; i ++ ) { - - var char = chars[ i ]; - - if ( char === '\n' ) { - - offsetX = 0; - offsetY -= line_height; - - } else { - - var ret = createPath( char, scale, offsetX, offsetY, data ); - offsetX += ret.offsetX; - paths.push( ret.path ); - - } - - } - - return paths; - -} - -function createPath( char, scale, offsetX, offsetY, data ) { - - var glyph = data.glyphs[ char ] || data.glyphs[ '?' ]; - - if ( ! 
glyph ) return; - - var path = new ShapePath(); - - var x, y, cpx, cpy, cpx1, cpy1, cpx2, cpy2; - - if ( glyph.o ) { - - var outline = glyph._cachedOutline || ( glyph._cachedOutline = glyph.o.split( ' ' ) ); - - for ( var i = 0, l = outline.length; i < l; ) { - - var action = outline[ i ++ ]; - - switch ( action ) { - - case 'm': // moveTo - - x = outline[ i ++ ] * scale + offsetX; - y = outline[ i ++ ] * scale + offsetY; - - path.moveTo( x, y ); - - break; - - case 'l': // lineTo - - x = outline[ i ++ ] * scale + offsetX; - y = outline[ i ++ ] * scale + offsetY; - - path.lineTo( x, y ); - - break; - - case 'q': // quadraticCurveTo - - cpx = outline[ i ++ ] * scale + offsetX; - cpy = outline[ i ++ ] * scale + offsetY; - cpx1 = outline[ i ++ ] * scale + offsetX; - cpy1 = outline[ i ++ ] * scale + offsetY; - - path.quadraticCurveTo( cpx1, cpy1, cpx, cpy ); - - break; - - case 'b': // bezierCurveTo - - cpx = outline[ i ++ ] * scale + offsetX; - cpy = outline[ i ++ ] * scale + offsetY; - cpx1 = outline[ i ++ ] * scale + offsetX; - cpy1 = outline[ i ++ ] * scale + offsetY; - cpx2 = outline[ i ++ ] * scale + offsetX; - cpy2 = outline[ i ++ ] * scale + offsetY; - - path.bezierCurveTo( cpx1, cpy1, cpx2, cpy2, cpx, cpy ); - - break; - - } - - } - - } - - return { offsetX: glyph.ha * scale, path: path }; - -} - -export { Font }; diff --git a/spaces/bankholdup/stylegan_petbreeder/e4e/models/discriminator.py b/spaces/bankholdup/stylegan_petbreeder/e4e/models/discriminator.py deleted file mode 100644 index 16bf3722c7f2e35cdc9bd177a33ed0975e67200d..0000000000000000000000000000000000000000 --- a/spaces/bankholdup/stylegan_petbreeder/e4e/models/discriminator.py +++ /dev/null @@ -1,20 +0,0 @@ -from torch import nn - - -class LatentCodesDiscriminator(nn.Module): - def __init__(self, style_dim, n_mlp): - super().__init__() - - self.style_dim = style_dim - - layers = [] - for i in range(n_mlp-1): - layers.append( - nn.Linear(style_dim, style_dim) - ) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Linear(512, 1)) - self.mlp = nn.Sequential(*layers) - - def forward(self, w): - return self.mlp(w) diff --git a/spaces/bhfr/bing-ai/README.md b/spaces/bhfr/bing-ai/README.md deleted file mode 100644 index 68a4415eb13c1b34f27ac453b245c2c68e9c7912..0000000000000000000000000000000000000000 --- a/spaces/bhfr/bing-ai/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Bing Ai -emoji: ⚡ -colorFrom: indigo -colorTo: green -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bibekyess/bgpt/model.py b/spaces/bibekyess/bgpt/model.py deleted file mode 100644 index 23a9dc7062ee13891feec76454e19189f064c4a3..0000000000000000000000000000000000000000 --- a/spaces/bibekyess/bgpt/model.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch.nn as nn - - -class NeuralNet(nn.Module): - def __init__(self, input_size, hidden_size, num_classes): - super().__init__() - self.l1 = nn.Linear(input_size, hidden_size) - self.l2 = nn.Linear(hidden_size, hidden_size) - self.l3 = nn.Linear(hidden_size, num_classes) - self.relu = nn.ReLU() - self.dropout = nn.Dropout(p=0.5) - - def forward(self, x): - out = self.l1(x) - out = self.relu(out) - out = self.dropout(out) - out = self.l2(out) - out = self.relu(out) - out = self.dropout(out) - out = self.l3(out) - # no activation and no softmax at the end - return out diff --git a/spaces/biingshanak/vits-uma-genshin-honkai/models.py 
b/spaces/biingshanak/vits-uma-genshin-honkai/models.py deleted file mode 100644 index 8353b867f441de7e4d05aef980e672899c3a8889..0000000000000000000000000000000000000000 --- a/spaces/biingshanak/vits-uma-genshin-honkai/models.py +++ /dev/null @@ -1,533 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, 
reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - 
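An aside on the masking convention used throughout models.py above: the encoders build x_mask from per-utterance lengths via commons.sequence_mask and multiply it into every intermediate tensor so padded frames contribute nothing. A minimal standalone sketch of that pattern (the helper name sequence_mask_sketch and the toy shapes are illustrative stand-ins, not the repo's code):

import torch

def sequence_mask_sketch(lengths, max_length=None):
    # True for valid positions, False for padding, per sequence in the batch
    if max_length is None:
        max_length = int(lengths.max())
    positions = torch.arange(max_length, dtype=lengths.dtype, device=lengths.device)
    return positions.unsqueeze(0) < lengths.unsqueeze(1)                  # [b, t]

lengths = torch.tensor([3, 5])                                            # two utterances
x = torch.randn(2, 4, 5)                                                  # [b, h, t]
x_mask = sequence_mask_sketch(lengths, 5).unsqueeze(1).to(x.dtype)        # [b, 1, t]
x = x * x_mask                                                            # padded frames zeroed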
self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - 
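For the "1d to 2d" step that follows just below in DiscriminatorP.forward: the waveform is right-padded to a multiple of self.period and folded so that 2d convolutions see one column per period. A standalone toy version of that reshape, with assumed shapes (not part of the original file):

import torch
import torch.nn.functional as F

period = 3
x_demo = torch.randn(2, 1, 10)                  # [b, c, t]; t = 10 is not a multiple of 3
b, c, t = x_demo.shape
if t % period != 0:
    n_pad = period - (t % period)
    x_demo = F.pad(x_demo, (0, n_pad), "reflect")   # pad the time axis only
    t = t + n_pad
x_demo = x_demo.view(b, c, t // period, period)     # -> [2, 1, 4, 3]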
b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, 
gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
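A note on the inference path above: infer() rounds the predicted log-durations into integer frame counts (w_ceil) and expands them into a monotonic text-to-spectrogram alignment before decoding. A hedged, standalone sketch of that expansion with made-up durations (commons.generate_path in the repo performs the batched, masked equivalent):

import torch

durations = torch.tensor([2, 1, 3])             # hypothetical frames per phoneme
t_spec = int(durations.sum())
cum = torch.cumsum(durations, dim=0)
path = torch.zeros(len(durations), t_spec)
start = 0
for i, end in enumerate(cum.tolist()):
    path[i, start:end] = 1.0                    # token i owns frames [start, end)
    start = end
# path.t() @ token_stats would copy each token's prior (m_p, logs_p) onto its frames,
# mirroring torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)) in the code above.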
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/bioriAsaeru/text-to-voice/Explore the Richness of Indian Classical and Folkloric Music with Swar Systems SwarPlug 1.0 VSTi.md b/spaces/bioriAsaeru/text-to-voice/Explore the Richness of Indian Classical and Folkloric Music with Swar Systems SwarPlug 1.0 VSTi.md deleted file mode 100644 index cf23446702c2dd96087c8f1639b9b604221f0e0f..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Explore the Richness of Indian Classical and Folkloric Music with Swar Systems SwarPlug 1.0 VSTi.md +++ /dev/null @@ -1,5 +0,0 @@ -
    -

    Look no further. I know this site is all about free VST plugins. It is a place to download free full versions of the latest software, audio samples, tutorials, e-books and videos. SwarGroove is a VST/AU/RTAS/AAX multi-track Indian drums plugin. SwarGroove SwarPlug 4 Combo Crack Free Download r2r Latest. All the files are relevant and ready to be loaded. [VST] Swarplug 1.0 [Archive] - ViAnhEm.Com forum... swar plug » Download from 2013Zone.Com. I'm looking for the SwarPlug 1.0 VST, could anyone share a link with me? I've been searching the forum for ages and can't find it... (this 1.0 VSTi torrent link...) What is a torrent and magnet link?

    -

    {Swar Systems SwarPlug 1.0 VSTi}


    Download https://urloso.com/2uyPwm



    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/GalGun 2 - Doki Doki VR Mode Free Download WORK.md b/spaces/bioriAsaeru/text-to-voice/GalGun 2 - Doki Doki VR Mode Free Download WORK.md deleted file mode 100644 index a3054d659ffc7ad8511a3cd048456a4216a112ee..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/GalGun 2 - Doki Doki VR Mode Free Download WORK.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Gal*Gun 2 - Doki Doki VR Mode Free Download


    Download Zip ===== https://urloso.com/2uyPaj



    - -Gal*Gun. Quite the same Wikipedia. Just better. ... From Wikipedia, the free encyclopedia ... 2.2 PlayStation 3 version; 2.3 PlayStation 3 downloadable contents ... as well as a doki doki mode ("heart pounding" in English) where the player has the ... A virtual reality (VR) game, Gal*Gun VR, was released worldwide for PC on ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/brainblow/beat_remixer/examples.py b/spaces/brainblow/beat_remixer/examples.py deleted file mode 100644 index 7e33ae272ffc3ea9570739d91171e4b2ba03a6b8..0000000000000000000000000000000000000000 --- a/spaces/brainblow/beat_remixer/examples.py +++ /dev/null @@ -1,11 +0,0 @@ -import beat_manipulator as bm, os, random - -path = 'F:/Stuff/Music/Tracks/' -song = 'Phonetick - You.mp3' -song = path + song - -#bm.presets.savetest(song, scale = 1, shift = 0) - -bm.beatswap(song, 'random', scale = 1, shift = 0) - -#bm.presets.use(song = song, preset = 'dotted snares fast 1', scale = 1) \ No newline at end of file diff --git a/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/optimizers/radam.py b/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/optimizers/radam.py deleted file mode 100644 index e805d7e34921bee436e1e7fd9e1f753c7609186b..0000000000000000000000000000000000000000 --- a/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/optimizers/radam.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- - -"""RAdam optimizer. - -This code is drived from https://github.com/LiyuanLucasLiu/RAdam. -""" - -import math -import torch - -from torch.optim.optimizer import Optimizer - - -class RAdam(Optimizer): - """Rectified Adam optimizer.""" - - def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): - """Initilize RAdam optimizer.""" - defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) - self.buffer = [[None, None, None] for ind in range(10)] - super(RAdam, self).__init__(params, defaults) - - def __setstate__(self, state): - """Set state.""" - super(RAdam, self).__setstate__(state) - - def step(self, closure=None): - """Run one step.""" - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data.float() - if grad.is_sparse: - raise RuntimeError('RAdam does not support sparse gradients') - - p_data_fp32 = p.data.float() - - state = self.state[p] - - if len(state) == 0: - state['step'] = 0 - state['exp_avg'] = torch.zeros_like(p_data_fp32) - state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) - else: - state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) - state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) - - exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] - beta1, beta2 = group['betas'] - - exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) - exp_avg.mul_(beta1).add_(1 - beta1, grad) - - state['step'] += 1 - buffered = self.buffer[int(state['step'] % 10)] - if state['step'] == buffered[0]: - N_sma, step_size = buffered[1], buffered[2] - else: - buffered[0] = state['step'] - beta2_t = beta2 ** state['step'] - N_sma_max = 2 / (1 - beta2) - 1 - N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) - buffered[1] = N_sma - - # more conservative since it's an approximated value - if N_sma >= 5: - step_size = math.sqrt( - (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) # NOQA - else: - step_size = 1.0 / (1 - beta1 ** state['step']) - buffered[2] = step_size - - if group['weight_decay'] != 0: - p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) - - # more conservative since it's an approximated value - if N_sma >= 5: - denom = exp_avg_sq.sqrt().add_(group['eps']) - p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) - else: - 
p_data_fp32.add_(-step_size * group['lr'], exp_avg) - - p.data.copy_(p_data_fp32) - - return loss diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/tracking/__init__.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/tracking/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ccolas/TastyPiano/src/cocktails/pipeline/cocktailrep2recipe.py b/spaces/ccolas/TastyPiano/src/cocktails/pipeline/cocktailrep2recipe.py deleted file mode 100644 index 17a4ae4b55205f0107f9cde64a4d459b655c3be6..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/cocktails/pipeline/cocktailrep2recipe.py +++ /dev/null @@ -1,329 +0,0 @@ -import matplotlib.pyplot as plt -import pickle -from src.cocktails.utilities.cocktail_generation_utilities.population import * -from src.cocktails.utilities.glass_and_volume_utilities import glass_volume -from src.cocktails.config import RECIPE2FEATURES_PATH - -def test_mutation_params(cocktail_reps): - indexes = np.arange(cocktail_reps.shape[0]) - np.random.shuffle(indexes) - perfs = [] - mutated_perfs = [] - pop_params = dict(mutation_params=dict(p_add_ing=0.7, - p_remove_ing=0.7, - p_switch_ing=0.5, - p_change_q=0.7, - delta_change_q=0.3, - asexual_rep=True, - crossover=True, - ingredient_addition=(0.1, 0.05)), - nb_generations=100, - pop_size=100, - nb_elites=10, - dist='mse', - n_neighbors=5) - - for i in indexes[:20]: - target = cocktail_reps[i] - for j in range(100): - parent = IndividualCocktail(pop_params=pop_params, - target_affective_cluster=None, - target=target.copy()) - perfs.append(parent.perf) - child = parent.get_child()[0] - # child.compute_cocktail_rep() - # child.compute_perf() - if perfs[-1] != child.perf: - mutated_perfs.append(child.perf) - else: - perfs.pop(-1) - filtered_children = np.argwhere(np.array(mutated_perfs)==-100).flatten() - non_filtered_ids = np.argwhere(np.logical_and(np.array(perfs)!=-100, np.array(mutated_perfs)!=-100)).flatten() - print(f'Proportion of filtered: {filtered_children.size} / {len(mutated_perfs)} = {int(filtered_children.size / len(mutated_perfs)*100)}%') - plt.figure() - plt.scatter(np.array(perfs)[non_filtered_ids], np.array(mutated_perfs)[non_filtered_ids], s=100, alpha=0.5) - plt.xlabel('parent perf') - plt.ylabel('child perf') - print(np.corrcoef(np.array(perfs)[non_filtered_ids], np.array(mutated_perfs)[non_filtered_ids])[0, 1]) - plt.show() - stop = 1 - -def test_crossover(cocktail_reps): - indexes = np.arange(cocktail_reps.shape[0]) - np.random.shuffle(indexes) - perfs = [] - mutated_perfs = [] - pop_params = dict(mutation_params=dict(p_add_ing=0.7, - p_remove_ing=0.7, - p_switch_ing=0.5, - p_change_q=0.7, - delta_change_q=0.3, - asexual_rep=True, - crossover=True, - ingredient_addition=(0.1, 0.05)), - nb_generations=100, - pop_size=100, - nb_elites=10, - dist='mse', - n_neighbors=5) - for i in indexes[:20]: - for j in range(100): - target = cocktail_reps[i] - parent1 = IndividualCocktail(pop_params=pop_params, - target_affective_cluster=None, - target=target.copy()) - parent2 = IndividualCocktail(pop_params=pop_params, - target_affective_cluster=None, - target=target.copy()) - child = parent1.get_child_with(parent2)[0] - # child.compute_cocktail_rep() - # child.compute_perf() - perfs.append((parent1.perf + parent2.perf)/2) - if perfs[-1] != child.perf: - mutated_perfs.append(child.perf) - else: - perfs.pop(-1) - filtered_children = 
np.argwhere(np.array(mutated_perfs)==-100).flatten() - non_filtered_ids = np.argwhere(np.logical_and(np.array(perfs)>-45, np.array(mutated_perfs)!=-100)).flatten() - print(f'Proportion of filtered: {filtered_children.size} / {len(mutated_perfs)} = {int(filtered_children.size / len(mutated_perfs)*100)}%') - plt.figure() - plt.scatter(np.array(perfs)[non_filtered_ids], np.array(mutated_perfs)[non_filtered_ids], s=100, alpha=0.5) - plt.xlabel('parent perf') - plt.ylabel('child perf') - print(np.corrcoef(np.array(perfs)[non_filtered_ids], np.array(mutated_perfs)[non_filtered_ids])[0, 1]) - plt.show() - stop = 1 - -def run_comparisons(): - np.random.seed(0) - indexes = np.arange(cocktail_reps.shape[0]) - np.random.shuffle(indexes) - for n_neighbors in [0, 5]: - id_str_neigh = '5neigh_' if n_neighbors == 5 else '0_neigh_' - for asexual_rep in [True, False]: - id_str_as = id_str_neigh + 'asexual_' if asexual_rep else id_str_neigh - for crossover in [True, False]: - id_str = id_str_as + 'crossover_' if crossover else id_str_as - if crossover or asexual_rep: - mutation_params = dict(p_add_ing = 0.5, - p_remove_ing = 0.5, - p_change_q = 0.5, - delta_change_q = 0.3, - asexual_rep=asexual_rep, - crossover=crossover, - ingredient_addition = (0.1, 0.05)) - nb_generations = 100 - pop_size=100 - nb_elites=10 - dist = 'mse' - results = dict() - print(id_str) - for i, ind in enumerate(indexes[:30]): - print(i+1) - target_ing_str = data['ingredients_str'][ind] - target = cocktail_reps[ind] - population = Population(nb_generations=nb_generations, pop_size=pop_size, nb_elite=nb_elites, - target=target, dist=dist, mutation_params=mutation_params, - n_neighbors=n_neighbors, target_ing_str=target_ing_str, true_prep_type=data['category'][ind]) - population.run_evolution(verbose=False) - best_scores, best_ind = population.get_best_score() - recipes = [ind.get_recipe()[3] for ind in best_ind[:5]] - results[str(ind)] = dict(best_scores=best_scores[:5], recipes=recipes, target=population.target_individual.get_recipe()[3]) - with open(f'/home/cedric/Desktop/ga_tests_{id_str}.pickle', 'wb') as f: - pickle.dump(results, f) - -def get_cocktail_distribution(cocktail_reps): - return (np.mean(cocktail_reps, axis=0), np.cov(cocktail_reps, rowvar=0)) - -def sample_cocktails(cocktail_reps, n=10, target_affective_cluster=None, to_print=True): - distrib = get_cocktail_distribution(cocktail_reps) - sampled_cocktail_reps = np.random.multivariate_normal(distrib[0], distrib[1], size=n) - recipes = [] - closest_recipes = [] - for i_c, cr in enumerate(sampled_cocktail_reps): - population = setup_recipe_generation(cr.copy(), target_affective_cluster=target_affective_cluster) - closest_recipes.append(population.nn_recipes[0]) - best_scores, best_individuals = population.run_evolution() - recipes.append(best_individuals[0].get_recipe()[3]) - if to_print: - print(f'Sample #{len(recipes)}:') - print(recipes[-1]) - print('Closest from dataset:') - print(closest_recipes[-1]) - stop = 1 - return recipes, closest_recipes - -def setup_recipe_generation(target, known_target_dict=None, target_affective_cluster=None): - # pop_params = dict(mutation_params=dict(p_add_ing=0.7, - # p_remove_ing=0.7, - # p_switch_ing=0.5, - # p_change_q=0.7, - # delta_change_q=0.3, - # asexual_rep=True, - # crossover=True, - # ingredient_addition=(0.1, 0.05)), - # nb_generations=2, #100 - # pop_size=5, #100 - # nb_elites=2, #10 - # dist='mse', - # n_neighbors=3) #5 - pop_params = dict(mutation_params=dict(p_add_ing=0.4, - p_remove_ing=1, - p_switch_ing=0.5, - 
p_change_q=1, - delta_change_q=0.3, - asexual_rep=True, - crossover=True, - ingredient_addition=(0.1, 0.05)), - nb_generations=100, # 100 - pop_size=100, # 100 - nb_elites=10, # 10 - dist='mse', - n_neighbors=5) # 5 - - population = Population(target=target, target_affective_cluster=target_affective_cluster, known_target_dict=known_target_dict, pop_params=pop_params) - return population - -def cocktailrep2recipe(cocktail_rep, unit='mL', target_affective_cluster=None, known_target_dict=None, n_output=1, return_ind=False, verbose=True, full_verbose=False, level=0): - init_time = time.time() - if verbose: print(' ' * level + 'Generating cocktail..') - if cocktail_rep.ndim > 1: - assert cocktail_rep.shape[0] == 1 - cocktail_rep = cocktail_rep.flatten() - # target_affective_cluster = target_affective_cluster[0] - population = setup_recipe_generation(cocktail_rep.copy(), known_target_dict=known_target_dict, target_affective_cluster=target_affective_cluster) - if full_verbose: - print(' ' * (level + 2) + '3 nearest neighbors:') - for i, recipe, score in zip(range(3), population.nn_recipes[:3], population.nn_scores[:3]): - print(' ' * (level + 4) + f'#{i+1}, score: {score:.2f}') - print(' ' * (level + 4) + recipe[1:].replace('None ()', '').replace('\t\t', ' ' * (level + 6))) - best_scores, best_individuals = population.run_evolution(verbose=full_verbose, level=level+2) - for i in range(n_output): - best_individuals[i].make_recipe_fit_the_glass() - instructions = [ind.get_instructions() for ind in best_individuals[:n_output]] - recipes = [ind.get_recipe(unit=unit)[3] for ind in best_individuals[:n_output]] - glasses = [ind.glass for ind in best_individuals[:n_output]] - prep_types = [ind.prep_type for ind in best_individuals[:n_output]] - for i, g, p, inst in zip(range(len(recipes)), glasses, prep_types, instructions): - recipes[i] = recipes[i].replace('Recipe', 'Ingredients') + f'Serve in:\n {g.capitalize()} glass.\n' + inst - if full_verbose: - print(f'\n--------------\n{n_output} best results:') - for i, recipe, score in zip(range(n_output), recipes, best_scores[:n_output]): - print(f'#{i+1}, score: {score:.2f}') - print(recipe) - if verbose: print(' ' * (level + 2) + f'Generated in {int(time.time() - init_time)} seconds.') - if return_ind: - return recipes, best_scores[:n_output], best_individuals[:n_output] - else: - return recipes, best_scores[:n_output] - - -def interpolate(cocktail_rep1, cocktail_rep2, alpha, verbose=False): - recipe, score = cocktailrep2recipe(alpha * cocktail_rep1 + (1 - alpha) * cocktail_rep2, verbose=verbose) - return recipe[0], score - -def interpolation_study(n_steps, cocktail_reps): - alphas = np.arange(0, 1 + 1e-6, 1/(n_steps + 1)) - indexes = np.random.choice(np.arange(cocktail_reps.shape[0]), size=2, replace=False) - target_ing_str1, target_ing_str2 = data['ingredients_str'][indexes[0]], data['ingredients_str'][indexes[1]] - cocktail_rep1, cocktail_rep2 = cocktail_reps[indexes[0]], cocktail_reps[indexes[1]] - recipes, scores = [], [] - for alpha in alphas: - recipe, score = interpolate(cocktail_rep1, cocktail_rep2, alpha) - recipes.append(recipe) - scores.append(score[0]) - print('Point A:') - print_recipe(ingredient_str=target_ing_str2) - for i, alpha in enumerate(alphas): - print(f'Alpha = {alpha}, score = {scores[i]}') - print(recipes[i]) - print('Point B:') - print_recipe(ingredient_str=target_ing_str1) - stop = 1 - -def test_robustness_affective_cluster(cocktail_reps): - indexes = np.arange(cocktail_reps.shape[0]) - np.random.shuffle(indexes) - matches = [] - 
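For context on how the pop_params above are consumed: Population.run_evolution (defined elsewhere in the repo, not shown here) presumably iterates an elitist select-and-mutate loop over candidate recipes. A purely hypothetical sketch of that kind of loop, with invented names and a toy fitness function, just to illustrate the control flow that pop_size, nb_elites and nb_generations configure:

import random

def run_evolution_sketch(init_pop, fitness, mutate, nb_generations=100, nb_elites=10):
    pop = list(init_pop)
    for _ in range(nb_generations):
        pop.sort(key=fitness, reverse=True)
        elites = pop[:nb_elites]                    # best individuals carried over unchanged
        children = [mutate(random.choice(elites)) for _ in range(len(pop) - nb_elites)]
        pop = elites + children
    return max(pop, key=fitness)

# toy usage: maximize -|x - 3| starting from random floats
best = run_evolution_sketch(
    init_pop=[random.uniform(-10.0, 10.0) for _ in range(100)],
    fitness=lambda x: -abs(x - 3.0),
    mutate=lambda x: x + random.gauss(0.0, 0.3),
)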
for i in indexes: - target_ing_str = data['ingredients_str'][i] - true_prep_type = data['category'][i] - target = cocktail_reps[i] - # get affective cluster - recipes, best_scores, best_inds = cocktailrep2recipe(cocktail_rep=target, target_ing_str=target_ing_str, true_prep_type=true_prep_type, n_output=1, verbose=False, - return_ind=True) - - matches.append(best_inds[0].does_affective_cluster_match()) - print(np.mean(matches)) - -def test(cocktail_reps): - indexes = np.arange(these_cocktail_reps.shape[0]) - unnormalized_cr = np.array([data[k] for k in rep_keys]).transpose() - - for i in indexes: - target_ing_str = data['ingredients_str'][i] - true_prep_type = data['category'][i] - target = these_cocktail_reps[i] - # print('preptype:', true_prep_type) - # print('cocktail unnormalized', np.sum(unnormalized_cr[i]), unnormalized_cr[i]) - # print('cocktail hand normalized', np.sum(normalize_cocktail(unnormalized_cr[i])), normalize_cocktail(unnormalized_cr[i])) - # print('cocktail rep normalized', np.sum(these_cocktail_reps[i]), these_cocktail_reps[i]) - # print('cocktail rep normalized', np.sum(all_reps[i]), all_reps[i]) - - population = setup_recipe_generation(target.copy(), target_ing_str=target_ing_str, target_affective_cluster=None, true_prep_type=true_prep_type) - target = population.target_individual - target.compute_perf() - if target.perf < -50: - print(i) - print_recipe(target_ing_str) - if not target.is_alcohol_present(): print('No alcohol') - if not target.is_total_volume_enough(): print('small volume') - if not target.does_fit_glass(): - print(target.end_volume) - print(glass_volume[target.get_glass_type()] * 0.81) - print('too much volume') - if not target.is_alcohol_reasonable(): - print(f'amount of alcohol too small or too large: {target.alcohol_precentage}') - stop = 1 - - -if __name__ == '__main__': - these_cocktail_reps = COCKTAIL_REPS.copy() - # test_crossover(these_cocktail_reps) - # test_mutation_params(these_cocktail_reps) - # test(these_cocktail_reps) - # recipes, closest_recipes = sample_cocktails(these_cocktail_reps, n=10) - # interpolation_study(n_steps=4, cocktail_reps=these_cocktail_reps) - # test_robustness_affective_cluster(these_cocktail_reps) - indexes = np.arange(these_cocktail_reps.shape[0]) - np.random.shuffle(indexes) - # test_crossover(mutation_params, dist) - # test_mutation_params(mutation_params, dist) - stop = 1 - unnormalized_cr = np.array([data[k] for k in rep_keys]).transpose() - for i in indexes: - print(i) - target_ing_str = data['ingredients_str'][i] - target_prep_type = data['category'][i] - target_glass = data['glass'][i] - - print('preptype:', target_prep_type) - print('cocktail unnormalized', np.sum(unnormalized_cr[i]), unnormalized_cr[i]) - print('cocktail hand normalized', np.sum(normalize_cocktail(unnormalized_cr[i])), normalize_cocktail(unnormalized_cr[i])) - print('cocktail rep normalized', np.sum(these_cocktail_reps[i]), these_cocktail_reps[i]) - print('cocktail rep normalized', np.sum(all_reps[i]), all_reps[i]) - print(i) - - print('___________Target') - nn_model = NearestNeighbors() - nn_model.fit(these_cocktail_reps) - dists, indexes = nn_model.kneighbors(these_cocktail_reps[i].reshape(1, -1)) - print(indexes) - print_recipe(target_ing_str) - target = these_cocktail_reps[i] - known_target_dict = dict(prep_type=target_prep_type, - ing_str=target_ing_str, - glass=target_glass) - recipes, best_scores = cocktailrep2recipe(cocktail_rep=target, known_target_dict=known_target_dict, n_output=1, verbose=True, full_verbose=True) - - stop = 1 \ 
No newline at end of file diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/yolo_pafpn.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/yolo_pafpn.py deleted file mode 100644 index 4c4e18a5c3273ecdd878444cc42965e6a24a0cd1..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/yolo_pafpn.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python -# -*- encoding: utf-8 -*- -# Copyright (c) Megvii Inc. All rights reserved. - -import torch -import torch.nn as nn - -from .darknet import CSPDarknet -from .network_blocks import BaseConv, CSPLayer, DWConv - - -class YOLOPAFPN(nn.Module): - """ - YOLOv3 model. Darknet 53 is the default backbone of this model. - """ - - def __init__( - self, - depth=1.0, - width=1.0, - in_features=("dark3", "dark4", "dark5"), - in_channels=[256, 512, 1024], - depthwise=False, - act="silu", - ): - super().__init__() - self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act) - self.in_features = in_features - self.in_channels = in_channels - Conv = DWConv if depthwise else BaseConv - - self.upsample = nn.Upsample(scale_factor=2, mode="nearest") - self.lateral_conv0 = BaseConv( - int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act - ) - self.C3_p4 = CSPLayer( - int(2 * in_channels[1] * width), - int(in_channels[1] * width), - round(3 * depth), - False, - depthwise=depthwise, - act=act, - ) # cat - - self.reduce_conv1 = BaseConv( - int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act - ) - self.C3_p3 = CSPLayer( - int(2 * in_channels[0] * width), - int(in_channels[0] * width), - round(3 * depth), - False, - depthwise=depthwise, - act=act, - ) - - # bottom-up conv - self.bu_conv2 = Conv( - int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act - ) - self.C3_n3 = CSPLayer( - int(2 * in_channels[0] * width), - int(in_channels[1] * width), - round(3 * depth), - False, - depthwise=depthwise, - act=act, - ) - - # bottom-up conv - self.bu_conv1 = Conv( - int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act - ) - self.C3_n4 = CSPLayer( - int(2 * in_channels[1] * width), - int(in_channels[2] * width), - round(3 * depth), - False, - depthwise=depthwise, - act=act, - ) - - def forward(self, input): - """ - Args: - inputs: input images. - - Returns: - Tuple[Tensor]: FPN feature. 
- """ - - # backbone - out_features = self.backbone(input) - features = [out_features[f] for f in self.in_features] - [x2, x1, x0] = features - - fpn_out0 = self.lateral_conv0(x0) # 1024->512/32 - f_out0 = self.upsample(fpn_out0) # 512/16 - f_out0 = torch.cat([f_out0, x1], 1) # 512->1024/16 - f_out0 = self.C3_p4(f_out0) # 1024->512/16 - - fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16 - f_out1 = self.upsample(fpn_out1) # 256/8 - f_out1 = torch.cat([f_out1, x2], 1) # 256->512/8 - pan_out2 = self.C3_p3(f_out1) # 512->256/8 - - p_out1 = self.bu_conv2(pan_out2) # 256->256/16 - p_out1 = torch.cat([p_out1, fpn_out1], 1) # 256->512/16 - pan_out1 = self.C3_n3(p_out1) # 512->512/16 - - p_out0 = self.bu_conv1(pan_out1) # 512->512/32 - p_out0 = torch.cat([p_out0, fpn_out0], 1) # 512->1024/32 - pan_out0 = self.C3_n4(p_out0) # 1024->1024/32 - - outputs = (pan_out2, pan_out1, pan_out0) - return outputs diff --git a/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/task/caption.py b/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/task/caption.py deleted file mode 100644 index 0cff2e484bb4addbe7b530da392e5460b6d6a0ff..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/task/caption.py +++ /dev/null @@ -1,419 +0,0 @@ -from lavis.datasets.builders import load_dataset -import torch -import more_itertools -from tqdm import tqdm -from coco_metric import compute_cider, postprocess_captioning_generation -import json -import time -import os -from transformers import LogitsProcessor, MinNewTokensLengthLogitsProcessor, ForcedEOSTokenLogitsProcessor -from PIL import Image - -class VisualLogitsProcessor(LogitsProcessor): - def __init__(self, tokenizer): - super().__init__() - self.tokenizer = tokenizer - self.object_token_id = self.tokenizer("<|#object#|>", add_special_tokens=False)["input_ids"][-1] - self.prebox_token_id = self.tokenizer("<|#prebox#|>", add_special_tokens=False)["input_ids"][-1] - self.box_token_id = self.tokenizer("<|#box#|>", add_special_tokens=False)["input_ids"][-1] - self.previsual_token_id = self.tokenizer("<|#previsual#|>", add_special_tokens=False)["input_ids"][-1] - self.visual_token_id = self.tokenizer("<|#visual#|>", add_special_tokens=False)["input_ids"][-1] - self.eos_token_id = self.tokenizer.encode(self.tokenizer.eos_token)[-1] - self.endofobject_token_id = self.tokenizer("<|#endofobject#|>", add_special_tokens=False)["input_ids"][-1] - self.topk = 2 - - def __call__(self, input_ids, scores): - # print("decoding===>", self.tokenizer.decode(scores.sort(descending=True).indices.tolist()[0][:self.topk])) - # import pdb; pdb.set_trace() - if self.object_token_id in scores.sort(descending=True).indices.tolist()[0][1:self.topk] and self.eos_token_id not in scores.sort(descending=True).indices.tolist()[0][:self.topk] and (input_ids == self.object_token_id).sum() * 2 == (input_ids == self.endofobject_token_id).sum(): - scores[0, self.object_token_id] = 1000 - if input_ids[0, -1] == self.object_token_id and input_ids[0, -2] != self.prebox_token_id: - if (input_ids[0, :-1] == self.object_token_id).sum() != 0: - # print("generate a previsual token next") - scores[0, self.previsual_token_id] = 1000 - elif input_ids[0, -1] == self.previsual_token_id or input_ids[0, -1] == self.visual_token_id: - # print("stop to run bbox generation for " + "previsual" if input_ids[0, -1] == self.previsual_token_id else "visual") - scores[0, self.eos_token_id] = 1000 - elif input_ids[0, -1] == self.endofobject_token_id and 
input_ids[0, -2] != self.box_token_id: - # print("generate a visual token next") - scores[0, self.visual_token_id] = 1000 - return scores - - -def prepare_batch_images(batch, image_processor): - batch_images = None - for b in batch: - b_image = image_processor(b["image"]).unsqueeze(0).unsqueeze(1).unsqueeze(0) - if batch_images is None: - batch_images = b_image - else: - batch_images = torch.cat([batch_images, b_image], dim=0) - return batch_images - - -def captioner( - model,tokenizer,image_ori,batch_images,input_ids,attention_mask,image_start_index_list,image_nums,added_bbox_list,debug=False): - """Evaluate a model on COCO dataset. - Returns: - float: CIDEr score - - """ - visual_logits_processor = VisualLogitsProcessor(tokenizer) - model.eval() - # model.eval().cuda() - lang_encoder_name = model.lang_encoder.__class__.__name__.lower() - media_token_id = tokenizer("<|#image#|>", add_special_tokens=False)["input_ids"][-1] - endofmedia_token_id = tokenizer("<|#endofimage#|>", add_special_tokens=False)["input_ids"][-1] - pad_token_id = tokenizer(tokenizer.pad_token, add_special_tokens=False)["input_ids"][-1] - bos_token_id = tokenizer(tokenizer.bos_token, add_special_tokens=False)["input_ids"][-1] - previsual_token_id = tokenizer("<|#previsual#|>", add_special_tokens=False)["input_ids"][-1] - visual_token_id = tokenizer("<|#visual#|>", add_special_tokens=False)["input_ids"][-1] - box_token = "<|#box#|>" - prebox_token = "<|#prebox#|>" - endofobject_token = "<|#endofobject#|>" - object_token = "<|#object#|>" - ori_prompt_length = len(input_ids[0]) - have_prebox = False - out_image = None - while True: - batch_images = batch_images - input_ids = input_ids - attention_mask = attention_mask - image_start_index_list = image_start_index_list - image_nums = image_nums - if debug: - print("input--->",tokenizer.decode(input_ids[0])) - p1 = MinNewTokensLengthLogitsProcessor( - prompt_length_to_skip=input_ids.shape[-1], - min_new_tokens=5, - eos_token_id=bos_token_id, - ) - with torch.inference_mode(): - outputs = model.generate( - batch_images, - input_ids, - attention_mask=attention_mask, - max_new_tokens=20, - # min_new_tokens=8, - num_beams=1, - # length_penalty=0, - image_start_index_list=image_start_index_list, - image_nums=image_nums, - added_bbox_list=added_bbox_list if len(added_bbox_list) != 0 else None, - logits_processor_list=[p1, visual_logits_processor], - ) - if debug: - print("outputs--->",tokenizer.decode(outputs[0])) - if outputs[0, -2] in [previsual_token_id, visual_token_id] and outputs[0, -1] == bos_token_id: - prompt = tokenizer.decode(outputs.clone()[0]) - is_visual = (outputs[0, -2] == visual_token_id) - batch_text = tokenizer.batch_decode(outputs[:, :-1]) - encodings = tokenizer( - batch_text, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - input_ids = encodings["input_ids"] - attention_mask = encodings["attention_mask"] - image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - if debug: - print("get the visual bbox--->",tokenizer.decode(input_ids[0])) - with torch.no_grad(): - outputs = model( - vision_x=batch_images, - lang_x=input_ids, - attention_mask=attention_mask, - image_nums=image_nums, - image_start_index_list=image_start_index_list, - added_bbox_list=added_bbox_list if len(added_bbox_list) != 0 else None, - add_box=added_bbox_list is not None and len(added_bbox_list) != 0, - ) - boxes 
= outputs["boxes"] - scores = outputs["scores"] - # if not model.valid: - # import pdb; pdb.set_trace() - if boxes is not None: - if is_visual: - if have_prebox: - added_bbox_list.pop() - prompt = prompt.replace("<|#previsual#|><|#prebox#|><|#object#|>", "") - have_prebox = False - if debug: - print("find previsual and remove it--->", prompt) - first_box = boxes[scores.argmax()] - added_bbox_list += [torch.tensor(first_box).unsqueeze(0) / 224] - prompt = prompt[:-len(tokenizer.eos_token)] - prompt += box_token + endofobject_token - if debug: - print("after inserting visual---->", prompt) - else: - import numpy as np - import cv2 - open_cv_image = np.array(image_ori) - open_cv_image = open_cv_image[:, :, ::-1].copy() - for i, pre_box in enumerate(boxes): - open_cv_image = cv2.rectangle(open_cv_image, pre_box[:2].astype(int), pre_box[2:].astype(int), (0, 255, 0), i+1) - out_image = Image.fromarray(cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)) - # exit() - pre_box = boxes[scores.argmax()] - added_bbox_list += [torch.tensor(pre_box).unsqueeze(0).cuda() / 224] - prompt = prompt[:-len(tokenizer.eos_token)] - prompt += prebox_token + object_token - have_prebox = True - if debug: - print("after inserting previsual---->", prompt) - else: - if debug: - import pdb;pdb.set_trace() - prompt = tokenizer.decode(outputs[0, :-2].clone()[0]) - else: - break - outputs = outputs[:, ori_prompt_length:] - outputs = postprocess_captioning_generation(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]).replace('"', "") - # new_predictions = [ - # postprocess_captioning_generation(out).replace('"', "") - # for out in tokenizer.batch_decode(outputs, skip_special_tokens=True) - # ] - # import pdb; pdb.set_trace() - return outputs, out_image - - -def evaluate_coco_flickr( - model, - tokenizer, - image_processor, - batch_size, - is_flickr=False, - vis_embed_size=None, - rank=0, - world_size=1, - id=0, - debug=False, -): - """Evaluate a model on COCO dataset. 
- Returns: - float: CIDEr score - - """ - visual_logits_processor = VisualLogitsProcessor(tokenizer) - coco_dataset = load_dataset("coco_caption") - eval_dataset = coco_dataset["test"] - model.eval().cuda() - predictions = dict() - lang_encoder_name = model.lang_encoder.__class__.__name__.lower() - media_token_id = tokenizer("<|#image#|>", add_special_tokens=False)["input_ids"][-1] - endofmedia_token_id = tokenizer("<|#endofimage#|>", add_special_tokens=False)["input_ids"][-1] - pad_token_id = tokenizer(tokenizer.pad_token, add_special_tokens=False)["input_ids"][-1] - bos_token_id = tokenizer(tokenizer.bos_token, add_special_tokens=False)["input_ids"][-1] - previsual_token_id = tokenizer("<|#previsual#|>", add_special_tokens=False)["input_ids"][-1] - visual_token_id = tokenizer("<|#visual#|>", add_special_tokens=False)["input_ids"][-1] - box_token = "<|#box#|>" - prebox_token = "<|#prebox#|>" - endofobject_token = "<|#endofobject#|>" - object_token = "<|#object#|>" - cnt = 0 - if world_size > 1: - torch.distributed.barrier() - desc = "Running inference Flickr30" if is_flickr else "Running inference COCO" - for ii, batch in enumerate(more_itertools.chunked( - tqdm(eval_dataset, desc=desc, disable=(rank != 0)), batch_size - )): - if ii % world_size != rank: - continue - cnt += len(batch) - batch[0]["image"] = Image.open("/gpfs/u/home/LMCG/LMCGljnn/scratch/images/img3.jpg").resize((224, 224)) - batch_images = prepare_batch_images( - batch=batch, - image_processor=image_processor, - ).cuda() - prompt = f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>" - added_bbox_list = [] - batch_text = [prompt for _ in batch] - encodings = tokenizer( - batch_text, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - ori_prompt_length = len(encodings["input_ids"][0]) - have_prebox = False - while True: - batch_text = [prompt for _ in batch] - encodings = tokenizer( - batch_text, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - input_ids = encodings["input_ids"].cuda() - attention_mask = encodings["attention_mask"].cuda() - image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - if debug: - print("input--->",tokenizer.decode(input_ids[0])) - p1 = MinNewTokensLengthLogitsProcessor( - prompt_length_to_skip=input_ids.shape[-1], - min_new_tokens=5, - eos_token_id=bos_token_id, - ) - with torch.inference_mode() and torch.cuda.amp.autocast(dtype=torch.float16): - outputs = model.generate( - batch_images, - input_ids, - attention_mask=attention_mask, - max_new_tokens=20, - # min_new_tokens=8, - num_beams=1, - # length_penalty=0, - image_start_index_list=image_start_index_list, - image_nums=image_nums, - added_bbox_list=added_bbox_list if len(added_bbox_list) != 0 else None, - logits_processor_list=[p1, visual_logits_processor], - ) - if debug: - print("outputs--->",tokenizer.decode(outputs[0])) - if outputs[0, -2] in [previsual_token_id, visual_token_id] and outputs[0, -1] == bos_token_id: - prompt = tokenizer.decode(outputs.clone()[0]) - is_visual = (outputs[0, -2] == visual_token_id) - batch_text = tokenizer.batch_decode(outputs[:, :-1]) - encodings = tokenizer( - batch_text, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - input_ids = encodings["input_ids"].cuda() - attention_mask = encodings["attention_mask"].cuda() - 
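The generation loop above steers decoding by handing logits processors to generate(); the custom VisualLogitsProcessor simply overwrites next-token scores in place to force grounding tokens. A minimal standalone sketch of that mechanism (ForceTokenSketch and the toy tensors are illustrative, not the repo's class):

import torch
from transformers import LogitsProcessor

class ForceTokenSketch(LogitsProcessor):
    def __init__(self, token_id):
        self.token_id = token_id

    def __call__(self, input_ids, scores):
        scores[:, self.token_id] = 1000.0       # make this token all but certain
        return scores

scores = torch.zeros(1, 10)
forced = ForceTokenSketch(token_id=7)(torch.zeros(1, 3, dtype=torch.long), scores)
assert int(forced.argmax(dim=-1)) == 7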
image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - if debug: - print("get the visual bbox--->",tokenizer.decode(input_ids[0])) - with torch.cuda.amp.autocast(dtype=torch.float16) and torch.no_grad(): - outputs = model( - vision_x=batch_images, - lang_x=input_ids, - attention_mask=attention_mask, - image_nums=image_nums, - image_start_index_list=image_start_index_list, - added_bbox_list=added_bbox_list if len(added_bbox_list) != 0 else None, - add_box=added_bbox_list is not None and len(added_bbox_list) != 0, - ) - boxes = outputs["boxes"] - scores = outputs["scores"] - # if not model.valid: - # import pdb; pdb.set_trace() - if boxes is not None: - if is_visual: - if have_prebox: - added_bbox_list.pop() - prompt = prompt.replace("<|#previsual#|><|#prebox#|><|#object#|>", "") - have_prebox = False - if debug: - print("find previsual and remove it--->", prompt) - first_box = boxes[scores.argmax()] - added_bbox_list += [torch.tensor(first_box).unsqueeze(0).cuda() / 224] - prompt = prompt[:-len(tokenizer.eos_token)] - prompt += box_token + endofobject_token - if debug: - print("after inserting visual---->", prompt) - else: - import numpy as np - import cv2 - open_cv_image = np.array(batch[0]["image"]) - open_cv_image = open_cv_image[:, :, ::-1].copy() - for i, pre_box in enumerate(boxes): - open_cv_image = cv2.rectangle(open_cv_image, pre_box[:2].astype(int), pre_box[2:].astype(int), (0, 255, 0), i+1) - cv2.imwrite("Atest.png", open_cv_image) - exit() - pre_box = boxes[scores.argmax()] - added_bbox_list += [torch.tensor(pre_box).unsqueeze(0).cuda() / 224] - prompt = prompt[:-len(tokenizer.eos_token)] - prompt += prebox_token + object_token - have_prebox = True - if debug: - print("after inserting previsual---->", prompt) - else: - import pdb;pdb.set_trace() - prompt = tokenizer.decode(outputs[0, :-2].clone()[0]) - else: - break - outputs = outputs[:, ori_prompt_length:] - new_predictions = [ - postprocess_captioning_generation(out).replace('"', "") - for out in tokenizer.batch_decode(outputs, skip_special_tokens=True) - ] - # import pdb; pdb.set_trace() - if rank == 0: - tqdm.write(new_predictions[0]) - for i, sample in enumerate(batch): - predictions[int(sample["image_id"])] = { - "caption": new_predictions[i], - } - print(new_predictions) - exit() - results_path = ( - f"flickrresults_{lang_encoder_name}_{rank}_{id}.json" - if is_flickr - else f"cocoresults_{lang_encoder_name}_{rank}_{id}.json" - ) - with open(results_path, "w") as f: - f.write( - json.dumps( - [ - {"image_id": k, "caption": predictions[k]["caption"]} - for k in predictions - ], - indent=2, - ) - ) - print("save to", results_path) - del predictions - time.sleep(10) - if world_size > 1: - torch.distributed.barrier() - if rank == 0: - print(f"evaluate on rank {rank}. 
world size is {world_size}") - predictions = [] - for rank_i in range(world_size): - part_results_path = ( - f"flickrresults_{lang_encoder_name}_{rank_i}_{id}.json" - if is_flickr - else f"cocoresults_{lang_encoder_name}_{rank_i}_{id}.json" - ) - print("load", part_results_path) - predictions.extend(json.load(open(part_results_path))) - os.remove(part_results_path) - print("num:", len(predictions)) - results_path = ( - f"flickrresults_{lang_encoder_name}.json" - if is_flickr - else f"cocoresults_{lang_encoder_name}.json" - ) - json.dump(predictions, open(results_path, "w"), indent=2) - - metrics = compute_cider( - result_path=results_path, - annotations_path="/gpfs/u/home/LMCG/LMCGljnn/scratch/.cache/lavis/coco_gt/coco_karpathy_test_gt.json", - ) - metrics["CIDEr"] *= 100 - os.makedirs("eval_results", exist_ok=True) - acc = metrics["CIDEr"] - with open(os.path.join("eval_results", f"cococap_{model.expr_name}_{model.step_num}_{int(time.time())}_{acc}"), "w") as f: - f.write(json.dumps(predictions, indent=2)) - - # delete the temporary file - os.remove(results_path) - else: - metrics = {} - metrics["CIDEr"] = 0.0 - - return metrics["CIDEr"] diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/sentence_splitter.py b/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/sentence_splitter.py deleted file mode 100644 index 54a07967efa31c31ee1219d1a25808df0108388a..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/sentence_splitter.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import re - -from filelock import FileLock - - -try: - import nltk - - NLTK_AVAILABLE = True -except (ImportError, ModuleNotFoundError): - NLTK_AVAILABLE = False - -if NLTK_AVAILABLE: - with FileLock(".lock") as lock: - nltk.download("punkt", quiet=True) - - -def add_newline_to_end_of_each_sentence(x: str) -> str: - """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS.""" - re.sub("", "", x) # remove pegasus newline char - assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. 
(pip install nltk)" - return "\n".join(nltk.sent_tokenize(x)) diff --git a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/server.py b/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/server.py deleted file mode 100644 index 2ecd50307fdae5c5e26d8cc9453de296532b95ff..0000000000000000000000000000000000000000 --- a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/server.py +++ /dev/null @@ -1,170 +0,0 @@ -from flask import Flask, request, Response -from io import BytesIO -import torch -from av import open as avopen - -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -from scipy.io import wavfile - -# Flask Init -app = Flask(__name__) -app.config["JSON_AS_ASCII"] = False - - -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - del word2ph - assert bert.shape[-1] == len(phone), phone - - if language_str == "ZH": - bert = bert - ja_bert = torch.zeros(768, len(phone)) - elif language_str == "JA": - ja_bert = bert - bert = torch.zeros(1024, len(phone)) - else: - bert = torch.zeros(1024, len(phone)) - ja_bert = torch.zeros(768, len(phone)) - assert bert.shape[-1] == len( - phone - ), f"Bert seq len {bert.shape[-1]} != {len(phone)}" - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - return bert, ja_bert, phone, tone, language - - -def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language): - bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps) - with torch.no_grad(): - x_tst = phones.to(dev).unsqueeze(0) - tones = tones.to(dev).unsqueeze(0) - lang_ids = lang_ids.to(dev).unsqueeze(0) - bert = bert.to(dev).unsqueeze(0) - ja_bert = ja_bert.to(device).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev) - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev) - audio = ( - net_g.infer( - x_tst, - x_tst_lengths, - speakers, - tones, - lang_ids, - bert, - ja_bert, - sdp_ratio=sdp_ratio, - noise_scale=noise_scale, - noise_scale_w=noise_scale_w, - length_scale=length_scale, - )[0][0, 0] - .data.cpu() - .float() - .numpy() - ) - return audio - - -def replace_punctuation(text, i=2): - punctuation = ",。?!" 
- for char in punctuation: - text = text.replace(char, char * i) - return text - - -def wav2(i, o, format): - inp = avopen(i, "rb") - out = avopen(o, "wb", format=format) - if format == "ogg": - format = "libvorbis" - - ostream = out.add_stream(format) - - for frame in inp.decode(audio=0): - for p in ostream.encode(frame): - out.mux(p) - - for p in ostream.encode(None): - out.mux(p) - - out.close() - inp.close() - - -# Load Generator -hps = utils.get_hparams_from_file("./configs/config.json") - -dev = "cuda" -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model, -).to(dev) -_ = net_g.eval() - -_ = utils.load_checkpoint("logs/G_649000.pth", net_g, None, skip_optimizer=True) - - -@app.route("/") -def main(): - try: - speaker = request.args.get("speaker") - text = request.args.get("text").replace("/n", "") - sdp_ratio = float(request.args.get("sdp_ratio", 0.2)) - noise = float(request.args.get("noise", 0.5)) - noisew = float(request.args.get("noisew", 0.6)) - length = float(request.args.get("length", 1.2)) - language = request.args.get("language") - if length >= 2: - return "Too big length" - if len(text) >= 250: - return "Too long text" - fmt = request.args.get("format", "wav") - if None in (speaker, text): - return "Missing Parameter" - if fmt not in ("mp3", "wav", "ogg"): - return "Invalid Format" - if language not in ("JA", "ZH"): - return "Invalid language" - except: - return "Invalid Parameter" - - with torch.no_grad(): - audio = infer( - text, - sdp_ratio=sdp_ratio, - noise_scale=noise, - noise_scale_w=noisew, - length_scale=length, - sid=speaker, - language=language, - ) - - with BytesIO() as wav: - wavfile.write(wav, hps.data.sampling_rate, audio) - torch.cuda.empty_cache() - if fmt == "wav": - return Response(wav.getvalue(), mimetype="audio/wav") - wav.seek(0, 0) - with BytesIO() as ofp: - wav2(wav, ofp, fmt) - return Response( - ofp.getvalue(), mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg" - ) diff --git a/spaces/chilge/Fushimi/attentions.py b/spaces/chilge/Fushimi/attentions.py deleted file mode 100644 index 4e0b0c1fd48c962e21e1fbe60b23fc574927435c..0000000000000000000000000000000000000000 --- a/spaces/chilge/Fushimi/attentions.py +++ /dev/null @@ -1,303 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - 
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - 
nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/chilge/Fushimi/inference/infer_tool.py b/spaces/chilge/Fushimi/inference/infer_tool.py deleted file mode 100644 index 3491348b6f91d47133cc450a9df21e97f5f74c48..0000000000000000000000000000000000000000 --- a/spaces/chilge/Fushimi/inference/infer_tool.py +++ /dev/null @@ -1,326 +0,0 @@ -import hashlib -import json -import logging -import os -import time -from pathlib import Path - -import librosa -import maad -import numpy as np -# import onnxruntime -import parselmouth -import soundfile -import torch -import torchaudio - -from hubert import hubert_model -import utils -from models import SynthesizerTrn - -logging.getLogger('matplotlib').setLevel(logging.WARNING) - - -def read_temp(file_name): - if not os.path.exists(file_name): - with open(file_name, "w") as f: - f.write(json.dumps({"info": "temp_dict"})) - return {} - else: - try: - with open(file_name, "r") as f: - data = f.read() - data_dict = json.loads(data) - if os.path.getsize(file_name) > 50 * 1024 * 1024: - f_name = file_name.split("/")[-1] - print(f"clean {f_name}") - for wav_hash in list(data_dict.keys()): - if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600: - del data_dict[wav_hash] - except Exception as e: - print(e) - print(f"{file_name} error,auto rebuild file") - data_dict = {"info": "temp_dict"} - return data_dict - - -def write_temp(file_name, data): - with open(file_name, "w") as f: - f.write(json.dumps(data)) - - -def timeit(func): - def run(*args, **kwargs): - t = time.time() - res = func(*args, **kwargs) - print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t)) - return res - - return run - - -def format_wav(audio_path): - if Path(audio_path).suffix == '.wav': - return - raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None) - soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate) - - -def 
get_end_file(dir_path, end): - file_lists = [] - for root, dirs, files in os.walk(dir_path): - files = [f for f in files if f[0] != '.'] - dirs[:] = [d for d in dirs if d[0] != '.'] - for f_file in files: - if f_file.endswith(end): - file_lists.append(os.path.join(root, f_file).replace("\\", "/")) - return file_lists - - -def get_md5(content): - return hashlib.new("md5", content).hexdigest() - - -def resize2d_f0(x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), - source) - res = np.nan_to_num(target) - return res - -def get_f0(x, p_len,f0_up_key=0): - - time_step = 160 / 16000 * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = parselmouth.Sound(x, 16000).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - - pad_size=(p_len - len(f0) + 1) // 2 - if(pad_size>0 or p_len - len(f0) - pad_size>0): - f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant') - - f0 *= pow(2, f0_up_key / 12) - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0 - -def clean_pitch(input_pitch): - num_nan = np.sum(input_pitch == 1) - if num_nan / len(input_pitch) > 0.9: - input_pitch[input_pitch != 1] = 1 - return input_pitch - - -def plt_pitch(input_pitch): - input_pitch = input_pitch.astype(float) - input_pitch[input_pitch == 1] = np.nan - return input_pitch - - -def f0_to_pitch(ff): - f0_pitch = 69 + 12 * np.log2(ff / 440) - return f0_pitch - - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - - -class Svc(object): - def __init__(self, net_g_path, config_path, hubert_path="hubert/hubert-soft-0d54a1f4.pt", - onnx=False): - self.onnx = onnx - self.net_g_path = net_g_path - self.hubert_path = hubert_path - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.net_g_ms = None - self.hps_ms = utils.get_hparams_from_file(config_path) - self.target_sample = self.hps_ms.data.sampling_rate - self.hop_size = self.hps_ms.data.hop_length - self.speakers = {} - for spk, sid in self.hps_ms.spk.items(): - self.speakers[sid] = spk - self.spk2id = self.hps_ms.spk - # 加载hubert - self.hubert_soft = hubert_model.hubert_soft(hubert_path) - if torch.cuda.is_available(): - self.hubert_soft = self.hubert_soft.cuda() - self.load_model() - - def load_model(self): - # 获取模型配置 - if self.onnx: - raise NotImplementedError - # self.net_g_ms = SynthesizerTrnForONNX( - # 178, - # self.hps_ms.data.filter_length // 2 + 1, - # self.hps_ms.train.segment_size // self.hps_ms.data.hop_length, - # n_speakers=self.hps_ms.data.n_speakers, - # **self.hps_ms.model) - # _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None) - else: - self.net_g_ms = SynthesizerTrn( - self.hps_ms.data.filter_length // 2 + 1, - self.hps_ms.train.segment_size // self.hps_ms.data.hop_length, - **self.hps_ms.model) - _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None) - if "half" in self.net_g_path and torch.cuda.is_available(): - _ = 
self.net_g_ms.half().eval().to(self.dev) - else: - _ = self.net_g_ms.eval().to(self.dev) - - def get_units(self, source, sr): - - source = source.unsqueeze(0).to(self.dev) - with torch.inference_mode(): - start = time.time() - units = self.hubert_soft.units(source) - use_time = time.time() - start - print("hubert use time:{}".format(use_time)) - return units - - - def get_unit_pitch(self, in_path, tran): - source, sr = torchaudio.load(in_path) - source = torchaudio.functional.resample(source, sr, 16000) - if len(source.shape) == 2 and source.shape[1] >= 2: - source = torch.mean(source, dim=0).unsqueeze(0) - soft = self.get_units(source, sr).squeeze(0).cpu().numpy() - f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran) - return soft, f0 - - def infer(self, speaker_id, tran, raw_path): - if type(speaker_id) == str: - speaker_id = self.spk2id[speaker_id] - sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0) - soft, pitch = self.get_unit_pitch(raw_path, tran) - f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.dev) - if "half" in self.net_g_path and torch.cuda.is_available(): - stn_tst = torch.HalfTensor(soft) - else: - stn_tst = torch.FloatTensor(soft) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(self.dev) - start = time.time() - x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2) - audio = self.net_g_ms.infer(x_tst, f0=f0, g=sid)[0,0].data.float() - use_time = time.time() - start - print("vits use time:{}".format(use_time)) - return audio, audio.shape[-1] - - -# class SvcONNXInferModel(object): -# def __init__(self, hubert_onnx, vits_onnx, config_path): -# self.config_path = config_path -# self.vits_onnx = vits_onnx -# self.hubert_onnx = hubert_onnx -# self.hubert_onnx_session = onnxruntime.InferenceSession(hubert_onnx, providers=['CUDAExecutionProvider', ]) -# self.inspect_onnx(self.hubert_onnx_session) -# self.vits_onnx_session = onnxruntime.InferenceSession(vits_onnx, providers=['CUDAExecutionProvider', ]) -# self.inspect_onnx(self.vits_onnx_session) -# self.hps_ms = utils.get_hparams_from_file(self.config_path) -# self.target_sample = self.hps_ms.data.sampling_rate -# self.feature_input = FeatureInput(self.hps_ms.data.sampling_rate, self.hps_ms.data.hop_length) -# -# @staticmethod -# def inspect_onnx(session): -# for i in session.get_inputs(): -# print("name:{}\tshape:{}\tdtype:{}".format(i.name, i.shape, i.type)) -# for i in session.get_outputs(): -# print("name:{}\tshape:{}\tdtype:{}".format(i.name, i.shape, i.type)) -# -# def infer(self, speaker_id, tran, raw_path): -# sid = np.array([int(speaker_id)], dtype=np.int64) -# soft, pitch = self.get_unit_pitch(raw_path, tran) -# pitch = np.expand_dims(pitch, axis=0).astype(np.int64) -# stn_tst = soft -# x_tst = np.expand_dims(stn_tst, axis=0) -# x_tst_lengths = np.array([stn_tst.shape[0]], dtype=np.int64) -# # 使用ONNX Runtime进行推理 -# start = time.time() -# audio = self.vits_onnx_session.run(output_names=["audio"], -# input_feed={ -# "hidden_unit": x_tst, -# "lengths": x_tst_lengths, -# "pitch": pitch, -# "sid": sid, -# })[0][0, 0] -# use_time = time.time() - start -# print("vits_onnx_session.run time:{}".format(use_time)) -# audio = torch.from_numpy(audio) -# return audio, audio.shape[-1] -# -# def get_units(self, source, sr): -# source = torchaudio.functional.resample(source, sr, 16000) -# if len(source.shape) == 2 and source.shape[1] >= 2: -# source = torch.mean(source, dim=0).unsqueeze(0) -# source = source.unsqueeze(0) -# # 使用ONNX Runtime进行推理 -# start = 
time.time() -# units = self.hubert_onnx_session.run(output_names=["embed"], -# input_feed={"source": source.numpy()})[0] -# use_time = time.time() - start -# print("hubert_onnx_session.run time:{}".format(use_time)) -# return units -# -# def transcribe(self, source, sr, length, transform): -# feature_pit = self.feature_input.compute_f0(source, sr) -# feature_pit = feature_pit * 2 ** (transform / 12) -# feature_pit = resize2d_f0(feature_pit, length) -# coarse_pit = self.feature_input.coarse_f0(feature_pit) -# return coarse_pit -# -# def get_unit_pitch(self, in_path, tran): -# source, sr = torchaudio.load(in_path) -# soft = self.get_units(source, sr).squeeze(0) -# input_pitch = self.transcribe(source.numpy()[0], sr, soft.shape[0], tran) -# return soft, input_pitch - - -class RealTimeVC: - def __init__(self): - self.last_chunk = None - self.last_o = None - self.chunk_len = 16000 # 区块长度 - self.pre_len = 3840 # 交叉淡化长度,640的倍数 - - """输入输出都是1维numpy 音频波形数组""" - - def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path): - audio, sr = torchaudio.load(input_wav_path) - audio = audio.cpu().numpy()[0] - temp_wav = io.BytesIO() - if self.last_chunk is None: - input_wav_path.seek(0) - audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path) - audio = audio.cpu().numpy() - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return audio[-self.chunk_len:] - else: - audio = np.concatenate([self.last_chunk, audio]) - soundfile.write(temp_wav, audio, sr, format="wav") - temp_wav.seek(0) - audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav) - audio = audio.cpu().numpy() - ret = maad.util.crossfade(self.last_o, audio, self.pre_len) - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return ret[self.chunk_len:2 * self.chunk_len] diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/contrib/torch_utils.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/contrib/torch_utils.py deleted file mode 100644 index 790c295e488b5fe55e7aaca54a93963288f3bdef..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/contrib/torch_utils.py +++ /dev/null @@ -1,657 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" - -This is a set of function wrappers that override the default numpy versions. - -Interoperability functions for pytorch and Faiss: Importing this will allow -pytorch Tensors (CPU or GPU) to be used as arguments to Faiss indexes and -other functions. Torch GPU tensors can only be used with Faiss GPU indexes. -If this is imported with a package that supports Faiss GPU, the necessary -stream synchronization with the current pytorch stream will be automatically -performed. - -Numpy ndarrays can continue to be used in the Faiss python interface after -importing this file. All arguments must be uniformly either numpy ndarrays -or Torch tensors; no mixing is allowed. 
- -""" - - -import faiss -import torch -import contextlib -import inspect -import sys -import numpy as np - -def swig_ptr_from_UInt8Tensor(x): - """ gets a Faiss SWIG pointer from a pytorch tensor (on CPU or GPU) """ - assert x.is_contiguous() - assert x.dtype == torch.uint8 - return faiss.cast_integer_to_uint8_ptr( - x.storage().data_ptr() + x.storage_offset()) - -def swig_ptr_from_HalfTensor(x): - """ gets a Faiss SWIG pointer from a pytorch tensor (on CPU or GPU) """ - assert x.is_contiguous() - assert x.dtype == torch.float16 - # no canonical half type in C/C++ - return faiss.cast_integer_to_void_ptr( - x.storage().data_ptr() + x.storage_offset() * 2) - -def swig_ptr_from_FloatTensor(x): - """ gets a Faiss SWIG pointer from a pytorch tensor (on CPU or GPU) """ - assert x.is_contiguous() - assert x.dtype == torch.float32 - return faiss.cast_integer_to_float_ptr( - x.storage().data_ptr() + x.storage_offset() * 4) - -def swig_ptr_from_IntTensor(x): - """ gets a Faiss SWIG pointer from a pytorch tensor (on CPU or GPU) """ - assert x.is_contiguous() - assert x.dtype == torch.int32, 'dtype=%s' % x.dtype - return faiss.cast_integer_to_int_ptr( - x.storage().data_ptr() + x.storage_offset() * 4) - -def swig_ptr_from_IndicesTensor(x): - """ gets a Faiss SWIG pointer from a pytorch tensor (on CPU or GPU) """ - assert x.is_contiguous() - assert x.dtype == torch.int64, 'dtype=%s' % x.dtype - return faiss.cast_integer_to_idx_t_ptr( - x.storage().data_ptr() + x.storage_offset() * 8) - -@contextlib.contextmanager -def using_stream(res, pytorch_stream=None): - """ Creates a scoping object to make Faiss GPU use the same stream - as pytorch, based on torch.cuda.current_stream(). - Or, a specific pytorch stream can be passed in as a second - argument, in which case we will use that stream. 
- """ - - if pytorch_stream is None: - pytorch_stream = torch.cuda.current_stream() - - # This is the cudaStream_t that we wish to use - cuda_stream_s = faiss.cast_integer_to_cudastream_t(pytorch_stream.cuda_stream) - - # So we can revert GpuResources stream state upon exit - prior_dev = torch.cuda.current_device() - prior_stream = res.getDefaultStream(torch.cuda.current_device()) - - res.setDefaultStream(torch.cuda.current_device(), cuda_stream_s) - - # Do the user work - try: - yield - finally: - res.setDefaultStream(prior_dev, prior_stream) - -def torch_replace_method(the_class, name, replacement, - ignore_missing=False, ignore_no_base=False): - try: - orig_method = getattr(the_class, name) - except AttributeError: - if ignore_missing: - return - raise - if orig_method.__name__ == 'torch_replacement_' + name: - # replacement was done in parent class - return - - # We should already have the numpy replacement methods patched - assert ignore_no_base or (orig_method.__name__ == 'replacement_' + name) - setattr(the_class, name + '_numpy', orig_method) - setattr(the_class, name, replacement) - -def handle_torch_Index(the_class): - def torch_replacement_add(self, x): - if type(x) is np.ndarray: - # forward to faiss __init__.py base method - return self.add_numpy(x) - - assert type(x) is torch.Tensor - n, d = x.shape - assert d == self.d - x_ptr = swig_ptr_from_FloatTensor(x) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.add_c(n, x_ptr) - else: - # CPU torch - self.add_c(n, x_ptr) - - def torch_replacement_add_with_ids(self, x, ids): - if type(x) is np.ndarray: - # forward to faiss __init__.py base method - return self.add_with_ids_numpy(x, ids) - - assert type(x) is torch.Tensor - n, d = x.shape - assert d == self.d - x_ptr = swig_ptr_from_FloatTensor(x) - - assert type(ids) is torch.Tensor - assert ids.shape == (n, ), 'not same number of vectors as ids' - ids_ptr = swig_ptr_from_IndicesTensor(ids) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.add_with_ids_c(n, x_ptr, ids_ptr) - else: - # CPU torch - self.add_with_ids_c(n, x_ptr, ids_ptr) - - def torch_replacement_assign(self, x, k, labels=None): - if type(x) is np.ndarray: - # forward to faiss __init__.py base method - return self.assign_numpy(x, k, labels) - - assert type(x) is torch.Tensor - n, d = x.shape - assert d == self.d - x_ptr = swig_ptr_from_FloatTensor(x) - - if labels is None: - labels = torch.empty(n, k, device=x.device, dtype=torch.int64) - else: - assert type(labels) is torch.Tensor - assert labels.shape == (n, k) - L_ptr = swig_ptr_from_IndicesTensor(labels) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.assign_c(n, x_ptr, L_ptr, k) - else: - # CPU torch - self.assign_c(n, x_ptr, L_ptr, k) - - return labels - - def torch_replacement_train(self, x): - if type(x) is np.ndarray: - # forward to faiss __init__.py base method - return self.train_numpy(x) - - assert type(x) is torch.Tensor - n, d = x.shape - assert d == self.d - x_ptr = swig_ptr_from_FloatTensor(x) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with 
using_stream(self.getResources()): - self.train_c(n, x_ptr) - else: - # CPU torch - self.train_c(n, x_ptr) - - def torch_replacement_search(self, x, k, D=None, I=None): - if type(x) is np.ndarray: - # forward to faiss __init__.py base method - return self.search_numpy(x, k, D=D, I=I) - - assert type(x) is torch.Tensor - n, d = x.shape - assert d == self.d - x_ptr = swig_ptr_from_FloatTensor(x) - - if D is None: - D = torch.empty(n, k, device=x.device, dtype=torch.float32) - else: - assert type(D) is torch.Tensor - assert D.shape == (n, k) - D_ptr = swig_ptr_from_FloatTensor(D) - - if I is None: - I = torch.empty(n, k, device=x.device, dtype=torch.int64) - else: - assert type(I) is torch.Tensor - assert I.shape == (n, k) - I_ptr = swig_ptr_from_IndicesTensor(I) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.search_c(n, x_ptr, k, D_ptr, I_ptr) - else: - # CPU torch - self.search_c(n, x_ptr, k, D_ptr, I_ptr) - - return D, I - - def torch_replacement_search_and_reconstruct(self, x, k, D=None, I=None, R=None): - if type(x) is np.ndarray: - # Forward to faiss __init__.py base method - return self.search_and_reconstruct_numpy(x, k, D=D, I=I, R=R) - - assert type(x) is torch.Tensor - n, d = x.shape - assert d == self.d - x_ptr = swig_ptr_from_FloatTensor(x) - - if D is None: - D = torch.empty(n, k, device=x.device, dtype=torch.float32) - else: - assert type(D) is torch.Tensor - assert D.shape == (n, k) - D_ptr = swig_ptr_from_FloatTensor(D) - - if I is None: - I = torch.empty(n, k, device=x.device, dtype=torch.int64) - else: - assert type(I) is torch.Tensor - assert I.shape == (n, k) - I_ptr = swig_ptr_from_IndicesTensor(I) - - if R is None: - R = torch.empty(n, k, d, device=x.device, dtype=torch.float32) - else: - assert type(R) is torch.Tensor - assert R.shape == (n, k, d) - R_ptr = swig_ptr_from_FloatTensor(R) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.search_and_reconstruct_c(n, x_ptr, k, D_ptr, I_ptr, R_ptr) - else: - # CPU torch - self.search_and_reconstruct_c(n, x_ptr, k, D_ptr, I_ptr, R_ptr) - - return D, I, R - - def torch_replacement_remove_ids(self, x): - # Not yet implemented - assert type(x) is not torch.Tensor, 'remove_ids not yet implemented for torch' - return self.remove_ids_numpy(x) - - def torch_replacement_reconstruct(self, key, x=None): - # No tensor inputs are required, but with importing this module, we - # assume that the default should be torch tensors. 
If we are passed a - # numpy array, however, assume that the user is overriding this default - if (x is not None) and (type(x) is np.ndarray): - # Forward to faiss __init__.py base method - return self.reconstruct_numpy(key, x) - - # If the index is a CPU index, the default device is CPU, otherwise we - # produce a GPU tensor - device = torch.device('cpu') - if hasattr(self, 'getDevice'): - # same device as the index - device = torch.device('cuda', self.getDevice()) - - if x is None: - x = torch.empty(self.d, device=device, dtype=torch.float32) - else: - assert type(x) is torch.Tensor - assert x.shape == (self.d, ) - x_ptr = swig_ptr_from_FloatTensor(x) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.reconstruct_c(key, x_ptr) - else: - # CPU torch - self.reconstruct_c(key, x_ptr) - - return x - - def torch_replacement_reconstruct_n(self, n0=0, ni=-1, x=None): - if ni == -1: - ni = self.ntotal - - # No tensor inputs are required, but with importing this module, we - # assume that the default should be torch tensors. If we are passed a - # numpy array, however, assume that the user is overriding this default - if (x is not None) and (type(x) is np.ndarray): - # Forward to faiss __init__.py base method - return self.reconstruct_n_numpy(n0, ni, x) - - # If the index is a CPU index, the default device is CPU, otherwise we - # produce a GPU tensor - device = torch.device('cpu') - if hasattr(self, 'getDevice'): - # same device as the index - device = torch.device('cuda', self.getDevice()) - - if x is None: - x = torch.empty(ni, self.d, device=device, dtype=torch.float32) - else: - assert type(x) is torch.Tensor - assert x.shape == (ni, self.d) - x_ptr = swig_ptr_from_FloatTensor(x) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.reconstruct_n_c(n0, ni, x_ptr) - else: - # CPU torch - self.reconstruct_n_c(n0, ni, x_ptr) - - return x - - def torch_replacement_update_vectors(self, keys, x): - if type(keys) is np.ndarray: - # Forward to faiss __init__.py base method - return self.update_vectors_numpy(keys, x) - - assert type(keys) is torch.Tensor - (n, ) = keys.shape - keys_ptr = swig_ptr_from_IndicesTensor(keys) - - assert type(x) is torch.Tensor - assert x.shape == (n, self.d) - x_ptr = swig_ptr_from_FloatTensor(x) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.update_vectors_c(n, keys_ptr, x_ptr) - else: - # CPU torch - self.update_vectors_c(n, keys_ptr, x_ptr) - - # Until the GPU version is implemented, we do not support pre-allocated - # output buffers - def torch_replacement_range_search(self, x, thresh): - if type(x) is np.ndarray: - # Forward to faiss __init__.py base method - return self.range_search_numpy(x, thresh) - - assert type(x) is torch.Tensor - n, d = x.shape - assert d == self.d - x_ptr = swig_ptr_from_FloatTensor(x) - - assert not x.is_cuda, 'Range search using GPU tensor not yet implemented' - assert not hasattr(self, 'getDevice'), 'Range search on GPU index not yet implemented' - - res = faiss.RangeSearchResult(n) - self.range_search_c(n, x_ptr, thresh, res) - - # get pointers and copy them - # FIXME: no rev_swig_ptr equivalent for torch.Tensor, just convert - # np to 
torch - # NOTE: torch does not support np.uint64, just np.int64 - lims = torch.from_numpy(faiss.rev_swig_ptr(res.lims, n + 1).copy().astype('int64')) - nd = int(lims[-1]) - D = torch.from_numpy(faiss.rev_swig_ptr(res.distances, nd).copy()) - I = torch.from_numpy(faiss.rev_swig_ptr(res.labels, nd).copy()) - - return lims, D, I - - def torch_replacement_sa_encode(self, x, codes=None): - if type(x) is np.ndarray: - # Forward to faiss __init__.py base method - return self.sa_encode_numpy(x, codes) - - assert type(x) is torch.Tensor - n, d = x.shape - assert d == self.d - x_ptr = swig_ptr_from_FloatTensor(x) - - if codes is None: - codes = torch.empty(n, self.sa_code_size(), dtype=torch.uint8) - else: - assert codes.shape == (n, self.sa_code_size()) - codes_ptr = swig_ptr_from_UInt8Tensor(codes) - - if x.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.sa_encode_c(n, x_ptr, codes_ptr) - else: - # CPU torch - self.sa_encode_c(n, x_ptr, codes_ptr) - - return codes - - def torch_replacement_sa_decode(self, codes, x=None): - if type(codes) is np.ndarray: - # Forward to faiss __init__.py base method - return self.sa_decode_numpy(codes, x) - - assert type(codes) is torch.Tensor - n, cs = codes.shape - assert cs == self.sa_code_size() - codes_ptr = swig_ptr_from_UInt8Tensor(codes) - - if x is None: - x = torch.empty(n, self.d, dtype=torch.float32) - else: - assert type(x) is torch.Tensor - assert x.shape == (n, self.d) - x_ptr = swig_ptr_from_FloatTensor(x) - - if codes.is_cuda: - assert hasattr(self, 'getDevice'), 'GPU tensor on CPU index not allowed' - - # On the GPU, use proper stream ordering - with using_stream(self.getResources()): - self.sa_decode_c(n, codes_ptr, x_ptr) - else: - # CPU torch - self.sa_decode_c(n, codes_ptr, x_ptr) - - return x - - - torch_replace_method(the_class, 'add', torch_replacement_add) - torch_replace_method(the_class, 'add_with_ids', torch_replacement_add_with_ids) - torch_replace_method(the_class, 'assign', torch_replacement_assign) - torch_replace_method(the_class, 'train', torch_replacement_train) - torch_replace_method(the_class, 'search', torch_replacement_search) - torch_replace_method(the_class, 'remove_ids', torch_replacement_remove_ids) - torch_replace_method(the_class, 'reconstruct', torch_replacement_reconstruct) - torch_replace_method(the_class, 'reconstruct_n', torch_replacement_reconstruct_n) - torch_replace_method(the_class, 'range_search', torch_replacement_range_search) - torch_replace_method(the_class, 'update_vectors', torch_replacement_update_vectors, - ignore_missing=True) - torch_replace_method(the_class, 'search_and_reconstruct', - torch_replacement_search_and_reconstruct, ignore_missing=True) - torch_replace_method(the_class, 'sa_encode', torch_replacement_sa_encode) - torch_replace_method(the_class, 'sa_decode', torch_replacement_sa_decode) - -faiss_module = sys.modules['faiss'] - -# Re-patch anything that inherits from faiss.Index to add the torch bindings -for symbol in dir(faiss_module): - obj = getattr(faiss_module, symbol) - if inspect.isclass(obj): - the_class = obj - if issubclass(the_class, faiss.Index): - handle_torch_Index(the_class) - -# allows torch tensor usage with bfKnn -def torch_replacement_knn_gpu(res, xq, xb, k, D=None, I=None, metric=faiss.METRIC_L2, device=-1): - if type(xb) is np.ndarray: - # Forward to faiss __init__.py base method - return faiss.knn_gpu_numpy(res, xq, xb, k, D, I, metric, 
device) - - nb, d = xb.size() - if xb.is_contiguous(): - xb_row_major = True - elif xb.t().is_contiguous(): - xb = xb.t() - xb_row_major = False - else: - raise TypeError('matrix should be row or column-major') - - if xb.dtype == torch.float32: - xb_type = faiss.DistanceDataType_F32 - xb_ptr = swig_ptr_from_FloatTensor(xb) - elif xb.dtype == torch.float16: - xb_type = faiss.DistanceDataType_F16 - xb_ptr = swig_ptr_from_HalfTensor(xb) - else: - raise TypeError('xb must be f32 or f16') - - nq, d2 = xq.size() - assert d2 == d - if xq.is_contiguous(): - xq_row_major = True - elif xq.t().is_contiguous(): - xq = xq.t() - xq_row_major = False - else: - raise TypeError('matrix should be row or column-major') - - if xq.dtype == torch.float32: - xq_type = faiss.DistanceDataType_F32 - xq_ptr = swig_ptr_from_FloatTensor(xq) - elif xq.dtype == torch.float16: - xq_type = faiss.DistanceDataType_F16 - xq_ptr = swig_ptr_from_HalfTensor(xq) - else: - raise TypeError('xq must be f32 or f16') - - if D is None: - D = torch.empty(nq, k, device=xb.device, dtype=torch.float32) - else: - assert D.shape == (nq, k) - # interface takes void*, we need to check this - assert (D.dtype == torch.float32) - - if I is None: - I = torch.empty(nq, k, device=xb.device, dtype=torch.int64) - else: - assert I.shape == (nq, k) - - if I.dtype == torch.int64: - I_type = faiss.IndicesDataType_I64 - I_ptr = swig_ptr_from_IndicesTensor(I) - elif I.dtype == I.dtype == torch.int32: - I_type = faiss.IndicesDataType_I32 - I_ptr = swig_ptr_from_IntTensor(I) - else: - raise TypeError('I must be i64 or i32') - - D_ptr = swig_ptr_from_FloatTensor(D) - - args = faiss.GpuDistanceParams() - args.metric = metric - args.k = k - args.dims = d - args.vectors = xb_ptr - args.vectorsRowMajor = xb_row_major - args.vectorType = xb_type - args.numVectors = nb - args.queries = xq_ptr - args.queriesRowMajor = xq_row_major - args.queryType = xq_type - args.numQueries = nq - args.outDistances = D_ptr - args.outIndices = I_ptr - args.outIndicesType = I_type - args.device = device - - with using_stream(res): - faiss.bfKnn(res, args) - - return D, I - -torch_replace_method(faiss_module, 'knn_gpu', torch_replacement_knn_gpu, True, True) - -# allows torch tensor usage with bfKnn for all pairwise distances -def torch_replacement_pairwise_distance_gpu(res, xq, xb, D=None, metric=faiss.METRIC_L2, device=-1): - if type(xb) is np.ndarray: - # Forward to faiss __init__.py base method - return faiss.pairwise_distance_gpu_numpy(res, xq, xb, D, metric) - - nb, d = xb.size() - if xb.is_contiguous(): - xb_row_major = True - elif xb.t().is_contiguous(): - xb = xb.t() - xb_row_major = False - else: - raise TypeError('xb matrix should be row or column-major') - - if xb.dtype == torch.float32: - xb_type = faiss.DistanceDataType_F32 - xb_ptr = swig_ptr_from_FloatTensor(xb) - elif xb.dtype == torch.float16: - xb_type = faiss.DistanceDataType_F16 - xb_ptr = swig_ptr_from_HalfTensor(xb) - else: - raise TypeError('xb must be float32 or float16') - - nq, d2 = xq.size() - assert d2 == d - if xq.is_contiguous(): - xq_row_major = True - elif xq.t().is_contiguous(): - xq = xq.t() - xq_row_major = False - else: - raise TypeError('xq matrix should be row or column-major') - - if xq.dtype == torch.float32: - xq_type = faiss.DistanceDataType_F32 - xq_ptr = swig_ptr_from_FloatTensor(xq) - elif xq.dtype == torch.float16: - xq_type = faiss.DistanceDataType_F16 - xq_ptr = swig_ptr_from_HalfTensor(xq) - else: - raise TypeError('xq must be float32 or float16') - - if D is None: - D = 
torch.empty(nq, nb, device=xb.device, dtype=torch.float32) - else: - assert D.shape == (nq, nb) - # interface takes void*, we need to check this - assert (D.dtype == torch.float32) - - D_ptr = swig_ptr_from_FloatTensor(D) - - args = faiss.GpuDistanceParams() - args.metric = metric - args.k = -1 # selects all pairwise distance - args.dims = d - args.vectors = xb_ptr - args.vectorsRowMajor = xb_row_major - args.vectorType = xb_type - args.numVectors = nb - args.queries = xq_ptr - args.queriesRowMajor = xq_row_major - args.queryType = xq_type - args.numQueries = nq - args.outDistances = D_ptr - args.device = device - - with using_stream(res): - faiss.bfKnn(res, args) - - return D - -torch_replace_method(faiss_module, 'pairwise_distance_gpu', torch_replacement_pairwise_distance_gpu, True, True) diff --git a/spaces/cifkao/context-probing/highlighted_text/src/HighlightedText.tsx b/spaces/cifkao/context-probing/highlighted_text/src/HighlightedText.tsx deleted file mode 100644 index fc37e86f852caa8e6361b9e9f5d87a2d66ba99da..0000000000000000000000000000000000000000 --- a/spaces/cifkao/context-probing/highlighted_text/src/HighlightedText.tsx +++ /dev/null @@ -1,97 +0,0 @@ -import { - StreamlitComponentBase, - withStreamlitConnection, -} from "streamlit-component-lib"; - -type HighlightedTextState = { - activeIndex: number | null, - hoverIndex: number | null, - isFrozen: boolean -}; - -/** - * This is a React-based component template. The `render()` function is called - * automatically when your component should be re-rendered. - */ -class HighlightedText extends StreamlitComponentBase { - public state = {activeIndex: null, hoverIndex: null, isFrozen: false}; - - render() { - const tokens: string[] = this.props.args["tokens"]; - const scores: number[] = this.getScores(); - const prefixLength: number = this.props.args["prefix_len"]; - - let className = "highlighted-text"; - if (this.state.isFrozen) { - className += " frozen"; - } - - const onClick = () => { - this.setState({ isFrozen: false }); - }; - - return
    -
    - - { - this.state.activeIndex != null ? - <> - index: {this.state.activeIndex} - - : <> - } -
    -
    - { - tokens.map((t: string, i: number) => { - let className = "token"; - if (this.state) { - if (this.state.activeIndex == i) { - className += " active"; - } - } - if (i < prefixLength) { - className += " prefix"; - } - const style = { - backgroundColor: - scores[i] > 0 - ? `rgba(32, 255, 32, ${scores[i]})` - : `rgba(255, 32, 32, ${-scores[i]})` - }; - - const onMouseOver = () => { - if (!this.state.isFrozen) { - this.setState({ activeIndex: i }); - } - this.setState({ hoverIndex: i }); - }; - return {t}; - }) - } -
    -
    ; - } - - private getScores() { - const tokens = this.props.args["tokens"]; - if (!this.state || this.state.activeIndex == null || this.state.activeIndex < 1) { - return tokens.map(() => 0); - } - const allScores: number[][] = this.props.args["scores"]; - - const i = this.state.activeIndex - 1; - const hi = Math.min(Math.max(0, i + 1), allScores[i].length); - const row = allScores[i].slice(0, hi); - row.reverse(); - let result = [ - ...Array(Math.max(0, i + 1 - row.length)).fill(0), - ...row.map((x) => x == undefined || isNaN(x) ? 0 : x) - ]; - result = [...result, ...Array(tokens.length - result.length).fill(0)]; - return result; - } -} - -export default withStreamlitConnection(HighlightedText); diff --git a/spaces/cihyFjudo/fairness-paper-search/GTA 3 FULL WORKING! (PC CD-ROM) Serial Key Everything You Need to Know About the Game.md b/spaces/cihyFjudo/fairness-paper-search/GTA 3 FULL WORKING! (PC CD-ROM) Serial Key Everything You Need to Know About the Game.md deleted file mode 100644 index dd54d274e3764f920586a15e55337fd0202944df..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/GTA 3 FULL WORKING! (PC CD-ROM) Serial Key Everything You Need to Know About the Game.md +++ /dev/null @@ -1,5 +0,0 @@ -
    -

    If the Toshiba hard drive is still not working, try to update the device driver. An outdated or corrupted hard drive driver will cause some problems while using the device. Since your hard drive is not detected by the PC, you cannot update the driver via Device Manager. Go to the official site of Toshiba, then download and install the latest driver by entering your model or the serial number of your disk.

    -

    GTA 3 FULL WORKING! (PC CD-ROM) Serial Key


    DOWNLOAD ❤❤❤ https://tinurli.com/2uwjhL



    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Nvidia GeForce Now Not Using RTX 2080 Ti SUPER GPUs Will It Affect Ray Tracing Support?.md b/spaces/cihyFjudo/fairness-paper-search/Nvidia GeForce Now Not Using RTX 2080 Ti SUPER GPUs Will It Affect Ray Tracing Support?.md deleted file mode 100644 index 84dcbfe657fa0795a2bd9a31b594f4b37e97e551..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Nvidia GeForce Now Not Using RTX 2080 Ti SUPER GPUs Will It Affect Ray Tracing Support?.md +++ /dev/null @@ -1,21 +0,0 @@ -
    -

    Where to Buy
    The GeForce RTX 2060 SUPER, RTX 2070 SUPER and RTX 2080 SUPER GPUs will be available as custom boards, including stock-clocked and factory-overclocked models, from top add-in card providers such as ASUS, Colorful, EVGA, Gainward, Galaxy, Gigabyte, Innovision 3D, MSI, Palit, PNY and Zotac and as Founders Editions from www.nvidia.com. Look for the GeForce RTX SUPER GPUs in gaming systems built by Acer, Alienware, Dell, HP and Lenovo, as well as by leading system builders worldwide.

    -

    Certain statements in this press release including, but not limited to, statements as to: NVIDIA supercharging its gaming lineup; gamers riding the growing wave of ray traced titles with NVIDIA GPUs; the performance, benefits, features and abilities of NVIDIA GPUs, including its GeForce RTX SUPER line; the ecosystem driving real-time ray tracing being immense, including tens of millions of GPUs, industry standard APIs, leading game engines and an all-star roster of game franchises; the lineup of SUPER GPUs delivering more performance and ensuring that gamers are prepared for the coming wave of real-time ray tracing blockbusters; GeForce RTX GPUs including specialized cores that enable them to accelerate their capability and delivering ray tracing in real time; the support for ray tracing in industry standard APIs and game engines; the games that announced they will be using ray tracing; and the price and availability of GeForce RTX SUPER GPUs, including with a game bundle are forward-looking statements that are subject to risks and uncertainties that could cause results to be materially different than expectations. Important factors that could cause actual results to differ materially include: global economic conditions; our reliance on third parties to manufacture, assemble, package and test our products; the impact of technological development and competition; development of new products and technologies or enhancements to our existing product and technologies; market acceptance of our products or our partners' products; design, manufacturing or software defects; changes in consumer preferences or demands; changes in industry standards and interfaces; unexpected loss of performance of our products or technologies when integrated into systems; as well as other factors detailed from time to time in the most recent reports NVIDIA files with the Securities and Exchange Commission, or SEC, including, but not limited to, its annual report on Form 10-K and quarterly reports on Form 10-Q. Copies of reports filed with the SEC are posted on the company's website and are available from NVIDIA without charge. These forward-looking statements are not guarantees of future performance and speak only as of the date hereof, and, except as required by law, NVIDIA disclaims any obligation to update these forward-looking statements to reflect future events or circumstances.

    -

    Nvidia GeForce Now Not Using RTX 2080 Ti SUPER GPUs


    Download Zip === https://tinurli.com/2uwiax



    -

Overall, while not really necessary for After Effects alone, if you already need a new high-end GPU for other applications (Premiere Pro, GPU-based rendering, etc.), we would recommend using one of these RTX cards if possible. The RTX 2080 does have less VRAM than the comparably priced GTX 1080 Ti (8GB vs 11GB), but the potential these cards offer for the future is likely worth investing in.

    -

In particular, the supposed leader in that category is the 3080. The problem with that status is that it appears to depend largely on the MSRP of $800. But I have signed myself up for several services that alert me to the availability of this card at this price, and for months I have not been able to get it. The market price of this card is more like $1400. The MSRP is essentially meaningless. When compared to the 2080 Ti, which is available for around $1000, and using your own performance comparisons, the 2080 Ti beats the 3080 on performance per dollar.
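To make that comparison concrete, here is a rough performance-per-dollar calculation using the street prices quoted above; the relative-performance figure for the 3080 (roughly 1.25x a 2080 Ti) is an assumed placeholder rather than a measured benchmark, so substitute your own numbers.

```python
# Rough performance-per-dollar comparison at street prices (not MSRP).
# The relative_perf value for the RTX 3080 is an illustrative assumption only.
cards = {
    "RTX 2080 Ti": {"street_price_usd": 1000, "relative_perf": 1.00},
    "RTX 3080": {"street_price_usd": 1400, "relative_perf": 1.25},  # assumed ~25% faster
}

for name, spec in cards.items():
    perf_per_dollar = spec["relative_perf"] / spec["street_price_usd"]
    print(f"{name}: {perf_per_dollar * 1000:.2f} relative perf per $1000")
```

With these assumed numbers the 2080 Ti comes out ahead per dollar; the conclusion flips only if the 3080 can actually be bought near its MSRP.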

    -

    Hi Pablo, I never had a personal gaming GPU fail. From the ~30 GPUs that I used at universities, I had one fail. From a small GPU cluster I was using, I also saw one GPU fail (1 out of 48 GPUs). Some GPUs are known to have much higher failure rates than others (RTX 2080 Ti and RTX 2080 Founders Edition in particular).

    -

    Thanks for all your help via your blogs over the years.
I am now in a situation where I have 2 X99 workstations, one with 2x RTX 2080 Ti and one with 3x RTX 2080 Ti (I couldn't put 4 in this one due to buying cheap used 2.5-slot-wide GPUs, and one is already on a PCIe riser).
I want to connect the 2 machines using high-speed network cards and fiber.
Is having 100 Gbit/s network speed an absolute must, or could I get away with 40/50 Gbit/s?
I haven't found any 100 Gbit/s Mellanox InfiniBand cards for less than ~$400 USD each, which is too pricey for me. Once the network is set up, is SLURM the best way to distribute the load?

    -

    -

    K80 and M6000 will be quite slow. I would recommend getting a Titan RTX with 24 GB of memory. If that is too expensive I would definitely go for the M6000. You can also think about multiple RTX 2080 Ti cards and using parallel training. That will reduce the memory footprint slightly, especially if you use FP16 training: mixed FP16 training reduces the memory footprint by about 25%, and pure FP16 training via Apex reduces it by about 50%. Using 2 GPUs should decrease the footprint by about 20-30%. So 2x RTX 2080 Ti with pure FP16 training is roughly equivalent to 11/0.75/0.5 = 29 GB used by the K80 or M6000, but you train much, much faster. (A worked version of this estimate is sketched below.)
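    A minimal sketch of the arithmetic behind that 29 GB figure, assuming the rough reduction factors quoted above (~25% from splitting across two GPUs, ~50% from pure FP16); these are rules of thumb, not measured values.

```python
# Rough FP32-equivalent memory estimate for 2x RTX 2080 Ti with pure FP16
# training, using the rules of thumb quoted above (approximate, not measured).
per_gpu_memory_gb = 11      # memory of one RTX 2080 Ti
two_gpu_saving    = 0.25    # parallel training shrinks the per-GPU footprint ~20-30%
pure_fp16_saving  = 0.50    # pure FP16 roughly halves the footprint

equivalent_fp32_gb = per_gpu_memory_gb / (1 - two_gpu_saving) / (1 - pure_fp16_saving)
print(round(equivalent_fp32_gb))  # -> 29 (GB), matching the 11/0.75/0.5 estimate above
```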

    -

    I got this advice from a vendor of GPU systems. I was arguing that for text models, FP16 on an RTX 2080 Ti is good enough and comparable to a V100. Also, in their benchmarking they did not test the RTX with NVLink, but the V100 was tested for FP16. I got this response. I just wanted to check whether NVLink is of no use when using RTX 2080 Ti cards. Please advise; your input is much appreciated here, as I would use it for my next purchase.

    -

    I mean I have a Blender project using 25 GB. I have 32 GB of memory in my computer and 8 GB on my graphics card, and the GPU is usable now. Can I use, for example, a GeForce RTX 2080 Ti with 11 GB for my project which uses 25 GB of RAM and take advantage of the RTX support? (Assuming that I still have 32 GB of main memory.)

    -

    Hi
    Are you going to be able to use OptiX and CUDA together? For example, using RTX cards and GTX 10xx cards to render an image at the same time.
    Also, is there any plan to bring NVLink memory pooling on RTX 2070 Super and higher cards to Cycles? Puget Systems showed that NVLink on RTX cards is able to handle memory pooling, but it needs software support.
    These two could be game changers for future GPU rendering.

    -

    On July 2, 2019, the GeForce RTX Super line of cards was announced, comprising higher-spec versions of the 2060, 2070 and 2080. Each of the Super models was offered for a similar price as the older models but with improved specs.[11] In July 2019, Nvidia stated that the "SUPER" graphics cards to be introduced in the GeForce RTX 20 series had a 15% performance advantage over the GeForce RTX 2060.[33] PC World called the Super editions a "modest" upgrade for the price, and the 2080 Super chip the "second most-powerful GPU ever released" in terms of speed.[34] In November 2019, PC Gamer wrote "even without an overclock, the 2080 Ti is the best graphics card for gaming."[35] In June 2020, PC Mag listed the Nvidia GeForce RTX 2070 Super as one of the "best [8] graphics cards for 4k gaming in 2020." The GeForce RTX 2080 Founders Edition, Super, and Ti were also listed.[36] In June 2020, graphics cards including the RTX 2060, RTX 2060 Super, RTX 2070 and the RTX 2080 Super were announced as discounted by retailers in expectation of the GeForce RTX 3080 launch.[37] In April 2020, Nvidia announced 100 new laptops licensed to include either GeForce GTX or RTX models.[38]

    -

    The second generation Tensor Cores (succeeding Volta's) work in cooperation with the RT cores, and their AI features are used mainly for two purposes: first, de-noising a partially ray-traced image by filling in the blanks between the rays cast; second, DLSS (deep learning super-sampling), a new method to replace anti-aliasing by artificially generating detail to upscale the rendered image to a higher resolution.[51] The Tensor cores apply deep learning models (for example, an image resolution enhancement model) which are constructed using supercomputers. The problem to be solved is analyzed on the supercomputer, which is taught by example what results are desired. The supercomputer then outputs a model which is then executed on the consumer's Tensor cores. These methods are delivered to consumers as part of the cards' drivers.[citation needed]
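    Purely as an illustration of the data flow described above, here is a minimal runnable sketch in Python/NumPy. The denoiser and upscaler are trivial stand-ins (a box blur and pixel repetition); the real pipeline runs learned models on the Tensor cores, and nothing below is NVIDIA's actual API.

```python
import numpy as np

# Stand-ins for the two Tensor-core roles described above. A real driver ships
# learned models; a box blur and nearest-neighbour upscale are used here only
# to make the data flow concrete and runnable.

def denoise(sparse_frame):
    """Fill in the gaps of a sparsely ray-traced frame (stand-in: 3x3 box blur)."""
    padded = np.pad(sparse_frame, 1, mode="edge")
    h, w = sparse_frame.shape
    return sum(padded[dy:dy + h, dx:dx + w] for dy in range(3) for dx in range(3)) / 9.0

def upscale(low_res, scale):
    """DLSS-style upscaling (stand-in: repeat pixels instead of generating detail)."""
    return np.repeat(np.repeat(low_res, scale, axis=0), scale, axis=1)

noisy = np.random.rand(270, 480)          # pretend partially ray-traced frame
frame = upscale(denoise(noisy), scale=4)  # render low, denoise, upscale to 1080x1920
print(frame.shape)                        # (1080, 1920)
```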

    -

    I've had RealHack working since forever using an RTX 2080 and the NV40 folder with Hex 30008. I recently updated the graphics driver to 471.68, and now RealView is no longer enabled. The last version that worked was 466.77. If I roll back the drivers, it works again, but I'd rather not do that. Any ideas on how to get it back, or even why this would happen?

    -

    You can go as high as 6x6 or 8x8 on RTX 2080 Super and Ti cards, but that oversamples the resolution and is not needed (it can also cause artifacts and blurring in the distance). Keep in mind you want to leave room in GPU performance; the rough pixel counts sketched below show how quickly the cost grows.
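    To make the headroom point concrete, the sketch below multiplies out a few supersampling factors. The base per-eye resolution is an assumed example value, and the factor is treated here as a per-axis multiplier.

```python
# Illustrative pixel-count cost of supersampling factors (assumed 1920x1080 base
# resolution; the factor is treated as a per-axis multiplier).
base_w, base_h = 1920, 1080
for factor in (1, 2, 4, 6, 8):
    mpixels = base_w * factor * base_h * factor / 1e6
    print(f"{factor}x{factor}: {mpixels:6.1f} Mpixels per frame ({factor * factor}x the shading work)")
```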

    -

    1. NVIDIA GeForce GTX 1080 Ti - Still a decent graphics card, the 1080 Ti can currently generate up to $33.94 in monthly mining income.
    2. NVIDIA GeForce RTX 2070 Super - This supercharged version of the RTX 2070 model performs considerably better than the regular version. It can currently generate up to $35.40 in monthly crypto mining income.
    3. NVIDIA GeForce RTX 2080 Ti - This graphics card was considered the best NVIDIA graphics card for mining performance before the 30 series arrived. It can currently generate up to $48.81 in monthly mining income.

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Solibri Model Checker V7 Crack 67 How to Check and Validate Your BIM Models for Free.md b/spaces/cihyFjudo/fairness-paper-search/Solibri Model Checker V7 Crack 67 How to Check and Validate Your BIM Models for Free.md deleted file mode 100644 index 07ca16b010d2b631b4f8b444a6ea6744e4ae8d28..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Solibri Model Checker V7 Crack 67 How to Check and Validate Your BIM Models for Free.md +++ /dev/null @@ -1,5 +0,0 @@ -
    -

    Express Scribe Registration Codel.. Harmoncrixportwapittledep · imperial rome warband mod Rome, Photo And Video, Fashion, Moda, Fashion Styles. solibri model checker v7 crack 67

    express scribe registration code free


    Rome.

    -

    Solibri Model Checker V7 Crack 67


    Download File 🔗 https://tinurli.com/2uwiwx



    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/base.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/base.py deleted file mode 100644 index 07f2c2e504bb4fd96e1e3ee18caaac94d3b1865a..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/base.py +++ /dev/null @@ -1,113 +0,0 @@ -"""Various base classes.""" -from types import coroutine -from collections.abc import Coroutine -from asyncio import get_running_loop - - -class AsyncBase: - def __init__(self, file, loop, executor): - self._file = file - self._executor = executor - self._ref_loop = loop - - @property - def _loop(self): - return self._ref_loop or get_running_loop() - - def __aiter__(self): - """We are our own iterator.""" - return self - - def __repr__(self): - return super().__repr__() + " wrapping " + repr(self._file) - - async def __anext__(self): - """Simulate normal file iteration.""" - line = await self.readline() - if line: - return line - else: - raise StopAsyncIteration - - -class AsyncIndirectBase(AsyncBase): - def __init__(self, name, loop, executor, indirect): - self._indirect = indirect - self._name = name - super().__init__(None, loop, executor) - - @property - def _file(self): - return self._indirect() - - @_file.setter - def _file(self, v): - pass # discard writes - - -class _ContextManager(Coroutine): - __slots__ = ("_coro", "_obj") - - def __init__(self, coro): - self._coro = coro - self._obj = None - - def send(self, value): - return self._coro.send(value) - - def throw(self, typ, val=None, tb=None): - if val is None: - return self._coro.throw(typ) - elif tb is None: - return self._coro.throw(typ, val) - else: - return self._coro.throw(typ, val, tb) - - def close(self): - return self._coro.close() - - @property - def gi_frame(self): - return self._coro.gi_frame - - @property - def gi_running(self): - return self._coro.gi_running - - @property - def gi_code(self): - return self._coro.gi_code - - def __next__(self): - return self.send(None) - - @coroutine - def __iter__(self): - resp = yield from self._coro - return resp - - def __await__(self): - resp = yield from self._coro - return resp - - async def __anext__(self): - resp = await self._coro - return resp - - async def __aenter__(self): - self._obj = await self._coro - return self._obj - - async def __aexit__(self, exc_type, exc, tb): - self._obj.close() - self._obj = None - - -class AiofilesContextManager(_ContextManager): - """An adjusted async context manager for aiofiles.""" - - async def __aexit__(self, exc_type, exc_val, exc_tb): - await get_running_loop().run_in_executor( - None, self._obj._file.__exit__, exc_type, exc_val, exc_tb - ) - self._obj = None diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attrs/__init__.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attrs/__init__.py deleted file mode 100644 index 0c2481561a93a912503754396782e987fcdd9629..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attrs/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -# SPDX-License-Identifier: MIT - -from attr import ( - NOTHING, - Attribute, - AttrsInstance, - Factory, - _make_getattr, - assoc, - cmp_using, - define, - evolve, - field, - fields, - fields_dict, - frozen, - has, - make_class, - mutable, - resolve_types, - validate, -) -from attr._next_gen import asdict, 
astuple - -from . import converters, exceptions, filters, setters, validators - - -__all__ = [ - "__author__", - "__copyright__", - "__description__", - "__doc__", - "__email__", - "__license__", - "__title__", - "__url__", - "__version__", - "__version_info__", - "asdict", - "assoc", - "astuple", - "Attribute", - "AttrsInstance", - "cmp_using", - "converters", - "define", - "evolve", - "exceptions", - "Factory", - "field", - "fields_dict", - "fields", - "filters", - "frozen", - "has", - "make_class", - "mutable", - "NOTHING", - "resolve_types", - "setters", - "validate", - "validators", -] - -__getattr__ = _make_getattr(__name__) diff --git a/spaces/cm107/agv-demo/Build/test-webgl-dev.loader.js b/spaces/cm107/agv-demo/Build/test-webgl-dev.loader.js deleted file mode 100644 index 4d0f507c9abf194451ec2d55f7bc4c83885d6d78..0000000000000000000000000000000000000000 --- a/spaces/cm107/agv-demo/Build/test-webgl-dev.loader.js +++ /dev/null @@ -1,1238 +0,0 @@ -function createUnityInstance(canvas, config, onProgress) { - onProgress = onProgress || function () {}; - - - function showBanner(msg, type) { - // Only ever show one error at most - other banner messages after that should get ignored - // to avoid noise. - if (!showBanner.aborted && config.showBanner) { - if (type == 'error') showBanner.aborted = true; - return config.showBanner(msg, type); - } - - // Fallback to console logging if visible banners have been suppressed - // from the main page. - switch(type) { - case 'error': console.error(msg); break; - case 'warning': console.warn(msg); break; - default: console.log(msg); break; - } - } - - function errorListener(e) { - var error = e.reason || e.error; - var message = error ? error.toString() : (e.message || e.reason || ''); - var stack = (error && error.stack) ? error.stack.toString() : ''; - - // Do not repeat the error message if it's present in the stack trace. - if (stack.startsWith(message)) { - stack = stack.substring(message.length); - } - - message += '\n' + stack.trim(); - - if (!message || !Module.stackTraceRegExp || !Module.stackTraceRegExp.test(message)) - return; - - var filename = e.filename || (error && (error.fileName || error.sourceURL)) || ''; - var lineno = e.lineno || (error && (error.lineNumber || error.line)) || 0; - - errorHandler(message, filename, lineno); - } - - var Module = { - canvas: canvas, - webglContextAttributes: { - preserveDrawingBuffer: false, - }, - cacheControl: function (url) { - return (url == Module.dataUrl || url.match(/\.bundle/)) ? "must-revalidate" : "no-store"; - }, - streamingAssetsUrl: "StreamingAssets", - downloadProgress: {}, - deinitializers: [], - intervals: {}, - setInterval: function (func, ms) { - var id = window.setInterval(func, ms); - this.intervals[id] = true; - return id; - }, - clearInterval: function(id) { - delete this.intervals[id]; - window.clearInterval(id); - }, - preRun: [], - postRun: [], - print: function (message) { - console.log(message); - }, - printErr: function (message) { - console.error(message); - - if (typeof message === 'string' && message.indexOf('wasm streaming compile failed') != -1) { - if (message.toLowerCase().indexOf('mime') != -1) { - showBanner('HTTP Response Header "Content-Type" configured incorrectly on the server for file ' + Module.codeUrl + ' , should be "application/wasm". Startup time performance will suffer.', 'warning'); - } else { - showBanner('WebAssembly streaming compilation failed! 
This can happen for example if "Content-Encoding" HTTP header is incorrectly enabled on the server for file ' + Module.codeUrl + ', but the file is not pre-compressed on disk (or vice versa). Check the Network tab in browser Devtools to debug server header configuration.', 'warning'); - } - } - }, - locateFile: function (url) { - return ( - url == "build.wasm" ? this.codeUrl : - url - ); - }, - disabledCanvasEvents: [ - "contextmenu", - "dragstart", - ], - }; - - for (var parameter in config) - Module[parameter] = config[parameter]; - - Module.streamingAssetsUrl = new URL(Module.streamingAssetsUrl, document.URL).href; - - // Operate on a clone of Module.disabledCanvasEvents field so that at Quit time - // we will ensure we'll remove the events that we created (in case user has - // modified/cleared Module.disabledCanvasEvents in between) - var disabledCanvasEvents = Module.disabledCanvasEvents.slice(); - - function preventDefault(e) { - e.preventDefault(); - } - - disabledCanvasEvents.forEach(function (disabledCanvasEvent) { - canvas.addEventListener(disabledCanvasEvent, preventDefault); - }); - - window.addEventListener("error", errorListener); - window.addEventListener("unhandledrejection", errorListener); - - // Safari does not automatically stretch the fullscreen element to fill the screen. - // The CSS width/height of the canvas causes it to remain the same size in the full screen - // window on Safari, resulting in it being a small canvas with black borders filling the - // rest of the screen. - var _savedElementWidth = ""; - var _savedElementHeight = ""; - // Safari uses webkitfullscreenchange event and not fullscreenchange - document.addEventListener("webkitfullscreenchange", function(e) { - // Safari uses webkitCurrentFullScreenElement and not fullscreenElement. - var fullscreenElement = document.webkitCurrentFullScreenElement; - if (fullscreenElement === canvas) { - if (canvas.style.width) { - _savedElementWidth = canvas.style.width; - _savedElementHeight = canvas.style.height; - canvas.style.width = "100%"; - canvas.style.height = "100%"; - } - } else { - if (_savedElementWidth) { - canvas.style.width = _savedElementWidth; - canvas.style.height = _savedElementHeight; - _savedElementWidth = ""; - _savedElementHeight = ""; - } - } - }); - - // Clear the event handlers we added above when the app quits, so that the event handler - // functions will not hold references to this JS function scope after - // exit, to allow JS garbage collection to take place. - Module.deinitializers.push(function() { - Module['disableAccessToMediaDevices'](); - disabledCanvasEvents.forEach(function (disabledCanvasEvent) { - canvas.removeEventListener(disabledCanvasEvent, preventDefault); - }); - window.removeEventListener("error", errorListener); - window.removeEventListener("unhandledrejection", errorListener); - - for (var id in Module.intervals) - { - window.clearInterval(id); - } - Module.intervals = {}; - }); - - Module.QuitCleanup = function () { - for (var i = 0; i < Module.deinitializers.length; i++) { - Module.deinitializers[i](); - } - Module.deinitializers = []; - // After all deinitializer callbacks are called, notify user code that the Unity game instance has now shut down. 
- if (typeof Module.onQuit == "function") - Module.onQuit(); - - }; - - var unityInstance = { - Module: Module, - SetFullscreen: function () { - if (Module.SetFullscreen) - return Module.SetFullscreen.apply(Module, arguments); - Module.print("Failed to set Fullscreen mode: Player not loaded yet."); - }, - SendMessage: function () { - if (Module.SendMessage) - return Module.SendMessage.apply(Module, arguments); - Module.print("Failed to execute SendMessage: Player not loaded yet."); - }, - Quit: function () { - return new Promise(function (resolve, reject) { - Module.shouldQuit = true; - Module.onQuit = resolve; - }); - }, - }; - - - Module.SystemInfo = (function () { - - var browser, browserVersion, os, osVersion, canvas, gpu; - - var ua = navigator.userAgent + ' '; - var browsers = [ - ['Firefox', 'Firefox'], - ['OPR', 'Opera'], - ['Edg', 'Edge'], - ['SamsungBrowser', 'Samsung Browser'], - ['Trident', 'Internet Explorer'], - ['MSIE', 'Internet Explorer'], - ['Chrome', 'Chrome'], - ['CriOS', 'Chrome on iOS Safari'], - ['FxiOS', 'Firefox on iOS Safari'], - ['Safari', 'Safari'], - ]; - - function extractRe(re, str, idx) { - re = RegExp(re, 'i').exec(str); - return re && re[idx]; - } - for(var b = 0; b < browsers.length; ++b) { - browserVersion = extractRe(browsers[b][0] + '[\/ ](.*?)[ \\)]', ua, 1); - if (browserVersion) { - browser = browsers[b][1]; - break; - } - } - if (browser == 'Safari') browserVersion = extractRe('Version\/(.*?) ', ua, 1); - if (browser == 'Internet Explorer') browserVersion = extractRe('rv:(.*?)\\)? ', ua, 1) || browserVersion; - - // These OS strings need to match the ones in Runtime/Misc/SystemInfo.cpp::GetOperatingSystemFamily() - var oses = [ - ['Windows (.*?)[;\)]', 'Windows'], - ['Android ([0-9_\.]+)', 'Android'], - ['iPhone OS ([0-9_\.]+)', 'iPhoneOS'], - ['iPad.*? OS ([0-9_\.]+)', 'iPadOS'], - ['FreeBSD( )', 'FreeBSD'], - ['OpenBSD( )', 'OpenBSD'], - ['Linux|X11()', 'Linux'], - ['Mac OS X ([0-9_\.]+)', 'MacOS'], - ['bot|google|baidu|bing|msn|teoma|slurp|yandex', 'Search Bot'] - ]; - for(var o = 0; o < oses.length; ++o) { - osVersion = extractRe(oses[o][0], ua, 1); - if (osVersion) { - os = oses[o][1]; - osVersion = osVersion.replace(/_/g, '.'); - break; - } - } - var versionMappings = { - 'NT 5.0': '2000', - 'NT 5.1': 'XP', - 'NT 5.2': 'Server 2003', - 'NT 6.0': 'Vista', - 'NT 6.1': '7', - 'NT 6.2': '8', - 'NT 6.3': '8.1', - 'NT 10.0': '10' - }; - osVersion = versionMappings[osVersion] || osVersion; - - // TODO: Add mobile device identifier, e.g. SM-G960U - - canvas = document.createElement("canvas"); - if (canvas) { - gl = canvas.getContext("webgl2"); - glVersion = gl ? 
2 : 0; - if (!gl) { - if (gl = canvas && canvas.getContext("webgl")) glVersion = 1; - } - - if (gl) { - gpu = (gl.getExtension("WEBGL_debug_renderer_info") && gl.getParameter(0x9246 /*debugRendererInfo.UNMASKED_RENDERER_WEBGL*/)) || gl.getParameter(0x1F01 /*gl.RENDERER*/); - } - } - - var hasThreads = typeof SharedArrayBuffer !== 'undefined'; - var hasWasm = typeof WebAssembly === "object" && typeof WebAssembly.compile === "function"; - return { - width: screen.width, - height: screen.height, - userAgent: ua.trim(), - browser: browser || 'Unknown browser', - browserVersion: browserVersion || 'Unknown version', - mobile: /Mobile|Android|iP(ad|hone)/.test(navigator.appVersion), - os: os || 'Unknown OS', - osVersion: osVersion || 'Unknown OS Version', - gpu: gpu || 'Unknown GPU', - language: navigator.userLanguage || navigator.language, - hasWebGL: glVersion, - hasCursorLock: !!document.body.requestPointerLock, - hasFullscreen: !!document.body.requestFullscreen || !!document.body.webkitRequestFullscreen, // Safari still uses the webkit prefixed version - hasThreads: hasThreads, - hasWasm: hasWasm, - // This should be updated when we re-enable wasm threads. Previously it checked for WASM thread - // support with: var wasmMemory = hasWasm && hasThreads && new WebAssembly.Memory({"initial": 1, "maximum": 1, "shared": true}); - // which caused Chrome to have a warning that SharedArrayBuffer requires cross origin isolation. - hasWasmThreads: false, - }; - })(); - - function errorHandler(message, filename, lineno) { - // Unity needs to rely on Emscripten deferred fullscreen requests, so these will make their way to error handler - if (message.indexOf('fullscreen error') != -1) - return; - - if (Module.startupErrorHandler) { - Module.startupErrorHandler(message, filename, lineno); - return; - } - if (Module.errorHandler && Module.errorHandler(message, filename, lineno)) - return; - console.log("Invoking error handler due to\n" + message); - - // Support Firefox window.dump functionality. - if (typeof dump == "function") - dump("Invoking error handler due to\n" + message); - - if (errorHandler.didShowErrorMessage) - return; - var message = "An error occurred running the Unity content on this page. See your browser JavaScript console for more info. The error was:\n" + message; - if (message.indexOf("DISABLE_EXCEPTION_CATCHING") != -1) { - message = "An exception has occurred, but exception handling has been disabled in this build. If you are the developer of this content, enable exceptions in your project WebGL player settings to be able to catch the exception or see the stack trace."; - } else if (message.indexOf("Cannot enlarge memory arrays") != -1) { - message = "Out of memory. If you are the developer of this content, try allocating more memory to your WebGL build in the WebGL player settings."; - } else if (message.indexOf("Invalid array buffer length") != -1 || message.indexOf("Invalid typed array length") != -1 || message.indexOf("out of memory") != -1 || message.indexOf("could not allocate memory") != -1) { - message = "The browser could not allocate enough memory for the WebGL content. 
If you are the developer of this content, try allocating less memory to your WebGL build in the WebGL player settings."; - } - alert(message); - errorHandler.didShowErrorMessage = true; - } - - - Module.abortHandler = function (message) { - errorHandler(message, "", 0); - return true; - }; - - Error.stackTraceLimit = Math.max(Error.stackTraceLimit || 0, 50); - - function progressUpdate(id, e) { - if (id == "symbolsUrl") - return; - var progress = Module.downloadProgress[id]; - if (!progress) - progress = Module.downloadProgress[id] = { - started: false, - finished: false, - lengthComputable: false, - total: 0, - loaded: 0, - }; - if (typeof e == "object" && (e.type == "progress" || e.type == "load")) { - if (!progress.started) { - progress.started = true; - progress.lengthComputable = e.lengthComputable; - } - progress.total = e.total; - progress.loaded = e.loaded; - if (e.type == "load") - progress.finished = true; - } - var loaded = 0, total = 0, started = 0, computable = 0, unfinishedNonComputable = 0; - for (var id in Module.downloadProgress) { - var progress = Module.downloadProgress[id]; - if (!progress.started) - return 0; - started++; - if (progress.lengthComputable) { - loaded += progress.loaded; - total += progress.total; - computable++; - } else if (!progress.finished) { - unfinishedNonComputable++; - } - } - var totalProgress = started ? (started - unfinishedNonComputable - (total ? computable * (total - loaded) / total : 0)) / started : 0; - onProgress(0.9 * totalProgress); - } - -Module.readBodyWithProgress = function() { - /** - * Estimate length of uncompressed content by taking average compression ratios - * of compression type into account. - * @param {Response} response A Fetch API response object - * @param {boolean} lengthComputable Return wether content length was given in header. - * @returns {number} - */ - function estimateContentLength(response, lengthComputable) { - if (!lengthComputable) { - // No content length available - return 0; - } - - var compression = response.headers.get("Content-Encoding"); - var contentLength = parseInt(response.headers.get("Content-Length")); - - switch (compression) { - case "br": - return Math.round(contentLength * 5); - case "gzip": - return Math.round(contentLength * 4); - default: - return contentLength; - } - } - - function readBodyWithProgress(response, onProgress) { - var reader = response.body ? response.body.getReader() : undefined; - var lengthComputable = typeof response.headers.get('Content-Length') !== "undefined"; - var estimatedContentLength = estimateContentLength(response, lengthComputable); - var body = new Uint8Array(estimatedContentLength); - var trailingChunks = []; - var receivedLength = 0; - var trailingChunksStart = 0; - - if (!lengthComputable) { - console.warn("[UnityCache] Response is served without Content-Length header. 
Please reconfigure server to include valid Content-Length for better download performance."); - } - - function readBody() { - if (typeof reader === "undefined") { - // Browser does not support streaming reader API - // Fallback to Respone.arrayBuffer() - return response.arrayBuffer().then(function (buffer) { - onProgress({ - type: "progress", - total: buffer.length, - loaded: 0, - lengthComputable: lengthComputable - }); - - return new Uint8Array(buffer); - }); - } - - // Start reading memory chunks - return reader.read().then(function (result) { - if (result.done) { - return concatenateTrailingChunks(); - } - - if ((receivedLength + result.value.length) <= body.length) { - // Directly append chunk to body if enough memory was allocated - body.set(result.value, receivedLength); - trailingChunksStart = receivedLength + result.value.length; - } else { - // Store additional chunks in array to append later - trailingChunks.push(result.value); - } - - receivedLength += result.value.length; - onProgress({ - type: "progress", - total: Math.max(estimatedContentLength, receivedLength), - loaded: receivedLength, - lengthComputable: lengthComputable - }); - - return readBody(); - }); - } - - function concatenateTrailingChunks() { - if (receivedLength === estimatedContentLength) { - return body; - } - - if (receivedLength < estimatedContentLength) { - // Less data received than estimated, shrink body - return body.slice(0, receivedLength); - } - - // More data received than estimated, create new larger body to prepend all additional chunks to the body - var newBody = new Uint8Array(receivedLength); - newBody.set(body, 0); - var position = trailingChunksStart; - for (var i = 0; i < trailingChunks.length; ++i) { - newBody.set(trailingChunks[i], position); - position += trailingChunks[i].length; - } - - return newBody; - } - - return readBody().then(function (parsedBody) { - onProgress({ - type: "load", - total: parsedBody.length, - loaded: parsedBody.length, - lengthComputable: lengthComputable - }); - - response.parsedBody = parsedBody; - return response; - }); - } - - return readBodyWithProgress; -}(); - -Module.fetchWithProgress = function () { - function fetchWithProgress(resource, init) { - var onProgress = function () { }; - if (init && init.onProgress) { - onProgress = init.onProgress; - } - - return fetch(resource, init).then(function (response) { - return Module.readBodyWithProgress(response, onProgress); - }); - } - - return fetchWithProgress; -}(); - /** - * @interface RequestMetaData - * An object with meta data for a request - * - * @property {string} url The url of a request - * @property {string} company The company name - * @property {string} product The product name - * @property {number} version The version of the build - * @property {number} size The company of the build - * @property {number} accessedAt Timestamp when request was last accessed (Unix timestamp format) - * @property {number} updatedAt Timestamp when request was last updated in the cache (Unix timestamp format) - */ - -/** - * @interface ResponseWithMetaData - * An object with a cached response and meta data - * @property {Response} response - * @property {RequestMetaData} metaData - */ - -Module.UnityCache = function () { - var UnityCacheDatabase = { name: "UnityCache", version: 4 }; - var RequestMetaDataStore = { name: "RequestMetaDataStore", version: 1 }; - var RequestStore = { name: "RequestStore", version: 1 }; - var WebAssemblyStore = { name: "WebAssembly", version: 1 }; - var indexedDB = window.indexedDB || 
window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB; - - function log(message) { - console.log("[UnityCache] " + message); - } - - /** - * A request cache that uses the browser Index DB to cache large requests - * @property {Promise} isConnected - * @property {Cache} cache - */ - function UnityCache() { - var self = this; - - this.isConnected = this.connect().then(function () { - return self.cleanUpCache(); - }); - - this.isConnected.catch(function (error) { - log("Error when initializing cache: " + error); - }); - } - - var instance = null; - /** - * Singleton accessor. Returns unity cache instance - * @returns {UnityCache} - */ - UnityCache.getInstance = function () { - if (!instance) { - instance = new UnityCache(); - } - - return instance; - } - - /** - * Destroy unity cache instance. Returns a promise that waits for the - * database connection to be closed. - * @returns {Promise} - */ - UnityCache.destroyInstance = function () { - if (!instance) { - return Promise.resolve(); - } - - return instance.close().then(function () { - instance = null; - }); - } - - /** - * Clear the unity cache. - * @returns {Promise} A promise that resolves when the cache is cleared. - */ - UnityCache.prototype.clearCache = function () { - var self = this; - - function deleteCacheEntries(cacheKeys) { - if (cacheKeys.length === 0) { - return Promise.resolve(); - } - - var key = cacheKeys.pop(); - - return self.cache.delete(key).then(function () { - return deleteCacheEntries(cacheKeys); - }); - } - - return this.isConnected.then(function () { - return self.execute(RequestMetaDataStore.name, "clear", []); - }).then(function () { - return self.cache.keys(); - }).then(function (keys) { - return deleteCacheEntries(keys) - }); - } - - /** - * Config for request meta data store - */ - UnityCache.UnityCacheDatabase = UnityCacheDatabase; - UnityCache.RequestMetaDataStore = RequestMetaDataStore; - UnityCache.MaximumCacheSize = 1024 * 1024 * 1024; // 1 GB - - /** - * Load a request response from cache - * @param {Request|string} request The fetch request - * @returns {Promise} A cached response with meta data for the request or undefined if request is not in cache. - */ - UnityCache.prototype.loadRequest = function (request) { - var self = this; - - return self.isConnected.then(function () { - return Promise.all([ - self.cache.match(request), - self.loadRequestMetaData(request) - ]); - }).then(function (result) { - if (typeof result[0] === "undefined" || typeof result[1] === "undefined") { - return undefined; - } - - return { - response: result[0], - metaData: result[1] - }; - }); - } - - /** - * Load a request meta data from cache - * @param {Request|string} request The fetch request - * @returns {Promise} Request meta data - */ - UnityCache.prototype.loadRequestMetaData = function (request) { - var url = typeof request === "string" ? 
request : request.url; - - return this.execute(RequestMetaDataStore.name, "get", [url]); - } - - /** - * Update meta data of a request - * @param {RequestMetaData} metaData - * @returns {Promise} - */ - UnityCache.prototype.updateRequestMetaData = function (metaData) { - return this.execute(RequestMetaDataStore.name, "put", [metaData]); - } - - /** - * Store request in cache - * @param {Request} request - * @param {Response} response - * @returns {Promise} - */ - UnityCache.prototype.storeRequest = function (request, response) { - var self = this; - - return self.isConnected.then(function () { - return self.cache.put(request, response); - }); - } - - /** - * Close database and cache connection. - * @async - */ - UnityCache.prototype.close = function () { - return this.isConnected.then(function () { - if (this.database) { - this.database.close(); - this.database = null; - } - - if (this.cache) { - this.cache = null; - } - - }.bind(this)); - } - - - /** - * Create a connection to Cache and IndexedDB for meta data storage - * @private - * @async - * @returns {Promise} A Promise that is resolved when a connection to the IndexedDB and cache are established. - */ - UnityCache.prototype.connect = function () { - var self = this; - - if (typeof indexedDB === "undefined") { - return Promise.reject(new Error("Could not connect to cache: IndexedDB is not supported.")); - } - - if (typeof window.caches === "undefined") { - return Promise.reject(new Error("Could not connect to cache: Cache API is not supported.")); - } - - var isConnected = new Promise(function (resolve, reject) { - try { - // Workaround for WebKit bug 226547: - // On very first page load opening a connection to IndexedDB hangs without triggering onerror. - // Add a timeout that triggers the error handling code. 
- self.openDBTimeout = setTimeout(function () { - if (typeof self.database != "undefined") { - return; - } - - reject(new Error("Could not connect to cache: Database timeout.")); - }, 20000); - - function clearOpenDBTimeout() { - if (!self.openDBTimeout) { - return; - } - - clearTimeout(self.openDBTimeout); - self.openDBTimeout = null; - } - - var openRequest = indexedDB.open(UnityCacheDatabase.name, UnityCacheDatabase.version); - - openRequest.onupgradeneeded = self.upgradeDatabase.bind(self); - - openRequest.onsuccess = function (e) { - clearOpenDBTimeout(); - self.database = e.target.result; - resolve(); - }; - - openRequest.onerror = function (error) { - clearOpenDBTimeout(); - self.database = null; - reject(new Error("Could not connect to database.")); - }; - } catch (error) { - clearOpenDBTimeout(); - self.database = null; - self.cache = null; - reject(new Error("Could not connect to cache: Could not connect to database.")); - } - }).then(function () { - var cacheName = UnityCacheDatabase.name + "_" + Module.companyName + "_" + Module.productName; - - return caches.open(cacheName); - }).then(function (cache) { - self.cache = cache; - }); - - return isConnected; - } - - /** - * Upgrade object store if database is outdated - * @private - * @param {any} e Database upgrade event - */ - UnityCache.prototype.upgradeDatabase = function (e) { - var database = e.target.result; - - if (!database.objectStoreNames.contains(RequestMetaDataStore.name)) { - var objectStore = database.createObjectStore(RequestMetaDataStore.name, { keyPath: "url" }); - ["accessedAt", "updatedAt"].forEach(function (index) { objectStore.createIndex(index, index); }); - } - - if (database.objectStoreNames.contains(RequestStore.name)) { - database.deleteObjectStore(RequestStore.name); - } - - if (database.objectStoreNames.contains(WebAssemblyStore.name)) { - database.deleteObjectStore(WebAssemblyStore.name); - } - } - - /** - * Execute an operation on the cache - * @private - * @param {string} store The name of the store to use - * @param {string} operation The operation to to execute on the cache - * @param {Array} parameters Parameters for the operation - * @returns {Promise} A promise to the cache entry - */ - UnityCache.prototype.execute = function (store, operation, parameters) { - return this.isConnected.then(function () { - return new Promise(function (resolve, reject) { - try { - // Failure during initialization of database -> reject Promise - if (this.database === null) { - reject(new Error("indexedDB access denied")) - return; - } - - // Create a transaction for the request - var accessMode = ["put", "delete", "clear"].indexOf(operation) != -1 ? 
"readwrite" : "readonly"; - var transaction = this.database.transaction([store], accessMode) - var target = transaction.objectStore(store); - if (operation == "openKeyCursor") { - target = target.index(parameters[0]); - parameters = parameters.slice(1); - } - - // Make a request to the database - var request = target[operation].apply(target, parameters); - request.onsuccess = function (e) { - resolve(e.target.result); - }; - request.onerror = function (error) { - reject(error); - }; - } catch (error) { - reject(error); - } - }.bind(this)); - }.bind(this)); - } - - UnityCache.prototype.getMetaDataEntries = function () { - var self = this; - var cacheSize = 0; - var metaDataEntries = []; - - return new Promise(function (resolve, reject) { - var transaction = self.database.transaction([RequestMetaDataStore.name], "readonly"); - var target = transaction.objectStore(RequestMetaDataStore.name); - var request = target.openCursor(); - - request.onsuccess = function (event) { - var cursor = event.target.result; - - if (cursor) { - cacheSize += cursor.value.size; - metaDataEntries.push(cursor.value); - - cursor.continue(); - } else { - resolve({ - metaDataEntries: metaDataEntries, - cacheSize: cacheSize - }); - } - }; - request.onerror = function (error) { - reject(error); - }; - }); - } - - /** - * Clean up cache by removing outdated entries. - * @private - * @returns {Promise} - */ - UnityCache.prototype.cleanUpCache = function () { - var self = this; - - return this.getMetaDataEntries().then(function (result) { - var metaDataEntries = result.metaDataEntries; - var cacheSize = result.cacheSize; - var entriesToDelete = []; - var newMetaDataEntries = []; - - // Remove cached entries with outdated product version - for (var i = 0; i < metaDataEntries.length; ++i) { - if (metaDataEntries[i].version == Module.productVersion) { - newMetaDataEntries.push(metaDataEntries[i]); - continue; - } - - entriesToDelete.push(metaDataEntries[i]); - cacheSize -= metaDataEntries[i].size; - } - - // Remove cache entries until cache size limit is met - newMetaDataEntries.sort(function (a,b) { - return a.accessedAt - b.accessedAt; - }); - - for (var i = 0; i < newMetaDataEntries.length; ++i) { - if (cacheSize < UnityCache.MaximumCacheSize) { - break; - } - - entriesToDelete.push(newMetaDataEntries[i]); - cacheSize -= newMetaDataEntries[i].size; - } - - function deleteMetaDataEntry(url) { - return new Promise(function (resolve, reject) { - var transaction = self.database.transaction([RequestMetaDataStore.name], "readwrite"); - var target = transaction.objectStore(RequestMetaDataStore.name); - target.delete(url); - - transaction.oncomplete = resolve; - transaction.onerror = reject; - }); - } - - function deleteEntries() { - if (entriesToDelete.length === 0) { - return Promise.resolve(); - } - - var entryToDelete = entriesToDelete.pop(); - return self.cache.delete(entryToDelete.url).then(function (deleted) { - if (deleted) { - return deleteMetaDataEntry(entryToDelete.url); - } - }).then(function () { - return deleteEntries(); - }); - } - - return deleteEntries(); - }); - } - - return UnityCache; -}(); - Module.cachedFetch = function () { - var UnityCache = Module.UnityCache; - var fetchWithProgress = Module.fetchWithProgress; - var readBodyWithProgress = Module.readBodyWithProgress; - - function log(message) { - console.log("[UnityCache] " + message); - } - - function resolveURL(url) { - resolveURL.link = resolveURL.link || document.createElement("a"); - resolveURL.link.href = url; - return resolveURL.link.href; - } - - 
function isCrossOriginURL(url) { - var originMatch = window.location.href.match(/^[a-z]+:\/\/[^\/]+/); - return !originMatch || url.lastIndexOf(originMatch[0], 0); - } - - function isCacheEnabled(url, init) { - if (init && init.method && init.method !== "GET") { - return false; - } - - if (init && ["must-revalidate", "immutable"].indexOf(init.control) == -1) { - return false; - } - - if (!url.match("^https?:\/\/")) { - return false; - } - - return true; - } - - function cachedFetch(resource, init) { - var unityCache = UnityCache.getInstance(); - var url = resolveURL((typeof resource === "string") ? resource : resource.url); - var cache = { enabled: isCacheEnabled(url, init) }; - if (init) { - cache.control = init.control; - cache.companyName = init.companyName; - cache.productName = init.productName; - cache.productVersion = init.productVersion; - } - cache.revalidated = false; - cache.metaData = { - url: url, - accessedAt: Date.now(), - version: cache.productVersion - }; - cache.response = null; - - function fetchAndStoreInCache(resource, init) { - return fetch(resource, init).then(function (response) { - if (!cache.enabled || cache.revalidated) { - return response; - } - - if (response.status === 304) { - // Cached response is still valid. Set revalidated flag and return cached response - cache.revalidated = true; - - unityCache.updateRequestMetaData(cache.metaData).then(function () { - log("'" + cache.metaData.url + "' successfully revalidated and served from the indexedDB cache"); - }).catch(function (error) { - log("'" + cache.metaData.url + "' successfully revalidated but not stored in the indexedDB cache due to the error: " + error); - }); - - return readBodyWithProgress(cache.response, init.onProgress); - } else if (response.status == 200) { - // New response -> Store it and cache and return it - cache.response = response; - cache.metaData.updatedAt = cache.metaData.accessedAt; - cache.revalidated = true; - var clonedResponse = response.clone(); - - return readBodyWithProgress(response, init.onProgress).then(function (response) { - // Update cached request and meta data - cache.metaData.size = response.parsedBody.length; - Promise.all([ - unityCache.storeRequest(resource, clonedResponse), - unityCache.updateRequestMetaData(cache.metaData) - ]).then(function () { - log("'" + url + "' successfully downloaded and stored in the indexedDB cache"); - }).catch(function (error) { - log("'" + url + "' successfully downloaded but not stored in the indexedDB cache due to the error: " + error); - }); - - return response; - }); - } else { - // Request failed - log("'" + url + "' request failed with status: " + response.status + " " + response.statusText); - } - - return readBodyWithProgress(response, init.onProgress); - }); - } - - // Use fetch directly if request can't be cached - if (!cache.enabled) { - return fetchWithProgress(resource, init); - } - - return unityCache.loadRequest(url).then(function (result) { - // Fetch resource and store it in cache if not present or outdated version - if (!result) { - return fetchAndStoreInCache(resource, init); - } - - var response = result.response; - var metaData = result.metaData; - cache.response = response; - cache.metaData.size = metaData.size; - cache.metaData.updatedAt = metaData.updatedAt; - - if (cache.control == "immutable") { - cache.revalidated = true; - unityCache.updateRequestMetaData(metaData).then(function () { - log("'" + cache.metaData.url + "' served from the indexedDB cache without revalidation"); - }); - - return 
readBodyWithProgress(response, init.onProgress); - } else if (isCrossOriginURL(url) && (response.headers.get("Last-Modified") || response.headers.get("ETag"))) { - return fetch(url, { method: "HEAD" }).then(function (headResult) { - cache.revalidated = ["Last-Modified", "ETag"].every(function (header) { - return !response.headers.get(header) || response.headers.get(header) == headResult.headers.get(header); - }); - if (cache.revalidated) { - unityCache.updateRequestMetaData(metaData).then(function () { - log("'" + cache.metaData.url + "' successfully revalidated and served from the indexedDB cache"); - }); - - return readBodyWithProgress(cache.response, init.onProgress); - } else { - return fetchAndStoreInCache(resource, init); - } - }); - } else { - init = init || {}; - var requestHeaders = init.headers || {}; - init.headers = requestHeaders; - if (response.headers.get("Last-Modified")) { - requestHeaders["If-Modified-Since"] = response.headers.get("Last-Modified"); - requestHeaders["Cache-Control"] = "no-cache"; - } else if (response.headers.get("ETag")) { - requestHeaders["If-None-Match"] = response.headers.get("ETag"); - requestHeaders["Cache-Control"] = "no-cache"; - } - - return fetchAndStoreInCache(resource, init); - } - }).catch(function (error) { - // Fallback to regular fetch if and IndexDB error occurs - log("Failed to load '" + cache.metaData.url + "' from indexedDB cache due to the error: " + error); - return fetchWithProgress(resource, init); - }); - } - - return cachedFetch; -}(); - - - function downloadBinary(urlId) { - progressUpdate(urlId); - var cacheControl = Module.cacheControl(Module[urlId]); - var fetchImpl = Module.companyName && Module.productName ? Module.cachedFetch : Module.fetchWithProgress; - var url = Module[urlId]; - var mode = /file:\/\//.exec(url) ? "same-origin" : undefined; - - var request = fetchImpl(Module[urlId], { - method: "GET", - companyName: Module.companyName, - productName: Module.productName, - productVersion: Module.productVersion, - control: cacheControl, - mode: mode, - onProgress: function (event) { - progressUpdate(urlId, event); - } - }); - - return request.then(function (response) { - return response.parsedBody; - }).catch(function (e) { - var error = 'Failed to download file ' + Module[urlId]; - if (location.protocol == 'file:') { - showBanner(error + '. Loading web pages via a file:// URL without a web server is not supported by this browser. Please use a local development web server to host Unity content, or use the Unity Build and Run option.', 'error'); - } else { - console.error(error); - } - }); - } - - function downloadFramework() { - return new Promise(function (resolve, reject) { - var script = document.createElement("script"); - script.src = Module.frameworkUrl; - script.onload = function () { - // Adding the framework.js script to DOM created a global - // 'unityFramework' variable that should be considered internal. - // If not, then we have received a malformed file. - if (typeof unityFramework === 'undefined' || !unityFramework) { - var compressions = [['br', 'br'], ['gz', 'gzip']]; - for(var i in compressions) { - var compression = compressions[i]; - if (Module.frameworkUrl.endsWith('.' + compression[0])) { - var error = 'Unable to parse ' + Module.frameworkUrl + '!'; - if (location.protocol == 'file:') { - showBanner(error + ' Loading pre-compressed (brotli or gzip) content via a file:// URL without a web server is not supported by this browser. 
Please use a local development web server to host compressed Unity content, or use the Unity Build and Run option.', 'error'); - return; - } - error += ' This can happen if build compression was enabled but web server hosting the content was misconfigured to not serve the file with HTTP Response Header "Content-Encoding: ' + compression[1] + '" present. Check browser Console and Devtools Network tab to debug.'; - if (compression[0] == 'br') { - if (location.protocol == 'http:') { - var migrationHelp = ['localhost', '127.0.0.1'].indexOf(location.hostname) != -1 ? '' : 'Migrate your server to use HTTPS.' - if (/Firefox/.test(navigator.userAgent)) error = 'Unable to parse ' + Module.frameworkUrl + '!
    If using custom web server, verify that web server is sending .br files with HTTP Response Header "Content-Encoding: br". Brotli compression may not be supported in Firefox over HTTP connections. ' + migrationHelp + ' See https://bugzilla.mozilla.org/show_bug.cgi?id=1670675 for more information.'; - else error = 'Unable to parse ' + Module.frameworkUrl + '!
    If using custom web server, verify that web server is sending .br files with HTTP Response Header "Content-Encoding: br". Brotli compression may not be supported over HTTP connections. Migrate your server to use HTTPS.'; - } - } - showBanner(error, 'error'); - return; - } - }; - showBanner('Unable to parse ' + Module.frameworkUrl + '! The file is corrupt, or compression was misconfigured? (check Content-Encoding HTTP Response Header on web server)', 'error'); - } - - // Capture the variable to local scope and clear it from global - // scope so that JS garbage collection can take place on - // application quit. - var fw = unityFramework; - unityFramework = null; - // Also ensure this function will not hold any JS scope - // references to prevent JS garbage collection. - script.onload = null; - resolve(fw); - } - script.onerror = function(e) { - showBanner('Unable to load file ' + Module.frameworkUrl + '! Check that the file exists on the remote server. (also check browser Console and Devtools Network tab to debug)', 'error'); - } - document.body.appendChild(script); - Module.deinitializers.push(function() { - document.body.removeChild(script); - }); - }); - } - - function loadBuild() { - downloadFramework().then(function (unityFramework) { - unityFramework(Module); - }); - - var dataPromise = downloadBinary("dataUrl"); - Module.preRun.push(function () { - Module.addRunDependency("dataUrl"); - dataPromise.then(function (data) { - var view = new DataView(data.buffer, data.byteOffset, data.byteLength); - var pos = 0; - var prefix = "UnityWebData1.0\0"; - if (!String.fromCharCode.apply(null, data.subarray(pos, pos + prefix.length)) == prefix) - throw "unknown data format"; - pos += prefix.length; - var headerSize = view.getUint32(pos, true); pos += 4; - while (pos < headerSize) { - var offset = view.getUint32(pos, true); pos += 4; - var size = view.getUint32(pos, true); pos += 4; - var pathLength = view.getUint32(pos, true); pos += 4; - var path = String.fromCharCode.apply(null, data.subarray(pos, pos + pathLength)); pos += pathLength; - for (var folder = 0, folderNext = path.indexOf("/", folder) + 1 ; folderNext > 0; folder = folderNext, folderNext = path.indexOf("/", folder) + 1) - Module.FS_createPath(path.substring(0, folder), path.substring(folder, folderNext - 1), true, true); - Module.FS_createDataFile(path, null, data.subarray(offset, offset + size), true, true, true); - } - Module.removeRunDependency("dataUrl"); - }); - }); - } - - return new Promise(function (resolve, reject) { - if (!Module.SystemInfo.hasWebGL) { - reject("Your browser does not support WebGL."); - } else if (Module.SystemInfo.hasWebGL == 1) { - var msg = "Your browser does not support graphics API \"WebGL 2\" which is required for this content."; - if (Module.SystemInfo.browser == 'Safari' && parseInt(Module.SystemInfo.browserVersion) < 15) { - if (Module.SystemInfo.mobile || navigator.maxTouchPoints > 1) - msg += "\nUpgrade to iOS 15 or later."; - else - msg += "\nUpgrade to Safari 15 or later."; - } - reject(msg); - } else if (!Module.SystemInfo.hasWasm) { - reject("Your browser does not support WebAssembly."); - } else { - Module.startupErrorHandler = reject; - onProgress(0); - Module.postRun.push(function () { - onProgress(1); - delete Module.startupErrorHandler; - resolve(unityInstance); - }); - loadBuild(); - } - }); -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/amfenc_av1.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/amfenc_av1.c deleted file mode 100644 index 
8093cb735752b8fc73032bcc676ae5f5c489ef89..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/amfenc_av1.c +++ /dev/null @@ -1,361 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/internal.h" -#include "libavutil/opt.h" -#include "amfenc.h" -#include "codec_internal.h" -#include "internal.h" - -#define OFFSET(x) offsetof(AmfContext, x) -#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM -static const AVOption options[] = { - { "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY, VE, "usage" }, - { "transcoding", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, "usage" }, - { "lowlatency", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, "usage" }, - - { "profile", "Set the profile (default main)", OFFSET(profile), AV_OPT_TYPE_INT,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, VE, "profile" }, - { "main", "", 0, AV_OPT_TYPE_CONST,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, 0, 0, VE, "profile" }, - - { "level", "Set the encoding level (default auto)", OFFSET(level), AV_OPT_TYPE_INT,{.i64 = 0 }, 0, AMF_VIDEO_ENCODER_AV1_LEVEL_7_3, VE, "level" }, - { "auto", "", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, VE, "level" }, - { "2.0", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_0 }, 0, 0, VE, "level" }, - { "2.1", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_1 }, 0, 0, VE, "level" }, - { "2.2", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_2 }, 0, 0, VE, "level" }, - { "2.3", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_3 }, 0, 0, VE, "level" }, - { "3.0", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_3_0 }, 0, 0, VE, "level" }, - { "3.1", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_3_1 }, 0, 0, VE, "level" }, - { "3.2", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_3_2 }, 0, 0, VE, "level" }, - { "3.3", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_3_3 }, 0, 0, VE, "level" }, - { "4.0", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_4_0 }, 0, 0, VE, "level" }, - { "4.1", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_4_1 }, 0, 0, VE, "level" }, - { "4.2", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_4_2 }, 0, 0, VE, "level" }, - { "4.3", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_4_3 }, 0, 0, VE, "level" }, - { "5.0", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_5_0 }, 0, 0, VE, 
"level" }, - { "5.1", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_5_1 }, 0, 0, VE, "level" }, - { "5.2", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_5_2 }, 0, 0, VE, "level" }, - { "5.3", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_5_3 }, 0, 0, VE, "level" }, - { "6.0", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_6_0 }, 0, 0, VE, "level" }, - { "6.1", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_6_1 }, 0, 0, VE, "level" }, - { "6.2", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_6_2 }, 0, 0, VE, "level" }, - { "6.3", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_6_3 }, 0, 0, VE, "level" }, - { "7.0", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_7_0 }, 0, 0, VE, "level" }, - { "7.1", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_7_1 }, 0, 0, VE, "level" }, - { "7.2", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_7_2 }, 0, 0, VE, "level" }, - { "7.3", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_7_3 }, 0, 0, VE, "level" }, - - { "quality", "Set the encoding quality", OFFSET(quality), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED }, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_HIGH_QUALITY, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED, VE, "quality" }, - { "balanced", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_BALANCED }, 0, 0, VE, "quality" }, - { "speed", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED }, 0, 0, VE, "quality" }, - { "quality", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_QUALITY }, 0, 0, VE, "quality" }, - { "high_quality", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_HIGH_QUALITY }, 0, 0, VE, "quality" }, - - { "rc", "Set the rate control mode", OFFSET(rate_control_mode), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_UNKNOWN }, AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_UNKNOWN, AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CBR, VE, "rc" }, - { "cqp", "Constant Quantization Parameter", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CONSTANT_QP }, 0, 0, VE, "rc" }, - { "vbr_latency", "Latency Constrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" }, - { "vbr_peak", "Peak Contrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, 0, 0, VE, "rc" }, - { "cbr", "Constant Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CBR }, 0, 0, VE, "rc" }, - - { "header_insertion_mode", "Set header insertion mode", OFFSET(header_insertion_mode), AV_OPT_TYPE_INT,{.i64 = AMF_VIDEO_ENCODER_AV1_HEADER_INSERTION_MODE_NONE }, AMF_VIDEO_ENCODER_AV1_HEADER_INSERTION_MODE_NONE, AMF_VIDEO_ENCODER_AV1_HEADER_INSERTION_MODE_KEY_FRAME_ALIGNED, VE, "hdrmode" }, - { "none", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_HEADER_INSERTION_MODE_NONE }, 0, 0, VE, "hdrmode" }, - { "gop", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_HEADER_INSERTION_MODE_GOP_ALIGNED }, 0, 0, VE, "hdrmode" }, - { "frame", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_HEADER_INSERTION_MODE_KEY_FRAME_ALIGNED }, 0, 0, VE, "hdrmode" }, - - { "preanalysis", "Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, VE}, - { "enforce_hrd", "Enforce HRD", 
OFFSET(enforce_hrd), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, VE}, - { "filler_data", "Filler Data Enable", OFFSET(filler_data), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, VE}, - - // min_qp_i -> min_qp_intra, min_qp_p -> min_qp_inter - { "min_qp_i", "min quantization parameter for I-frame", OFFSET(min_qp_i), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 255, VE }, - { "max_qp_i", "max quantization parameter for I-frame", OFFSET(max_qp_i), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 255, VE }, - { "min_qp_p", "min quantization parameter for P-frame", OFFSET(min_qp_p), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 255, VE }, - { "max_qp_p", "max quantization parameter for P-frame", OFFSET(max_qp_p), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 255, VE }, - { "qp_p", "quantization parameter for P-frame", OFFSET(qp_p), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 255, VE }, - { "qp_i", "quantization parameter for I-frame", OFFSET(qp_i), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 255, VE }, - { "skip_frame", "Rate Control Based Frame Skip", OFFSET(skip_frame), AV_OPT_TYPE_BOOL,{.i64 = 0 }, 0, 1, VE }, - - { "align", "alignment mode", OFFSET(align), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_NO_RESTRICTIONS }, AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_64X16_ONLY, AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_NO_RESTRICTIONS, VE, "align" }, - { "64x16", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_64X16_ONLY }, 0, 0, VE, "align" }, - { "1080p", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_64X16_1080P_CODED_1082 }, 0, 0, VE, "align" }, - { "none", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_NO_RESTRICTIONS }, 0, 0, VE, "align" }, - - { NULL } - -}; - -static av_cold int amf_encode_init_av1(AVCodecContext* avctx) -{ - int ret = 0; - AMF_RESULT res = AMF_OK; - AmfContext* ctx = avctx->priv_data; - AMFVariantStruct var = { 0 }; - amf_int64 profile = 0; - amf_int64 profile_level = 0; - AMFBuffer* buffer; - AMFGuid guid; - AMFRate framerate; - AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); - - - - if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { - framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den); - } - else { - framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame); - } - - if ((ret = ff_amf_encode_init(avctx)) < 0) - return ret; - - // init static parameters - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_USAGE, ctx->usage); - - AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_FRAMESIZE, framesize); - - AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_FRAMERATE, framerate); - - switch (avctx->profile) { - case FF_PROFILE_AV1_MAIN: - profile = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN; - break; - default: - break; - } - if (profile == 0) { - profile = ctx->profile; - } - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PROFILE, profile); - - profile_level = avctx->level; - if (profile_level == FF_LEVEL_UNKNOWN) { - profile_level = ctx->level; - } - if (profile_level != 0) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_LEVEL, profile_level); - } - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET, ctx->quality); - - // Maximum Reference Frames - if (avctx->refs != -1) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MAX_NUM_REFRAMES, avctx->refs); - } - - // Picture control properties - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, 
AMF_VIDEO_ENCODER_AV1_GOP_SIZE, avctx->gop_size); - - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_HEADER_INSERTION_MODE, ctx->header_insertion_mode); - - // Rate control - // autodetect rate control method - if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_UNKNOWN) { - if (ctx->min_qp_i != -1 || ctx->max_qp_i != -1 || - ctx->min_qp_p != -1 || ctx->max_qp_p != -1 || - ctx->qp_i != -1 || ctx->qp_p != -1) { - ctx->rate_control_mode = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CONSTANT_QP; - av_log(ctx, AV_LOG_DEBUG, "Rate control turned to CQP\n"); - } - else if (avctx->rc_max_rate > 0) { - ctx->rate_control_mode = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR; - av_log(ctx, AV_LOG_DEBUG, "Rate control turned to Peak VBR\n"); - } - else { - ctx->rate_control_mode = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CBR; - av_log(ctx, AV_LOG_DEBUG, "Rate control turned to CBR\n"); - } - } - - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD, ctx->rate_control_mode); - if (avctx->rc_buffer_size) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_VBV_BUFFER_SIZE, avctx->rc_buffer_size); - - if (avctx->rc_initial_buffer_occupancy != 0) { - int amf_buffer_fullness = avctx->rc_initial_buffer_occupancy * 64 / avctx->rc_buffer_size; - if (amf_buffer_fullness > 64) - amf_buffer_fullness = 64; - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INITIAL_VBV_BUFFER_FULLNESS, amf_buffer_fullness); - } - } - - // Pre-Pass, Pre-Analysis, Two-Pass - AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PRE_ANALYSIS_ENABLE, ctx->preanalysis); - - // init dynamic rate control params - if (ctx->max_au_size) - ctx->enforce_hrd = 1; - AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_ENFORCE_HRD, ctx->enforce_hrd); - AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_FILLER_DATA, ctx->filler_data); - - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_TARGET_BITRATE, avctx->bit_rate); - - if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CBR) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PEAK_BITRATE, avctx->bit_rate); - } - if (avctx->rc_max_rate) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PEAK_BITRATE, avctx->rc_max_rate); - } - else if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR) { - av_log(ctx, AV_LOG_WARNING, "rate control mode is PEAK_CONSTRAINED_VBR but rc_max_rate is not set\n"); - } - if (avctx->bit_rate > 0) { - ctx->rate_control_mode = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CBR; - av_log(ctx, AV_LOG_DEBUG, "Rate control turned to CBR\n"); - } - - switch (ctx->align) - { - case AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_64X16_ONLY: - if (avctx->width / 64 * 64 != avctx->width || avctx->height / 16 * 16 != avctx->height) - { - res = AMF_NOT_SUPPORTED; - av_log(ctx, AV_LOG_ERROR, "Resolution incorrect for alignment mode\n"); - return AVERROR_EXIT; - } - break; - case AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_64X16_1080P_CODED_1082: - if ((avctx->width / 64 * 64 == avctx->width && avctx->height / 16 * 16 == avctx->height) || (avctx->width == 1920 && avctx->height == 1080)) - { - res = AMF_OK; - } - else - { - res = AMF_NOT_SUPPORTED; - av_log(ctx, AV_LOG_ERROR, "Resolution incorrect for alignment mode\n"); - return AVERROR_EXIT; - } - break; - case AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_NO_RESTRICTIONS: - res = 
AMF_OK; - break; - default: - res = AMF_NOT_SUPPORTED; - av_log(ctx, AV_LOG_ERROR, "Invalid alignment mode\n"); - return AVERROR_EXIT; - } - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE, ctx->align); - - - // init encoder - res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height); - AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d\n", res); - - // init dynamic picture control params - if (ctx->min_qp_i != -1) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MIN_Q_INDEX_INTRA, ctx->min_qp_i); - } - else if (avctx->qmin != -1) { - int qval = avctx->qmin > 255 ? 255 : avctx->qmin; - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MIN_Q_INDEX_INTRA, qval); - } - if (ctx->max_qp_i != -1) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MAX_Q_INDEX_INTRA, ctx->max_qp_i); - } - else if (avctx->qmax != -1) { - int qval = avctx->qmax > 255 ? 255 : avctx->qmax; - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MAX_Q_INDEX_INTRA, qval); - } - if (ctx->min_qp_p != -1) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MIN_Q_INDEX_INTER, ctx->min_qp_p); - } - else if (avctx->qmin != -1) { - int qval = avctx->qmin > 255 ? 255 : avctx->qmin; - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MIN_Q_INDEX_INTER, qval); - } - if (ctx->max_qp_p != -1) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MAX_Q_INDEX_INTER, ctx->max_qp_p); - } - else if (avctx->qmax != -1) { - int qval = avctx->qmax > 255 ? 255 : avctx->qmax; - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_MAX_Q_INDEX_INTER, qval); - } - - if (ctx->qp_p != -1) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_Q_INDEX_INTER, ctx->qp_p); - } - if (ctx->qp_i != -1) { - AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_Q_INDEX_INTRA, ctx->qp_i); - } - AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_SKIP_FRAME, ctx->skip_frame); - - - // fill extradata - res = AMFVariantInit(&var); - AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AMFVariantInit() failed with error %d\n", res); - - res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_AV1_EXTRA_DATA, &var); - AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_EXTRADATA) failed with error %d\n", res); - AMF_RETURN_IF_FALSE(ctx, var.pInterface != NULL, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_EXTRADATA) returned NULL\n"); - - guid = IID_AMFBuffer(); - - res = var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface - if (res != AMF_OK) { - var.pInterface->pVtbl->Release(var.pInterface); - } - AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "QueryInterface(IID_AMFBuffer) failed with error %d\n", res); - - avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer); - avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!avctx->extradata) { - buffer->pVtbl->Release(buffer); - var.pInterface->pVtbl->Release(var.pInterface); - return AVERROR(ENOMEM); - } - memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size); - - buffer->pVtbl->Release(buffer); - var.pInterface->pVtbl->Release(var.pInterface); - - return 0; -} - -static const FFCodecDefault defaults[] = { - { "refs", "-1" }, - { "aspect", "0" 
}, - { "b", "2M" }, - { "g", "250" }, - { "qmin", "-1" }, - { "qmax", "-1" }, - { NULL }, -}; - -static const AVClass av1_amf_class = { - .class_name = "av1_amf", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFCodec ff_av1_amf_encoder = { - .p.name = "av1_amf", - CODEC_LONG_NAME("AMD AMF AV1 encoder"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_AV1, - .init = amf_encode_init_av1, - FF_CODEC_RECEIVE_PACKET_CB(ff_amf_receive_packet), - .close = ff_amf_encode_close, - .priv_data_size = sizeof(AmfContext), - .p.priv_class = &av1_amf_class, - .defaults = defaults, - .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE | - AV_CODEC_CAP_DR1, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, - .p.pix_fmts = ff_amf_pix_fmts, - .p.wrapper_name = "amf", - .hw_configs = ff_amfenc_hw_configs, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/midivid.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/midivid.c deleted file mode 100644 index 599d5c8f8fabfb73fd4ce83de7fcb8ac41d80e0e..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/midivid.c +++ /dev/null @@ -1,287 +0,0 @@ -/* - * MidiVid decoder - * Copyright (c) 2019 Paul B Mahol - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/imgutils.h" -#include "libavutil/internal.h" -#include "libavutil/mem.h" - -#define BITSTREAM_READER_LE -#include "avcodec.h" -#include "get_bits.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "decode.h" - -typedef struct MidiVidContext { - GetByteContext gb; - - uint8_t *uncompressed; - unsigned int uncompressed_size; - uint8_t *skip; - - AVFrame *frame; -} MidiVidContext; - -static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame) -{ - GetByteContext *gb = &s->gb; - GetBitContext mask; - GetByteContext idx9; - uint16_t nb_vectors, intra_flag; - const uint8_t *vec; - const uint8_t *mask_start; - uint8_t *skip; - uint32_t mask_size; - int idx9bits = 0; - int idx9val = 0; - uint32_t nb_blocks; - - nb_vectors = bytestream2_get_le16(gb); - intra_flag = !!bytestream2_get_le16(gb); - if (intra_flag) { - nb_blocks = (avctx->width / 2) * (avctx->height / 2); - } else { - int ret, skip_linesize, padding; - - nb_blocks = bytestream2_get_le32(gb); - skip_linesize = avctx->width >> 1; - mask_start = gb->buffer_start + bytestream2_tell(gb); - mask_size = (FFALIGN(avctx->width, 32) >> 2) * (avctx->height >> 2) >> 3; - padding = (FFALIGN(avctx->width, 32) - avctx->width) >> 2; - - if (bytestream2_get_bytes_left(gb) < mask_size) - return AVERROR_INVALIDDATA; - - ret = init_get_bits8(&mask, mask_start, mask_size); - if (ret < 0) - return ret; - bytestream2_skip(gb, mask_size); - skip = s->skip; - - for (int y = 0; y < avctx->height >> 2; y++) { - for (int x = 0; x < avctx->width >> 2; x++) { - int flag = !get_bits1(&mask); - - skip[(y*2) *skip_linesize + x*2 ] = flag; - skip[(y*2) *skip_linesize + x*2+1] = flag; - skip[(y*2+1)*skip_linesize + x*2 ] = flag; - skip[(y*2+1)*skip_linesize + x*2+1] = flag; - } - skip_bits_long(&mask, padding); - } - } - - vec = gb->buffer_start + bytestream2_tell(gb); - if (bytestream2_get_bytes_left(gb) < nb_vectors * 12) - return AVERROR_INVALIDDATA; - bytestream2_skip(gb, nb_vectors * 12); - if (nb_vectors > 256) { - if (bytestream2_get_bytes_left(gb) < (nb_blocks + 7 * !intra_flag) / 8) - return AVERROR_INVALIDDATA; - bytestream2_init(&idx9, gb->buffer_start + bytestream2_tell(gb), (nb_blocks + 7 * !intra_flag) / 8); - bytestream2_skip(gb, (nb_blocks + 7 * !intra_flag) / 8); - } - - skip = s->skip; - - for (int y = avctx->height - 2; y >= 0; y -= 2) { - uint8_t *dsty = frame->data[0] + y * frame->linesize[0]; - uint8_t *dstu = frame->data[1] + y * frame->linesize[1]; - uint8_t *dstv = frame->data[2] + y * frame->linesize[2]; - - for (int x = 0; x < avctx->width; x += 2) { - int idx; - - if (!intra_flag && *skip++) - continue; - if (bytestream2_get_bytes_left(gb) <= 0) - return AVERROR_INVALIDDATA; - if (nb_vectors <= 256) { - idx = bytestream2_get_byte(gb); - } else { - if (idx9bits == 0) { - idx9val = bytestream2_get_byte(&idx9); - idx9bits = 8; - } - idx9bits--; - idx = bytestream2_get_byte(gb) | (((idx9val >> (7 - idx9bits)) & 1) << 8); - } - if (idx >= nb_vectors) - return AVERROR_INVALIDDATA; - - dsty[x +frame->linesize[0]] = vec[idx * 12 + 0]; - dsty[x+1+frame->linesize[0]] = vec[idx * 12 + 3]; - dsty[x] = vec[idx * 12 + 6]; - dsty[x+1] = vec[idx * 12 + 9]; - - dstu[x +frame->linesize[1]] = vec[idx * 12 + 1]; - dstu[x+1+frame->linesize[1]] = vec[idx * 12 + 4]; - dstu[x] = vec[idx 
* 12 + 7]; - dstu[x+1] = vec[idx * 12 +10]; - - dstv[x +frame->linesize[2]] = vec[idx * 12 + 2]; - dstv[x+1+frame->linesize[2]] = vec[idx * 12 + 5]; - dstv[x] = vec[idx * 12 + 8]; - dstv[x+1] = vec[idx * 12 +11]; - } - } - - return intra_flag; -} - -static ptrdiff_t lzss_uncompress(MidiVidContext *s, GetByteContext *gb, uint8_t *dst, unsigned int size) -{ - uint8_t *dst_start = dst; - uint8_t *dst_end = dst + size; - - for (;bytestream2_get_bytes_left(gb) >= 3;) { - int op = bytestream2_get_le16(gb); - - for (int i = 0; i < 16; i++) { - if (op & 1) { - int s0 = bytestream2_get_byte(gb); - int s1 = bytestream2_get_byte(gb); - int offset = ((s0 & 0xF0) << 4) | s1; - int length = (s0 & 0xF) + 3; - - if (dst + length > dst_end || - dst - offset < dst_start) - return AVERROR_INVALIDDATA; - if (offset > 0) { - for (int j = 0; j < length; j++) { - dst[j] = dst[j - offset]; - } - } - dst += length; - } else { - if (dst >= dst_end) - return AVERROR_INVALIDDATA; - *dst++ = bytestream2_get_byte(gb); - } - op >>= 1; - } - } - - return dst - dst_start; -} - -static int decode_frame(AVCodecContext *avctx, AVFrame *rframe, - int *got_frame, AVPacket *avpkt) -{ - MidiVidContext *s = avctx->priv_data; - GetByteContext *gb = &s->gb; - AVFrame *frame = s->frame; - int ret, key, uncompressed; - - if (avpkt->size <= 13) - return AVERROR_INVALIDDATA; - - bytestream2_init(gb, avpkt->data, avpkt->size); - bytestream2_skip(gb, 8); - uncompressed = bytestream2_get_le32(gb); - - if (!uncompressed) { - av_fast_padded_malloc(&s->uncompressed, &s->uncompressed_size, 16LL * (avpkt->size - 12)); - if (!s->uncompressed) - return AVERROR(ENOMEM); - - ret = lzss_uncompress(s, gb, s->uncompressed, s->uncompressed_size); - if (ret < 0) - return ret; - bytestream2_init(gb, s->uncompressed, ret); - } - - if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0) - return ret; - - ret = decode_mvdv(s, avctx, frame); - - if (ret < 0) - return ret; - key = ret; - - if ((ret = av_frame_ref(rframe, s->frame)) < 0) - return ret; - - frame->pict_type = key ? 
AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; - frame->key_frame = key; - *got_frame = 1; - - return avpkt->size; -} - -static av_cold int decode_init(AVCodecContext *avctx) -{ - MidiVidContext *s = avctx->priv_data; - int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx); - - if (avctx->width & 3 || avctx->height & 3) - ret = AVERROR_INVALIDDATA; - - if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n", - avctx->width, avctx->height); - return ret; - } - - avctx->pix_fmt = AV_PIX_FMT_YUV444P; - - s->frame = av_frame_alloc(); - if (!s->frame) - return AVERROR(ENOMEM); - s->skip = av_calloc(avctx->width >> 1, avctx->height >> 1); - if (!s->skip) - return AVERROR(ENOMEM); - - return 0; -} - -static void decode_flush(AVCodecContext *avctx) -{ - MidiVidContext *s = avctx->priv_data; - - av_frame_unref(s->frame); -} - -static av_cold int decode_close(AVCodecContext *avctx) -{ - MidiVidContext *s = avctx->priv_data; - - av_frame_free(&s->frame); - av_freep(&s->uncompressed); - av_freep(&s->skip); - - return 0; -} - -const FFCodec ff_mvdv_decoder = { - .p.name = "mvdv", - CODEC_LONG_NAME("MidiVid VQ"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_MVDV, - .priv_data_size = sizeof(MidiVidContext), - .init = decode_init, - FF_CODEC_DECODE_CB(decode_frame), - .flush = decode_flush, - .close = decode_close, - .p.capabilities = AV_CODEC_CAP_DR1, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, -}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Battlegrounds Mobile India on Android 12 APK and OBB Download Guide.md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy Battlegrounds Mobile India on Android 12 APK and OBB Download Guide.md deleted file mode 100644 index 0c6be47e0abe9f2a5245bb5dbe4caeea50cc5b64..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Battlegrounds Mobile India on Android 12 APK and OBB Download Guide.md +++ /dev/null @@ -1,160 +0,0 @@ -
    -

    BGMI APK Download Android 12: How to Install and Play the Latest Version of Battlegrounds Mobile India

    -

    If you are a fan of battle royale games, you must have heard of BGMI, or Battlegrounds Mobile India. It is one of the most popular and exciting mobile games in India, with millions of players enjoying it every day. But what if you want to play it on your Android 12 device? How can you download and install the latest version of BGMI APK on your phone or tablet? In this article, we will answer all these questions and more. We will tell you what BGMI is, why you should download it, how to download it, how to install it, and how to play it on your Android 12 device. So, without further ado, let's get started!

    -




    -

    What is BGMI?

    -

    A brief introduction to the game and its features

    -

    BGMI stands for Battlegrounds Mobile India, and it is a mobile game developed by KRAFTON, Inc., a South Korean company. It is a battle royale game, which means that you have to survive against other players in a shrinking map until you are the last one standing. You can play solo, duo, or squad mode, with up to 100 players in each match. You can also choose from different maps, modes, weapons, vehicles, skins, and more.

    -

    BGMI is not just a game, but also a social platform where you can interact with other players, join clans, participate in events, watch live streams, and more. You can also customize your character, profile, inventory, and settings according to your preferences. BGMI is free to play, but you can also buy in-game currency and items with real money if you want.

    -

    The difference between BGMI and PUBG Mobile

    -

    You might be wondering what is the difference between BGMI and PUBG Mobile, another popular battle royale game. Well, the main difference is that BGMI is made exclusively for Indian players, while PUBG Mobile is available globally. This means that BGMI has some features that are tailored for the Indian audience, such as:

    -
      -
    • Green blood instead of red blood
    • A disclaimer that the game is not based on real events or characters
    • A limit on how long you can play the game per day
    • A feature that reminds you to take breaks and drink water
    • A feature that allows you to report any inappropriate content or behavior
    • Exclusive events, rewards, tournaments, and collaborations for Indian players
    -

    However, apart from these differences, BGMI and PUBG Mobile are very similar in terms of gameplay, graphics, sound effects, and updates. In fact, BGMI is based on PUBG Mobile's global version, so you can expect the same quality and experience from both games.

    -

    Why download BGMI APK?

    -

    The benefits of downloading the APK file instead of using the Google Play Store

    -

    One way to download and install BGMI on your Android device is to use the Google Play Store. However, this method has some drawbacks, such as:

    -

    -
      -
    • You need a stable internet connection to download the game from the Play Store
    • You need enough storage space on your device to download the game from the Play Store
    • You may encounter errors or bugs while downloading or installing the game from the Play Store
    • You may not be able to access the latest version of the game from the Play Store
    -

    That's why downloading the APK file of BGMI is a better option. APK stands for Android Package Kit, and it is a file format that contains all the necessary data and code to run an app on your Android device. By downloading the APK file of BGMI, you can enjoy some benefits, such as:

    -
      -
    • You can download the game from any source you trust, such as a website, a cloud service, or a friend
    • -
    • You can download the game faster and use less data than downloading from the Play Store
    • -
    • You can save the APK file on your device or external storage and install it anytime you want
    • -
    • You can bypass any restrictions or errors that may occur on the Play Store
    • -
    • You can access the latest version of the game before it is available on the Play Store
    • -
    -

    The compatibility issues with Android 12 and how to fix them

    -

    However, downloading the APK file of BGMI is not without its challenges. One of the main issues that you may face is the compatibility with Android 12, the latest version of the Android operating system. Android 12 has some new features and changes that may affect how BGMI runs on your device, such as:

    -
      -
    • A new permission system that requires you to grant access to specific files and folders on your device
    • -
    • A new app hibernation feature that automatically optimizes the storage and battery usage of apps that you don't use frequently
    • -
    • A new privacy dashboard that shows you how apps access your location, camera, microphone, and other sensitive data
    • -
    • A new performance class feature that categorizes devices based on their capabilities and performance levels
    • -
    -

    These features may cause some problems when you try to download, install, or play BGMI on your Android 12 device, such as:

    -
      -
    • You may not be able to install the APK file if it is not signed by a trusted developer or source
    • -
    • You may not be able to access or write to the OBB file, which contains the game data, if you don't grant permission to the app
    • -
    • You may experience lag, crashes, or glitches if your device does not meet the performance class requirements for BGMI
    • -
    • You may lose your game progress or settings if your app goes into hibernation mode and deletes some data
    • -
    • You may compromise your privacy or security if you allow BGMI to access your sensitive data without your knowledge or consent
    • -
    -

    Fortunately, there are some ways to fix these issues and enjoy BGMI on your Android 12 device without any hassle. Here are some tips that you can follow:

    -
      -
    • Download the APK file from a trusted source, such as the official website of BGMI or a reputable third-party website. You can also scan the APK file with an antivirus app before installing it.
    • -
    • Enable the "Unknown sources" option in your device settings to allow installation of apps from sources other than the Play Store. You can also disable the "Verify apps" option to prevent Google from scanning and blocking apps that are not from the Play Store.
    • -
    • Grant permission to BGMI to access and write to your device storage when prompted. You can also manually change the permission settings in your device settings. You can also create a folder named "Android/obb/com.pubg.imobile" in your device storage and copy the OBB file there.
    • -
    • Check if your device meets the minimum requirements for BGMI, which are: 2 GB RAM, Android 5.1.1 or above, and at least 4 GB of free storage space. You can also check if your device belongs to one of the performance classes supported by BGMI, which are: Performance Class 1 (high-end devices), Performance Class 2 (mid-range devices), and Performance Class 3 (low-end devices). (A quick adb-based check is sketched after this list.)
    • -
    • Optimize your device performance by closing other apps, clearing cache and junk files, updating your software and drivers, and using a game booster app. You can also adjust your game settings to lower graphics quality, frame rate, sound effects, and other options.
    • -
    • Prevent your app from going into hibernation mode by using it frequently, adding it to your favorites list, or disabling the app hibernation feature in your device settings. You can also backup your game data and settings using a cloud service or an external storage.
    • -
    • Protect your privacy and security by reviewing and limiting how BGMI accesses your location, camera, microphone, and other sensitive data. You can also use a VPN app to hide your IP address and location from other players and servers. You can also avoid clicking on any suspicious links or ads that may appear in the game or on the website.
    • -
    -
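If you have a computer handy, the version and storage checks above can be done over adb instead of digging through the phone's settings. This is only a hedged sketch: it assumes the Android platform tools (adb) are installed, USB debugging is enabled, and a single device is connected; the Android 5.1.1 and 4 GB figures are simply the minimums quoted above.

```python
import subprocess

def adb_out(args):
    """Run an adb command and return its trimmed stdout (assumes adb is on PATH)."""
    result = subprocess.run(["adb"] + args, check=True, capture_output=True, text=True)
    return result.stdout.strip()

# Android version and SDK level reported by the connected device.
android_version = adb_out(["shell", "getprop", "ro.build.version.release"])
sdk_level = adb_out(["shell", "getprop", "ro.build.version.sdk"])
print(f"Android version: {android_version} (SDK {sdk_level})")

# Free space on shared storage; compare the output against the ~4 GB minimum
# mentioned above before copying the ~700 MB OBB file.
print(adb_out(["shell", "df", "/sdcard"]))
```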

    How to download and install BGMI APK on Android 12?

    -

    The steps to download the APK and OBB files from a trusted source

    -

    Now that you know why and how to download BGMI APK on your Android 12 device, let's see the actual steps to do it. The first thing you need to do is to download the APK and OBB files of BGMI from a trusted source. Here are the steps to follow:

    -
      -
    1. Open your browser and go to the official website of BGMI, which is https://www.battlegroundsmobileindia.com/. You can also use a third-party website that offers the APK and OBB files of BGMI, such as https://www.apkmirror.com/ or https://www.apkpure.com/. Make sure that the website is safe and reliable before downloading anything from it.
    2. On the website, look for the download link or button for BGMI APK and OBB files. The APK file should have a name like "com.pubg.imobile.apk" and the OBB file should have a name like "main.15255.com.pubg.imobile.obb". The file size of the APK file should be around 70 MB and the file size of the OBB file should be around 700 MB.
    3. Click on the download link or button and wait for the files to be downloaded on your device. You may need to allow your browser to download files from unknown sources if prompted. You may also see some ads or pop-ups on the website, so be careful not to click on them.
    4. Once the files are downloaded, you can find them in your device's download folder or in the location that you specified. You can also check the notification bar or the download manager app to see the progress and status of your downloads. (A quick way to sanity-check the downloaded files is shown in the sketch after this list.)
    -
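If you downloaded the files on a computer first, a small script can confirm that both files are present and roughly the expected size before you transfer them. This is only a hedged sketch: the download folder is a placeholder, the file names are taken from step 2 above, and the ~70 MB / ~700 MB figures are only the approximate sizes mentioned there and may differ for other releases.

```python
import os

# Hypothetical download folder; adjust to wherever your browser saved the files.
DOWNLOAD_DIR = os.path.expanduser("~/Downloads")

# File names and approximate sizes taken from the steps above.
EXPECTED = {
    "com.pubg.imobile.apk": 70 * 1024 * 1024,               # ~70 MB
    "main.15255.com.pubg.imobile.obb": 700 * 1024 * 1024,   # ~700 MB
}

for name, approx_size in EXPECTED.items():
    path = os.path.join(DOWNLOAD_DIR, name)
    if not os.path.isfile(path):
        print(f"MISSING: {name} not found in {DOWNLOAD_DIR}")
        continue
    size = os.path.getsize(path)
    # Flag files that are less than half the expected size (likely incomplete downloads).
    status = "OK" if size >= approx_size // 2 else "SUSPICIOUSLY SMALL"
    print(f"{status}: {name} is {size / (1024 * 1024):.1f} MB")
```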

    The steps to install the APK and OBB files on your device

    -

    The next thing you need to do is to install the APK and OBB files of BGMI on your device. Here are the steps to follow:

    -
      -
    1. Before installing the files, make sure that you have enough storage space on your device or external storage. You also need to enable the "Unknown sources" option in your device settings to allow installation of apps from sources other than the Play Store.
    2. Locate the APK file that you downloaded and tap on it. You may see a warning message that says "This type of file can harm your device". Ignore it and tap on "Install" or "OK". You may also need to grant permission to BGMI to access your device storage, location, camera, microphone, and other data.
    3. Wait for the installation process to complete. It may take a few minutes depending on your device speed and performance. Do not exit or interrupt the installation process until it is done.
    4. After installing the APK file, do not launch the game yet. You need to copy or move the OBB file that you downloaded to a specific folder on your device storage. The folder should be named "Android/obb/com.pubg.imobile". If you don't have this folder, you can create it manually. (One way to do this from a computer is shown in the sketch after this list.)
    5. Locate the OBB file that you downloaded and tap on it. You may see a menu that gives you options to copy, move, rename, or delete the file. Choose "Copy" or "Move" and select the destination folder as "Android/obb/com.pubg.imobile". Wait for the copying or moving process to complete.
    -
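If you have a computer and the Android platform tools, the same install-and-copy steps can be driven over USB with adb instead of a file manager. This is a hedged sketch, not the official installation method: it assumes adb is installed, USB debugging is enabled, and one device is connected; the local file paths are placeholders, and only the on-device OBB folder name comes from the steps above (assuming shared storage is mounted at /sdcard).

```python
import subprocess

# Placeholder local paths; point these at the files you actually downloaded.
APK_PATH = "com.pubg.imobile.apk"
OBB_PATH = "main.15255.com.pubg.imobile.obb"

# On-device OBB folder named in the steps above.
OBB_DIR = "/sdcard/Android/obb/com.pubg.imobile"

def run(cmd):
    """Run a command, echoing it first and stopping if it fails."""
    print("+", " ".join(cmd))
    subprocess.run(cmd, check=True)

# Install the APK over USB.
run(["adb", "install", APK_PATH])

# Create the OBB folder and push the OBB file into it.
run(["adb", "shell", "mkdir", "-p", OBB_DIR])
run(["adb", "push", OBB_PATH, OBB_DIR + "/"])
```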

    The steps to launch and update the game

    -

    The final thing you need to do is to launch and update the game. Here are the steps to follow:

    -
      -
    1. Go to your device's app drawer or home screen and look for the BGMI icon. Tap on it to launch the game. You may see a splash screen that shows the game logo and some information.
    2. The game will check for updates and download them if available. You need a stable internet connection for this process. The updates may include new features, bug fixes, security patches, and more. The update size may vary depending on your game version and device model.
    3. After updating, you will see a login screen that asks you to choose a login method. You can use Facebook, Twitter, Google Play Games, or Guest as your login method. If you have an existing account from PUBG Mobile, you can use the same login method to access your account on BGMI. If you are a new player, you can create a new account with any login method.
    4. After logging in, you will see a welcome screen that shows some tips and instructions on how to play the game. You can also watch a video tutorial that explains the basic gameplay and controls. You can skip this screen if you want.
    5. Next, you will see a character creation screen that allows you to customize your character's appearance, name, gender, and voice. You can also choose a server to play on, such as India, Asia, Europe, or North America. You can change these settings later if you want.
    6. Finally, you will see the main menu screen that gives you access to various options and features of the game. You can start playing the game by tapping on the "Start" button and choosing a mode and a map. You can also join or create a team with other players by tapping on the "Team" button. You can also explore other options such as inventory, shop, events, missions, settings, and more by tapping on the icons on the bottom of the screen.
    -

    How to play BGMI on Android 12?

    -

    The basic gameplay tips and tricks for beginners

    -

    Now that you have downloaded, installed, and launched BGMI on your Android 12 device, you are ready to play and have fun. But how do you play BGMI effectively and enjoyably? Here are some basic gameplay tips and tricks for beginners:

    -
      -
    • The goal of the game is to survive until you are the last one standing. You can do this by finding weapons, armor, items, vehicles, and other resources on the map. You can also kill or avoid other players who are trying to do the same.
    • -
    • The game starts with you parachuting from an airplane onto a map of your choice. You can choose where to land by looking at the map and tapping on the screen. You can also follow or invite other players to land with you by using the markers and voice chat.
    • -
    • After landing, you need to loot as fast as possible. Look for buildings, crates, vehicles, and other places that may contain loot. Loot includes guns, ammo, attachments, grenades, medkits, bandages, energy drinks, painkillers, helmets, vests, backpacks, scopes, and more. Loot varies in rarity and quality, so look for the best ones you can find.
    • -
    • You also need to be aware of the safe zone and the blue zone. The safe zone is a white circle on the map that indicates where you need to be to avoid taking damage. The blue zone is a blue circle that shrinks over time and damages anyone who is outside of it. The safe zone and the blue zone force players to move closer to each other and create more action and tension.
    • -
    • You also need to be aware of the red zone and the air drop. The red zone is a red circle on the map that indicates where bombs will be dropped randomly. The air drop is a plane that flies over the map and drops a crate that contains rare and powerful loot. Both the red zone and the air drop create more risk and reward for players who want to challenge themselves.
    • -
    • You also need to be aware of your health and inventory. Your health is shown by a green bar on the top left corner of the screen. You can heal yourself by using medkits, bandages, energy drinks, painkillers, or other items. Your inventory is shown by an icon on the bottom right corner of the screen. You can access your inventory by tapping on it and manage your items by dragging them, dropping them, or using them. You can also switch between your weapons, attachments, and grenades by tapping on them.
    • -
    -

    The advanced gameplay strategies and tactics for experts

    -

    If you are already familiar with the basic gameplay of BGMI, you may want to learn some advanced gameplay strategies and tactics to improve your skills and performance. Here are some tips for experts:

    -
      -
    • Choose your landing spot wisely. Depending on your play style and preference, you may want to land in a hot spot, a cold spot, or somewhere in between. A hot spot is a place where many players land and fight for loot, such as military base, school, or pochinki. A cold spot is a place where few players land and loot peacefully, such as farm, shelter, or primorsk. A hot spot offers more action and loot, but also more risk and competition. A cold spot offers more safety and time, but also less loot and excitement.
    • -
    • Use the right weapon for the right situation. Depending on the range, terrain, and enemy, you may want to use different weapons to maximize your damage and accuracy. For example, you may want to use a sniper rifle for long-range shots, an assault rifle for medium-range shots, a submachine gun for close-range shots, a shotgun for very close-range shots, or a pistol for backup shots. You may also want to use different attachments to enhance your weapons, such as suppressors, compensators, flash hiders, extended mags, quickdraw mags, scopes, red dots, holographic sights, or lasers.
    • -
    • Use the right vehicle for the right situation. Depending on the map, zone, and enemy, you may want to use different vehicles to move faster and safer. For example, you may want to use a car for road trips, a bike for off-road trips, a boat for water trips, or a glider for air trips. You may also want to use different vehicles to attack or escape from enemies, such as ramming them with a car, shooting them from a bike, throwing grenades from a boat, or dropping bombs from a glider.
    • -
    • Use the right strategy for the right situation. Depending on your goal, position, and enemy, you may want to use different strategies to survive and win. For example, you may want to use an aggressive strategy if you want to kill more enemies and get more loot, a passive strategy if you want to avoid more enemies and save more resources, or a balanced strategy if you want to mix both. You may also want to use different tactics to gain an advantage over your enemies, such as flanking them, ambushing them, sniping them, rushing them, or baiting them.
    • -
    -

    The best settings and controls for optimal performance

    -

    Another way to improve your gameplay and experience on BGMI is to adjust your settings and controls according to your device and preference. Here are some tips to optimize your settings and controls:

    -
      -
    • Adjust your graphics settings to match your device's capabilities and performance. You can choose from smooth, balanced, HD, HDR, or ultra HD graphics quality, and from low, medium, high, ultra, or extreme frame rate. You can also enable or disable anti-aliasing, shadows, brightness, and auto-adjust graphics. The higher the graphics quality and frame rate, the better the visuals and smoothness, but also the more battery and data consumption.
    • -
    • Adjust your sound settings to enhance your hearing and communication. You can choose from low, medium, or high sound quality, and from low or high sound effects. You can also enable or disable voice chat, microphone, speaker, team voice chat channel, and quick chat. The higher the sound quality and effects, the better the audio and immersion, but also the more battery and data consumption.
    • -
    • Adjust your control settings to suit your play style and preference. You can choose from three preset control layouts: thumb (two fingers), claw (four fingers), or custom (any number of fingers). You can also customize the size, position, opacity, and function of each button on the screen. You can also enable or disable gyroscope, peek and fire, peek and open scope, block sight warning, bolt action rifle and crossbow firing mode, shotgun firing mode, and auto-open doors.
    • -
    -

    Conclusion

    -

    BGMI is a thrilling and addictive game that you can play on your Android 12 device with ease. All you need to do is to download the APK and OBB files of BGMI from a trusted source, install them on your device with the right permissions and settings, launch and update the game with a stable internet connection, and enjoy the game with the best graphics, sound, and control settings. You can also learn some tips and tricks to play the game better and smarter. BGMI is not only a game, but also a social platform where you can meet and interact with other players, join clans, participate in events, watch live streams, and more. BGMI is a game that you can enjoy for hours and hours without getting bored or tired. So, what are you waiting for? Download BGMI APK on your Android 12 device today and join the battlegrounds!

    FAQs

    -

    Here are some frequently asked questions and answers about BGMI APK download on Android 12:

    -

    Q: Is BGMI APK safe to download and install?

    -

    A: Yes, BGMI APK is safe to download and install as long as you get it from a trusted source, such as the official website of BGMI or a reputable third-party website. You should also scan the APK file with an antivirus app before installing it. However, you should be careful not to download any fake or malicious APK files that may harm your device or steal your data.

    -

    Q: Is BGMI APK legal to download and install?

    -

    A: Yes, BGMI APK is legal to download and install as long as you do not violate any terms and conditions of the game or the developer. You should also respect the intellectual property rights of the game and the developer and not use any hacks, mods, cheats, or pirated versions of the game.

    -

    Q: Is BGMI APK compatible with Android 12?

    -

    A: Yes, BGMI APK is compatible with Android 12 as long as you follow the steps and tips mentioned in this article. You may need to enable some permissions and settings on your device to install and run the game smoothly. You may also need to update the game regularly to access the latest features and fixes.

    -

    Q: How can I update BGMI APK on Android 12?

    -

    A: You can update BGMI APK on Android 12 by launching the game and downloading the updates from the game itself. You may need a stable internet connection for this process. Alternatively, you can also download the latest version of BGMI APK from a trusted source and install it over the existing version on your device.

    -

    Q: How can I uninstall BGMI APK on Android 12?

    -

    A: You can uninstall BGMI APK on Android 12 by going to your device settings and tapping on "Apps" or "Applications". Then, look for BGMI and tap on it. Then, tap on "Uninstall" or "Remove" and confirm your action. You may also need to delete the OBB file and other data related to the game from your device storage.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Liberty General Insurance Get Instant Quotes and Policy Download.md b/spaces/congsaPfin/Manga-OCR/logs/Liberty General Insurance Get Instant Quotes and Policy Download.md deleted file mode 100644 index 54955a81d83dc0ff337839f2863117c994f0d390..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Liberty General Insurance Get Instant Quotes and Policy Download.md +++ /dev/null @@ -1,107 +0,0 @@ - -

    How to Download Liberty General Insurance

    -

    Liberty General Insurance is one of the most trusted general insurance companies in India that offers a wide range of insurance products for your car, two-wheeler, health, travel, property, and business. Whether you want to protect yourself from unforeseen risks, liabilities, or expenses, Liberty General Insurance has a solution for you.

    -




    -

    But how do you download your Liberty General Insurance policy online? In this article, we will guide you through the simple steps to download your policy document from the comfort of your home or office. But first, let us look at some of the benefits of choosing Liberty General Insurance.

    -

    Benefits of Liberty General Insurance

    -

    Liberty General Insurance has been providing quality insurance services to its customers since 2013. It is a joint venture among US property casualty insurer Liberty Mutual Insurance Group, Indian private investment fund Enam Securities, and Indian industrial conglomerate DP Jindal Group.

    -

    Some of the benefits of Liberty General Insurance are:

    -
      -
    • Comprehensive coverage: Liberty General Insurance covers various risks and liabilities that you may face in your personal or professional life. Whether it is damage to your vehicle, medical expenses, loss of baggage, fire or theft at your property, or legal liability to third parties, Liberty General Insurance has a plan for you.
    • -
    • Cashless claim service: Liberty General Insurance has a network of over 5,800 partner hospitals and over 3,100 partner garages across India where you can avail cashless claim service. This means that you don't have to pay anything upfront for your treatment or repair. Liberty General Insurance will settle the bill directly with the service provider.
    • -
    • Affordable premiums and discounts: Liberty General Insurance offers competitive premiums and discounts for its customers. You can save money by choosing a higher voluntary deductible, installing anti-theft devices in your vehicle, opting for a long-term policy, or renewing your policy online. You can also get discounts for being a member of certain associations or clubs.
    • -
    • Online policy renewal and purchase: Liberty General Insurance allows you to renew or buy your policy online in a few clicks. You can also download your policy document online without any hassle. You can pay online using various modes such as credit card, debit card, net banking, UPI, or wallets.
    • -
    -

    Types of Liberty General Insurance Products

    -

    Liberty General Insurance offers a variety of insurance products to suit your needs. Here are some of the main types of products that you can choose from:

    -


    -

    Motor insurance

    -

    Motor insurance covers your car, two-wheeler, or commercial vehicle against damage or loss due to accidents, fire, theft, natural calamities, or malicious acts. It also covers your legal liability to third parties for bodily injury or property damage. Liberty General Insurance offers two types of motor insurance policies: third party liability only and comprehensive. The third party liability only policy covers your legal liability to third parties for bodily injury or property damage. The comprehensive policy covers both third party liability and own damage to your vehicle. You can also opt for various add-on covers such as zero depreciation, roadside assistance, engine protect, return to invoice, and more.

    -

    Health insurance

    -

    Health insurance covers your medical expenses in case of hospitalization due to illness or injury. It also covers pre and post hospitalization expenses, day care procedures, domiciliary treatment, ambulance charges, and more. Liberty General Insurance offers individual, family, and group health insurance plans with various features and benefits. You can also opt for critical illness cover, personal accident cover, or top-up cover to enhance your protection.

    -

    Travel insurance

    -

    Travel insurance covers your travel risks such as loss of baggage, passport, or tickets, flight delay or cancellation, medical emergency, personal liability, or legal expenses. Liberty General Insurance offers domestic and international travel insurance plans for individuals, families, students, and senior citizens. You can choose from single trip or multi-trip plans with various coverage options and durations.

    -

    Property insurance

    -

    Property insurance covers your home, office, or shop against fire, burglary, earthquake, flood, storm, or other perils. It also covers your contents such as furniture, appliances, electronics, jewelry, or cash. Liberty General Insurance offers property insurance plans for individuals and businesses with various sum insured options and extensions.

    -

    Commercial and industrial insurance

    -

    Commercial and industrial insurance covers your business assets, liabilities, and employees against various risks such as fire, theft, machinery breakdown, business interruption, public liability, employer's liability, workmen's compensation, marine cargo, or engineering projects. Liberty General Insurance offers customized solutions for different industries such as manufacturing, construction, hospitality, education, IT, or retail.

    -

    How to Download Liberty General Insurance Policy Online

    -

    If you have bought or renewed your Liberty General Insurance policy online, you can easily download your policy document from the official website of the company. Here are the steps to follow:

    -

    Step 1: Visit the official website of Liberty General Insurance

    -

    Go to https://www.libertyinsurance.in/ and click on the "Customer Support" tab on the top right corner of the homepage. Then click on the "Download Policy" option from the drop-down menu.

    -

    Step 2: Choose the type of insurance product you want to download

    -

    You will see a list of insurance products such as motor insurance, health insurance, travel insurance, property insurance, and commercial and industrial insurance. Click on the product that you have purchased or renewed online.

    -

    Step 3: Enter your policy number and other details

    -

    You will be redirected to a page where you have to enter your policy number and other details such as email address or mobile number. Enter the required information and click on the "Submit" button.

    -

    Step 4: Verify your identity and download your policy document

    -

    You will receive an OTP (one-time password) on your registered email address or mobile number. Enter the OTP and click on the "Verify" button. You will see your policy details on the screen. Click on the "Download" button to download your policy document in PDF format.

    -

    How to Contact Liberty General Insurance Customer Care

    -

    If you have any queries or complaints regarding your Liberty General Insurance policy or claim, you can contact the customer care team of the company through various channels. Here are some of the ways to reach out to them:

    -

    Toll-free number

    -

    You can call the toll-free number 1800-266-5844 from Monday to Saturday between 8 am to 8 pm. You can also request a callback by filling a form on the website.

    -

    Email address

    -

    You can send an email to care@libertyinsurance.in with your query or complaint. You can also attach any relevant documents or screenshots if required.

    -

    Branch locator

    -

    You can visit the nearest branch of Liberty General Insurance by using the branch locator tool on the website. You can search by state, city, or pin code and get the address, phone number, and email id of the branch.

    -

    Conclusion

    -

    Liberty General Insurance is a reliable and reputed general insurance company that offers a range of products for your personal and professional needs. You can buy or renew your policy online and download it in a few minutes from the comfort of your home or office. You can also contact the customer care team of the company for any assistance or feedback. Liberty General Insurance is committed to providing you with the best service and satisfaction.

    -

    We hope this article has helped you understand how to download Liberty General Insurance policy online. If you have any questions, feel free to ask us in the comments section below. We would love to hear from you.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Liberty General Insurance:

    -

    Q: How can I check the status of my Liberty General Insurance claim?

    -

    A: You can check the status of your claim online by visiting the website and clicking on the "Track Claim" option under the "Customer Support" tab. You can also call the toll-free number 1800-266-5844 or email care@libertyinsurance.in with your claim number and policy number.

    -

    Q: How can I cancel my Liberty General Insurance policy?

    -

    A: You can cancel your policy within 15 days of receiving the policy document by sending a written request to the nearest branch or an email to care@libertyinsurance.in. You will get a refund of the premium after deducting the proportionate risk premium and stamp duty charges.

    -

    Q: How can I change my personal details in my Liberty General Insurance policy?

    -

    A: You can change your personal details such as name, address, phone number, email ID, or nominee by visiting the nearest branch or emailing care@libertyinsurance.in with your policy number and proof of identity.

    -

    Q: How can I renew my Liberty General Insurance policy online?

    -

    A: You can renew your policy online by visiting the website and clicking on the "Renew Policy" option under the "Customer Support" tab. You can enter your policy number and other details and pay online using various modes such as credit card, debit card, net banking, UPI, or wallets.

    -

    Q: How can I get a duplicate copy of my Liberty General Insurance policy document?

    -

    A: You can get a duplicate copy of your policy document by visiting the nearest branch or emailing care@libertyinsurance.in with your policy number and proof of identity. You can also download your policy document online by following the steps mentioned above.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Macromedia Flash 8 Download the Latest Version and Start Creating Today.md b/spaces/congsaPfin/Manga-OCR/logs/Macromedia Flash 8 Download the Latest Version and Start Creating Today.md deleted file mode 100644 index 97aa7a0f9e3a7b54ddf4d607c5be7ae08bbd0eaf..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Macromedia Flash 8 Download the Latest Version and Start Creating Today.md +++ /dev/null @@ -1,117 +0,0 @@ - -

    How to Download Macromedia Flash 8 in 2023

    -

    If you are looking for a powerful software to create rich interactive content, you might be interested in Macromedia Flash 8. This is a professional web design tool that was released in 2005 by Macromedia, before it was acquired by Adobe. Macromedia Flash 8 allows you to create stunning graphics, animations, videos, audio, and interactive applications that can run on various platforms and devices. In this article, we will show you how to download Macromedia Flash 8 in 2023, what features it offers, and what alternatives you have.

    -




    -

    Features of Macromedia Flash 8

    -

    Macromedia Flash 8 is a versatile software that offers many features for creating engaging web content. Some of the features are:

    -
      -
    • Graphic effects: You can apply filters, blend modes, gradients, masks, and transformations to your graphics to enhance their appearance and realism.
    • -
    • Animation: You can use motion tweens, shape tweens, guides, onion skinning, and frame-by-frame animation to create smooth and dynamic animations.
    • -
    • Video: You can import, edit, encode, and stream video files in various formats, such as FLV, MPEG, AVI, and MOV. You can also add cue points, subtitles, captions, and interactivity to your videos.
    • -
    • Audio: You can import, edit, synchronize, and stream audio files in various formats, such as MP3, WAV, and AIFF. You can also add sound effects, volume control, panning, and interactivity to your audio.
    • -
    • Interactivity: You can use ActionScript 2.0, a scripting language based on ECMAScript (similar to JavaScript), to add logic, control, data manipulation, and communication to your content. You can also use components, such as buttons, menus, sliders, and text fields, to create user interfaces.
    • -
    • FlashType font-rendering engine: This is a new feature in Macromedia Flash 8 that improves the quality and readability of text. It supports anti-aliasing, kerning, ligatures, and advanced character sets.
    • -
    • Custom easing tool: This is another new feature in Macromedia Flash 8 that allows you to create custom easing curves for your animations. You can adjust the speed and acceleration of your motion tweens with precision and ease.
    • -
    • Flash Player 8 and Flash Lite 2 compatibility: Macromedia Flash 8 is compatible with the latest versions of Flash Player and Flash Lite at the time of its release. Flash Player 8 is a web browser plug-in that enables users to view Flash content on desktops and laptops. Flash Lite 2 is a mobile runtime environment that enables users to view Flash content on smartphones and tablets.
    • -
    -

    How to download Macromedia Flash 8

    -

    If you want to download Macromedia Flash 8 in 2023, you need to consider some requirements and precautions first. Here are some tips:

    -
      -
    • Requirements: To run Macromedia Flash 8 on your computer, you need Windows XP or later, or Mac OS X v10.3 or later. You also need at least a Pentium III or equivalent processor (Windows) or a PowerPC G4 or G5 processor (Mac), at least 256 MB of RAM (512 MB recommended), at least 500 MB of available disk space (1 GB recommended), a CD-ROM drive, a 1024 x 768 monitor resolution, and a 16-bit video card. (A quick way to check some of these specs with Python is sketched after this list.)
    • -
    • Precautions: Macromedia Flash 8 is an old software that is no longer supported or updated by Adobe. It may not work properly on newer operating systems or browsers. It may also have security vulnerabilities or compatibility issues with other software. You should use it at your own risk and discretion.
    • -
    -
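    As a rough aid, here is a small Python sketch (not from the original article) that prints the operating system, architecture, and free disk space so you can compare them against the minimum specs listed above; the 500 MB threshold simply mirrors that list and is not an official Adobe figure.

```python
import platform
import shutil

MIN_FREE_DISK_GB = 0.5  # mirrors the "at least 500 MB" figure quoted above

print(f"Operating system : {platform.system()} {platform.release()}")
print(f"Architecture     : {platform.machine()}")

# Free space on the drive holding the current working directory.
free_gb = shutil.disk_usage(".").free / (1024 ** 3)
print(f"Free disk space  : {free_gb:.1f} GB")

if free_gb < MIN_FREE_DISK_GB:
    print("Warning: less than 500 MB free -- the installer may not have enough room.")
```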

    If you still want to download Macromedia Flash 8, you can follow these steps:

    -


    -
      -
    1. Sources and links: There are several sources where you can download Macromedia Flash 8, such as oldversion.com, softonic.com, filehippo.com, and cnet.com. However, these sources are not official or authorized by Adobe, and they may contain malware or viruses. You should scan the downloaded files with an antivirus software before opening them. Alternatively, you can try to find a legitimate copy of Macromedia Flash 8 on eBay or Amazon, but they may be expensive or rare.
    2. -
    3. Installation and activation: Once you have downloaded the Macromedia Flash 8 installer file, you can run it and follow the instructions on the screen. You will need to enter a serial number to activate the software. You can find some serial numbers online, but they may not work or be illegal. You can also try to use a keygen or a crack program to generate a serial number, but they may also contain malware or viruses. You should use them at your own risk and discretion.
    4. -
    -

    Alternatives to Macromedia Flash 8

    -

    Macromedia Flash 8 is not the only software that can create and view Flash content. There are some alternatives that you can consider in 2023. Here are some pros and cons of using Flash in 2023 and some other software options:

    -

    Pros of using Flash in 2023:

    • Flash content is still widely used on the web, especially for games, animations, and educational content.
    • Flash content can be converted to other formats, such as HTML5, MP4, or SWF, using tools like Adobe Animate or Swiffy.
    • Flash content can be viewed using alternative players, such as Ruffle or Lightspark.

    Cons of using Flash in 2023:

    • Flash content is not supported by most mobile devices and browsers, such as iOS, Android, Chrome, Firefox, and Edge.
    • Flash content may have poor performance, high CPU usage, security risks, or accessibility issues.
    • Flash content may not display correctly or fully using alternative players.
    -

    Some other software that can create and view Flash content are:

    • Adobe Animate: Adobe's successor to Flash, which can create HTML5, WebGL, AIR, and SWF content.
    • Ruffle: an open-source Flash Player emulator that plays SWF files in the browser without any plug-ins.
    • Lightspark: another alternative player for viewing SWF content.
    • JPEXS Free Flash Decompiler: a free tool that can open SWF files and convert them back into editable FLA files.
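    If you are sorting through old Flash content before converting or archiving it, a small script can at least tell you which files are genuine SWF movies and how they are compressed. The sketch below is not from the article; it relies only on the documented SWF header layout (a 3-byte signature, a version byte, and a 4-byte little-endian uncompressed length).

```python
import struct
from pathlib import Path

# Known SWF signatures: FWS = uncompressed, CWS = zlib-compressed (SWF 6+),
# ZWS = LZMA-compressed (SWF 13+).
SIGNATURES = {b"FWS": "uncompressed", b"CWS": "zlib-compressed", b"ZWS": "LZMA-compressed"}

def describe_swf(path: Path) -> str:
    with path.open("rb") as f:
        header = f.read(8)
    if len(header) < 8 or header[:3] not in SIGNATURES:
        return f"{path.name}: not a valid SWF file"
    version = header[3]
    length = struct.unpack("<I", header[4:8])[0]  # uncompressed size in bytes
    return f"{path.name}: SWF version {version}, {SIGNATURES[header[:3]]}, {length} bytes uncompressed"

if __name__ == "__main__":
    for swf in sorted(Path(".").glob("*.swf")):
        print(describe_swf(swf))
```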

    Conclusion

    -

    In conclusion, Macromedia Flash 8 is a powerful software that can create rich interactive content for the web. However, it is an old software that is no longer supported or updated by Adobe. It may not work properly on newer operating systems or browsers. It may also have security vulnerabilities or compatibility issues with other software. If you want to download Macromedia Flash 8 in 2023, you need to consider some requirements and precautions first. You also have some alternatives that you can consider for creating and viewing Flash content in 2023. We hope this article has been helpful and informative for you. If you have any questions or comments, please feel free to leave them below.

    -

    FAQs

    -

    Here are some frequently asked questions about Macromedia Flash 8 and Flash content in 2023:

    -
      -
    1. Is Macromedia Flash 8 free?
    2. -

      No, Macromedia Flash 8 is not free. It is a commercial software that requires a license and a serial number to activate. However, you may be able to find some unofficial sources where you can download it for free, but they may not be safe or legal.

      -
    3. Is Macromedia Flash 8 the same as Adobe Flash?
    4. -

      No, Macromedia Flash 8 is not the same as Adobe Flash. Macromedia Flash 8 is the name of the software that was released in 2005 by Macromedia, before it was acquired by Adobe. Adobe Flash is the name of the software that was released after 2005 by Adobe, which includes Adobe Flash CS3, CS4, CS5, CS6, CC, and Animate.

      -
    5. What is the difference between SWF and FLA files?
    6. -

      SWF and FLA files are two different types of files that are related to Flash content. SWF files are the final output files that can be viewed by users on various platforms and devices. FLA files are the source files that can be edited by developers using Macromedia Flash 8 or Adobe Animate. SWF files can be converted to FLA files using tools like Sothink SWF Decompiler or JPEXS Free Flash Decompiler.

      -
    7. How can I view SWF files on my browser?
    8. -

      To view SWF files on your browser, you need to have Adobe Flash Player installed and enabled on your browser. However, Adobe Flash Player is no longer supported or updated by Adobe since December 31, 2020. It may not work properly on newer operating systems or browsers. Alternatively, you can use alternative players like Ruffle or Lightspark to view SWF files on your browser without requiring any plug-ins or downloads.

      -
    9. How can I create HTML5 content using Macromedia Flash 8?
    10. -

      To create HTML5 content using Macromedia Flash 8, you need to use tools like Adobe Animate or Swiffy to convert your FLA or SWF files to HTML5 files. Adobe Animate is a software that can create HTML5, WebGL, AIR, and SWF content using vector graphics, bitmap graphics, audio, video, text, and ActionScript 3.0. Swiffy is a web service that can convert SWF files to HTML5 files using JavaScript and SVG.

      -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Ultimate Resource for Downloading Love Nwantiti Lyrics and Listening to CKay.md b/spaces/congsaPfin/Manga-OCR/logs/The Ultimate Resource for Downloading Love Nwantiti Lyrics and Listening to CKay.md deleted file mode 100644 index 00d4550a0141ad51fd8a8a37849f6f7afd8d6e88..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/The Ultimate Resource for Downloading Love Nwantiti Lyrics and Listening to CKay.md +++ /dev/null @@ -1,104 +0,0 @@ -
    -

    How to Download Love Nwantiti Lyrics by CKay

    -

    If you are a fan of Afrobeats music, you have probably heard of the song Love Nwantiti by Nigerian singer and producer CKay. The song, which was released in 2019 as part of his EP CKay the First, became a global hit in 2021 thanks to its catchy melody, romantic lyrics, and viral TikTok challenges. The song has been remixed by several artists, including Joeboy, Kuami Eugene, ElGrande Toto, and De La Ghetto, and has charted in over 160 countries.

    -

    But do you know what the song is actually about? And do you know how to download the lyrics of the song so you can sing along and enjoy it more? In this article, we will answer these questions and show you how to download Love Nwantiti lyrics by CKay in a few easy steps.

    -




    -

    What is Love Nwantiti About?

    -

    The Meaning of Love Nwantiti

    -

    Love Nwantiti is a love song with lyrics that are a mix of English and Igbo, a language spoken in Nigeria. The song title means "small love" or "little love" in Igbo, but this does not mean that CKay's love for his girl is small. On the contrary, he expresses his intense passion and obsession for her throughout the song. He compares her to oxygen, valentine, fantasy, and nkwobi (a spicy Nigerian dish made from cow foot). He also promises to spend pounds and dollars on her, and to be her loyal lover forever.

    -

    The Lyrics of Love Nwantiti

    -

    The lyrics of Love Nwantiti are simple but catchy, and they use a lot of repetition and rhyme to create a memorable melody. Here are some of the most popular lines from the song:

    - -

    If you want to see the full lyrics of Love Nwantiti by CKay, you can find them on various websites such as Genius, Billboard, or Songfacts. However, if you want to download them as a file that you can save on your device or print out, you will need to follow some steps that we will explain in the next section.

    -

    How to Download Love Nwantiti Lyrics

    -

    The Benefits of Downloading Lyrics

    -

    Downloading lyrics can have many benefits for music lovers. Some of them are:

    • You can learn the meaning and pronunciation of the words in the song, especially if they are in a different language or dialect.
    • You can sing along to the song and improve your vocal skills and confidence.
    • You can memorize the lyrics and impress your friends or your crush with your knowledge of the song.
    • You can analyze the lyrics and discover the hidden messages, themes, or references in the song.
    • You can enjoy the song more and connect with the artist and their emotions.

    -

    -

    The Best Websites to Download Lyrics

    -

    There are many websites that offer lyrics for free, but not all of them allow you to download them as a file. Some of the best websites that do are:

    - -

    These are just some of the websites that you can use to download Love Nwantiti lyrics by CKay. However, you can also use other websites that have similar functions, or you can use online tools that can convert any webpage into a PDF file.

    -

    The Steps to Download Lyrics

    -

    To download Love Nwantiti lyrics by CKay, you can follow these general steps:

    -
      -
    1. Go to one of the websites that offer lyrics and search for Love Nwantiti by CKay.
    2. -
    3. Open the page that contains the lyrics and check if they are accurate and complete.
    4. -
    5. Click on the button or link that allows you to download or print the lyrics.
    6. -
    7. Choose the format and location where you want to save the file.
    8. -
    9. Enjoy your downloaded lyrics and sing along to Love Nwantiti by CKay.
    10. -
    -

    These steps may vary slightly depending on the website or tool that you use, but they are generally easy and quick to follow. You can also repeat these steps for any other song that you want to download lyrics for.
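    For readers comfortable with a little scripting, the steps above can also be automated. The Python sketch below fetches a lyrics page and saves its plain text to a file; the URL is a placeholder (substitute the page you found on Genius, Billboard, or Songfacts), and it assumes the third-party requests and beautifulsoup4 packages are installed. Always check a site's terms of use before scraping it.

```python
import requests                 # pip install requests
from bs4 import BeautifulSoup   # pip install beautifulsoup4

# Placeholder address -- replace it with the lyrics page you want to save.
url = "https://example.com/ckay-love-nwantiti-lyrics"

response = requests.get(url, timeout=30, headers={"User-Agent": "Mozilla/5.0"})
response.raise_for_status()

# Strip the page down to readable text and save it locally.
text = BeautifulSoup(response.text, "html.parser").get_text(separator="\n", strip=True)
with open("love_nwantiti_lyrics.txt", "w", encoding="utf-8") as f:
    f.write(text)

print("Saved love_nwantiti_lyrics.txt")
```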

    -

    How to Enjoy Love Nwantiti More

    -

    The Remixes of Love Nwantiti

    -

    One way to enjoy Love Nwantiti more is to listen to its remixes by different artists. CKay has collaborated with several artists from Africa, Europe, and Latin America to create different versions of his hit song. Some of them are:

    • The remix featuring Joeboy and Kuami Eugene
    • The remix featuring ElGrande Toto
    • The remix featuring De La Ghetto

    You can find these remixes on YouTube, Spotify, Apple Music, or any other streaming platform. You can also download them using the same steps as downloading lyrics.
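    If you want to pull up the official remixes programmatically, the Spotify Web API is one option. The sketch below uses the third-party spotipy package and assumes you have created your own client ID and secret at developer.spotify.com; the credentials shown are placeholders.

```python
import spotipy                                   # pip install spotipy
from spotipy.oauth2 import SpotifyClientCredentials

# Placeholder credentials -- use your own Spotify developer keys.
auth = SpotifyClientCredentials(client_id="YOUR_CLIENT_ID",
                                client_secret="YOUR_CLIENT_SECRET")
sp = spotipy.Spotify(client_credentials_manager=auth)

# Search for CKay's remixes and print each track with its featured artists.
results = sp.search(q="CKay love nwantiti remix", type="track", limit=10)
for item in results["tracks"]["items"]:
    artists = ", ".join(artist["name"] for artist in item["artists"])
    print(f"{item['name']} -- {artists}")
```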

    -

    The Music Videos of Love Nwantiti

    -

    Another way to enjoy Love Nwantiti more is to watch its music videos. CKay has released several music videos for his song and its remixes, featuring himself and his collaborators in various locations and Love Nwantiti by CKay?: Love Nwantiti by CKay is one of the most popular songs of 2021. It has over 100 million streams on Spotify, over 50 million views on YouTube, and over 10 million TikTok videos. It has also been featured on several playlists, charts, and radio stations around the world. -

  • Where can I download Love Nwantiti by CKay?: You can download Love Nwantiti by CKay from various platforms such as iTunes, Amazon Music, Google Play Music, or Deezer. You can also stream it on Spotify, Apple Music, YouTube Music, or SoundCloud.

    I hope you found this article helpful and informative. If you have any questions or feedback, please leave a comment below. Thank you for reading and have a great day!

    -
    -
    \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Deep Ze Full Crack Cho Win Xp Why You Need This Powerful Tool to Keep Your PC Running Smoothly.md b/spaces/contluForse/HuggingGPT/assets/Deep Ze Full Crack Cho Win Xp Why You Need This Powerful Tool to Keep Your PC Running Smoothly.md deleted file mode 100644 index 95ad3de081e8b4891081621544a0855a696664d3..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Deep Ze Full Crack Cho Win Xp Why You Need This Powerful Tool to Keep Your PC Running Smoothly.md +++ /dev/null @@ -1,19 +0,0 @@ - -

    All of the Witcher mods live on NexusMods, and Vortex (formerly the Nexus Mods Manager) is the default way to download and install mods from there. If you're already using it to mod another game, like Skyrim for instance, you may as well use it for The Witcher 3 as well. Note that The Witcher 3 isn't as mod-friendly as Skyrim, however, and you'll often need to resort to a manual install. Read each mod's description and carefully follow its instructions.

    -

    Deep Ze Full Crack Cho Win Xp





    -

    It's annoying to cart a load of loot into town and find a vendor so poor he can't afford to buy it all. This mod gives vendors some deeper pockets, and gently encourages them to pay you more for your goods.

    -

    The Northern Kingdoms, especially Velen and Novigrad, are filthy places full of muck, but you can't do any decent photography with a dirty lens! Wipe them on your pants to get them clean, just like the pros do. No Dirty Lenses removes the old water spots and dirt effects from the in-game camera, but you'll still get water on the screen from splashing through a river or looking up at the rain.

    -

    As a registered user you are entitled to free data recovery software updates (up to the release of the next major version) and data recovery support. If you are looking for a Recover My Files crack, torrent, serial, portable or keygen, then please use this link.

    -

    Deep learning models [16,17,18,19,20] are an effective way of classifying and quantifying fracture characteristics [11,12,13,14,15,16,17,18,19,20,21,22,23,24]. The works presented in the literature show that the machine learning models based on convolutional neural networks (CNN) are very suitable for the detection and classification of microstructures [25,26,27,28,29,30,31]. Konovalenko et al. proposed a model to detect the edges of dimples. In addition, the authors used a CNN to estimate the size and diameter of the dimples. However, the proposed model focused on images that only contained dimples. The images of hybrid microstructures (i.e., a mixture of dimples and cleavage) were not of concern [11,32]. Recently, Sinha et al. used UNet to perform the semantic segmentation of dimples on a metallic surface. This model can well segment the clearly visible deep dimples. However, this model is inappropriate to segment the overall dimple morphology of fracture [33].
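    The cited papers do not reproduce their code here, so the following is only a generic PyTorch sketch of the kind of encoder-decoder model (a scaled-down U-Net) such studies use for binary dimple-versus-background segmentation; the layer sizes, dummy data, and training step are illustrative assumptions rather than any author's actual implementation.

```python
import torch
import torch.nn as nn

class TinyUNet(nn.Module):
    """A deliberately small U-Net-style network producing a 1-channel logit mask."""

    def __init__(self, in_ch=1, base=16):
        super().__init__()
        def block(cin, cout):
            return nn.Sequential(
                nn.Conv2d(cin, cout, 3, padding=1), nn.BatchNorm2d(cout), nn.ReLU(inplace=True),
                nn.Conv2d(cout, cout, 3, padding=1), nn.BatchNorm2d(cout), nn.ReLU(inplace=True))
        self.enc1 = block(in_ch, base)
        self.enc2 = block(base, base * 2)
        self.pool = nn.MaxPool2d(2)
        self.up = nn.ConvTranspose2d(base * 2, base, 2, stride=2)
        self.dec1 = block(base * 2, base)
        self.head = nn.Conv2d(base, 1, 1)  # dimple vs. background logits

    def forward(self, x):
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        d1 = self.dec1(torch.cat([self.up(e2), e1], dim=1))  # skip connection
        return self.head(d1)

# One training step on dummy grayscale fractographs and binary masks.
model = TinyUNet()
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

images = torch.randn(4, 1, 128, 128)                      # stand-in for SEM images
masks = torch.randint(0, 2, (4, 1, 128, 128)).float()      # stand-in for dimple masks

optimizer.zero_grad()
loss = criterion(model(images), masks)
loss.backward()
optimizer.step()
print(f"loss: {loss.item():.4f}")
```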

    -

    Typically, box springs come fully assembled, so you just place one where you want it. Some, like smart box springs that go above and beyond a traditional box spring, may have a few parts to put together, but they should be easy to assemble with the included instructions. These are usually metal, with a fabric covering, and act as a complete foundation.

    -

    -

    Let's start with the most common mattress size, queen. You can get a queen box spring as one piece or two, there are split queen box springs. When it comes to king size box springs, they are always split, as it's simply too large to be moving through your house as one piece. The same goes for California king box springs. The smaller box springs come as one piece, including twin box springs and full box springs, along with their extra-long counterparts.

    -

    Use Energy Saver to extend your battery life. This tool automatically detects when your computer is not plugged into a power source and applies several energy-saving settings that are usually hidden deep inside the operating system. Use the settings to customize it to your own preferences, then extend your charge easily with a single click!

    -

    To add a little life to your idler pulleys, try using a little spray lubricant on the seals. Over time, water and sunlight can cause the seals to harden, shrink or crack. When this happens, the seal no longer protects the bearing and a failure will soon happen. The lubricant will help to keep the seal soft and resistant to the weathering that can occur.

    -

    Abstract: The deep sea has proven to be a great treasure trove of structurally unique and biologically active natural products over the last two decades. Cold seeps and hydrothermal vents, as typical representatives of deep-sea extreme environments, have attracted more and more attention. This review mainly summarizes the natural products of marine animals, marine fungi, and marine bacteria derived from deep-sea cold seeps and hydrothermal vents, as well as their biological activities. In total, 182 compounds are reported, drawing on 132 references and covering the literature from the first report in 1984 up to March 2022. The sources of the compounds are represented by the genera Aspergillus, Penicillium, Streptomyces, and so on. It is worth mentioning that 90 of the 182 compounds are new and that almost 60% of the reported structures exhibited diverse bioactivities, which makes them attractive targets for related organic synthesis and biosynthesis studies. Keywords: natural products; extreme environments; cold seeps; hydrothermal vents; bioactivities

    -

    In 2001[54] and 2002,[55] processes for growing gallium nitride (GaN) LEDs on silicon were successfully demonstrated. In January 2012, Osram demonstrated high-power InGaN LEDs grown on silicon substrates commercially,[56] and GaN-on-silicon LEDs are in production at Plessey Semiconductors. As of 2017, some manufacturers are using SiC as the substrate for LED production, but sapphire is more common, as it has the most similar properties to that of gallium nitride, reducing the need for patterning the sapphire wafer (patterned wafers are known as epi wafers). Samsung, the University of Cambridge, and Toshiba are performing research into GaN on Si LEDs. Toshiba has stopped research, possibly due to low yields.[57][58][59][60][61][62][63] Some opt for epitaxy, which is difficult on silicon, while others, like the University of Cambridge, choose a multi-layer structure, in order to reduce (crystal) lattice mismatch and different thermal expansion ratios, in order to avoid cracking of the LED chip at high temperatures (e.g. during manufacturing), reduce heat generation and increase luminous efficiency. Sapphire substrate patterning can be carried out with nanoimprint lithography.[64][65][66][67][68][69][70]

    -

    Because of their long life, fast switching times, and visibility in broad daylight due to their high output and focus, LEDs have been used in automotive brake lights and turn signals. The use in brakes improves safety, due to a great reduction in the time needed to light fully, or faster rise time, about 0.1 second faster[citation needed] than an incandescent bulb. This gives drivers behind more time to react. In a dual intensity circuit (rear markers and brakes) if the LEDs are not pulsed at a fast enough frequency, they can create a phantom array, where ghost images of the LED appear if the eyes quickly scan across the array. White LED headlamps are beginning to appear. Using LEDs has styling advantages because LEDs can form much thinner lights than incandescent lamps with parabolic reflectors.

    -

    With the development of high-efficiency and high-power LEDs, it has become possible to use LEDs in lighting and illumination. To encourage the shift to LED lamps and other high-efficiency lighting, in 2008 the US Department of Energy created the L Prize competition. The Philips Lighting North America LED bulb won the first competition on August 3, 2011, after successfully completing 18 months of intensive field, lab, and product testing.[160]

    -

    In the work of Cao et al.,[184] researchers targeted the outcoupling problem, which is that the optical physics of thin-film LEDs causes the majority of light generated by the semiconductor to be trapped in the device.[185] To achieve this goal, they demonstrated that solution-processed perovskites can spontaneously form submicrometre-scale crystal platelets, which can efficiently extract light from the device. These perovskites are formed via the introduction of amino acid additives into the perovskite precursor solutions. In addition, their method is able to passivate perovskite surface defects and reduce nonradiative recombination. Therefore, by improving the light outcoupling and reducing nonradiative losses, Cao and his colleagues successfully achieved PLED with EQE up to 20.7%.[184]

    -
    -
    \ No newline at end of file diff --git a/spaces/cooelf/Multimodal-CoT/timm/scheduler/__init__.py b/spaces/cooelf/Multimodal-CoT/timm/scheduler/__init__.py deleted file mode 100644 index 6a7789826229f66e1220cb6149902ba9c411b537..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/scheduler/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .cosine_lr import CosineLRScheduler -from .plateau_lr import PlateauLRScheduler -from .step_lr import StepLRScheduler -from .tanh_lr import TanhLRScheduler -from .scheduler_factory import create_scheduler diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/image/colorspace.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/image/colorspace.py deleted file mode 100644 index 814533952fdfda23d67cb6a3073692d8c1156add..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/image/colorspace.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import cv2 -import numpy as np - - -def imconvert(img, src, dst): - """Convert an image from the src colorspace to dst colorspace. - - Args: - img (ndarray): The input image. - src (str): The source colorspace, e.g., 'rgb', 'hsv'. - dst (str): The destination colorspace, e.g., 'rgb', 'hsv'. - - Returns: - ndarray: The converted image. - """ - code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') - out_img = cv2.cvtColor(img, code) - return out_img - - -def bgr2gray(img, keepdim=False): - """Convert a BGR image to grayscale image. - - Args: - img (ndarray): The input image. - keepdim (bool): If False (by default), then return the grayscale image - with 2 dims, otherwise 3 dims. - - Returns: - ndarray: The converted grayscale image. - """ - out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - if keepdim: - out_img = out_img[..., None] - return out_img - - -def rgb2gray(img, keepdim=False): - """Convert a RGB image to grayscale image. - - Args: - img (ndarray): The input image. - keepdim (bool): If False (by default), then return the grayscale image - with 2 dims, otherwise 3 dims. - - Returns: - ndarray: The converted grayscale image. - """ - out_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) - if keepdim: - out_img = out_img[..., None] - return out_img - - -def gray2bgr(img): - """Convert a grayscale image to BGR image. - - Args: - img (ndarray): The input image. - - Returns: - ndarray: The converted BGR image. - """ - img = img[..., None] if img.ndim == 2 else img - out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - return out_img - - -def gray2rgb(img): - """Convert a grayscale image to RGB image. - - Args: - img (ndarray): The input image. - - Returns: - ndarray: The converted RGB image. - """ - img = img[..., None] if img.ndim == 2 else img - out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) - return out_img - - -def _convert_input_type_range(img): - """Convert the type and range of the input image. - - It converts the input image to np.float32 type and range of [0, 1]. - It is mainly used for pre-processing the input image in colorspace - conversion functions such as rgb2ycbcr and ycbcr2rgb. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - - Returns: - (ndarray): The converted image with type of np.float32 and range of - [0, 1]. 
- """ - img_type = img.dtype - img = img.astype(np.float32) - if img_type == np.float32: - pass - elif img_type == np.uint8: - img /= 255. - else: - raise TypeError('The img type should be np.float32 or np.uint8, ' - f'but got {img_type}') - return img - - -def _convert_output_type_range(img, dst_type): - """Convert the type and range of the image according to dst_type. - - It converts the image to desired type and range. If `dst_type` is np.uint8, - images will be converted to np.uint8 type with range [0, 255]. If - `dst_type` is np.float32, it converts the image to np.float32 type with - range [0, 1]. - It is mainly used for post-processing images in colorspace conversion - functions such as rgb2ycbcr and ycbcr2rgb. - - Args: - img (ndarray): The image to be converted with np.float32 type and - range [0, 255]. - dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it - converts the image to np.uint8 type with range [0, 255]. If - dst_type is np.float32, it converts the image to np.float32 type - with range [0, 1]. - - Returns: - (ndarray): The converted image with desired type and range. - """ - if dst_type not in (np.uint8, np.float32): - raise TypeError('The dst_type should be np.float32 or np.uint8, ' - f'but got {dst_type}') - if dst_type == np.uint8: - img = img.round() - else: - img /= 255. - return img.astype(dst_type) - - -def rgb2ycbcr(img, y_only=False): - """Convert a RGB image to YCbCr image. - - This function produces the same results as Matlab's `rgb2ycbcr` function. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - y_only (bool): Whether to only return Y channel. Default: False. - - Returns: - ndarray: The converted YCbCr image. The output image has the same type - and range as input image. - """ - img_type = img.dtype - img = _convert_input_type_range(img) - if y_only: - out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0 - else: - out_img = np.matmul( - img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], - [24.966, 112.0, -18.214]]) + [16, 128, 128] - out_img = _convert_output_type_range(out_img, img_type) - return out_img - - -def bgr2ycbcr(img, y_only=False): - """Convert a BGR image to YCbCr image. - - The bgr version of rgb2ycbcr. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - y_only (bool): Whether to only return Y channel. Default: False. - - Returns: - ndarray: The converted YCbCr image. The output image has the same type - and range as input image. 
- """ - img_type = img.dtype - img = _convert_input_type_range(img) - if y_only: - out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0 - else: - out_img = np.matmul( - img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], - [65.481, -37.797, 112.0]]) + [16, 128, 128] - out_img = _convert_output_type_range(out_img, img_type) - return out_img - - -def ycbcr2rgb(img): - """Convert a YCbCr image to RGB image. - - This function produces the same results as Matlab's ycbcr2rgb function. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - - Returns: - ndarray: The converted RGB image. The output image has the same type - and range as input image. - """ - img_type = img.dtype - img = _convert_input_type_range(img) * 255 - out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], - [0, -0.00153632, 0.00791071], - [0.00625893, -0.00318811, 0]]) * 255.0 + [ - -222.921, 135.576, -276.836 - ] - out_img = _convert_output_type_range(out_img, img_type) - return out_img - - -def ycbcr2bgr(img): - """Convert a YCbCr image to BGR image. - - The bgr version of ycbcr2rgb. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - - Returns: - ndarray: The converted BGR image. The output image has the same type - and range as input image. - """ - img_type = img.dtype - img = _convert_input_type_range(img) * 255 - out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], - [0.00791071, -0.00153632, 0], - [0, -0.00318811, 0.00625893]]) * 255.0 + [ - -276.836, 135.576, -222.921 - ] - out_img = _convert_output_type_range(out_img, img_type) - return out_img - - -def convert_color_factory(src, dst): - - code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') - - def convert_color(img): - out_img = cv2.cvtColor(img, code) - return out_img - - convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()} - image. - - Args: - img (ndarray or str): The input image. - - Returns: - ndarray: The converted {dst.upper()} image. 
- """ - - return convert_color - - -bgr2rgb = convert_color_factory('bgr', 'rgb') - -rgb2bgr = convert_color_factory('rgb', 'bgr') - -bgr2hsv = convert_color_factory('bgr', 'hsv') - -hsv2bgr = convert_color_factory('hsv', 'bgr') - -bgr2hls = convert_color_factory('bgr', 'hls') - -hls2bgr = convert_color_factory('hls', 'bgr') diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/psp_head.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/psp_head.py deleted file mode 100644 index 2a88d807bfe11fe224305f8de745cde3aa739db0..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/psp_head.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.nn as nn -from annotator.mmpkg.mmcv.cnn import ConvModule - -from annotator.mmpkg.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -class PPM(nn.ModuleList): - """Pooling Pyramid Module used in PSPNet. - - Args: - pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module. - in_channels (int): Input channels. - channels (int): Channels after modules, before conv_seg. - conv_cfg (dict|None): Config of conv layers. - norm_cfg (dict|None): Config of norm layers. - act_cfg (dict): Config of activation layers. - align_corners (bool): align_corners argument of F.interpolate. - """ - - def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, - act_cfg, align_corners): - super(PPM, self).__init__() - self.pool_scales = pool_scales - self.align_corners = align_corners - self.in_channels = in_channels - self.channels = channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - for pool_scale in pool_scales: - self.append( - nn.Sequential( - nn.AdaptiveAvgPool2d(pool_scale), - ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg))) - - def forward(self, x): - """Forward function.""" - ppm_outs = [] - for ppm in self: - ppm_out = ppm(x) - upsampled_ppm_out = resize( - ppm_out, - size=x.size()[2:], - mode='bilinear', - align_corners=self.align_corners) - ppm_outs.append(upsampled_ppm_out) - return ppm_outs - - -@HEADS.register_module() -class PSPHead(BaseDecodeHead): - """Pyramid Scene Parsing Network. - - This head is the implementation of - `PSPNet `_. - - Args: - pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module. Default: (1, 2, 3, 6). 
- """ - - def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): - super(PSPHead, self).__init__(**kwargs) - assert isinstance(pool_scales, (list, tuple)) - self.pool_scales = pool_scales - self.psp_modules = PPM( - self.pool_scales, - self.in_channels, - self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - align_corners=self.align_corners) - self.bottleneck = ConvModule( - self.in_channels + len(pool_scales) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - psp_outs = [x] - psp_outs.extend(self.psp_modules(x)) - psp_outs = torch.cat(psp_outs, dim=1) - output = self.bottleneck(psp_outs) - output = self.cls_seg(output) - return output diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/modeling/backbone/build.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/modeling/backbone/build.py deleted file mode 100644 index 63a4aaced2c2869294d2b16f4b95cdfdd01259b7..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/modeling/backbone/build.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from annotator.oneformer.detectron2.layers import ShapeSpec -from annotator.oneformer.detectron2.utils.registry import Registry - -from .backbone import Backbone - -BACKBONE_REGISTRY = Registry("BACKBONE") -BACKBONE_REGISTRY.__doc__ = """ -Registry for backbones, which extract feature maps from images - -The registered object must be a callable that accepts two arguments: - -1. A :class:`detectron2.config.CfgNode` -2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification. - -Registered object must return instance of :class:`Backbone`. -""" - - -def build_backbone(cfg, input_shape=None): - """ - Build a backbone from `cfg.MODEL.BACKBONE.NAME`. - - Returns: - an instance of :class:`Backbone` - """ - if input_shape is None: - input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) - - backbone_name = cfg.MODEL.BACKBONE.NAME - backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) - assert isinstance(backbone, Backbone) - return backbone diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/utils/README.md b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/utils/README.md deleted file mode 100644 index 9765b24a730b77556104187ac3ef5439ab0859fd..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/utils/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Utility functions - -This folder contain utility functions that are not used in the -core library, but are useful for building models or training -code using the config system. 
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/voc.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/voc.py deleted file mode 100644 index a8855203b14ee0dc4da9099a2945d4aedcffbcd6..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/voc.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class PascalVOCDataset(CustomDataset): - """Pascal VOC dataset. - - Args: - split (str): Split txt file for Pascal VOC. - """ - - CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', - 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', - 'train', 'tvmonitor') - - PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], - [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], - [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], - [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], - [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] - - def __init__(self, split, **kwargs): - super(PascalVOCDataset, self).__init__( - img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) - assert osp.exists(self.img_dir) and self.split is not None diff --git a/spaces/cscan/CodeFormer/CodeFormer/basicsr/losses/losses.py b/spaces/cscan/CodeFormer/CodeFormer/basicsr/losses/losses.py deleted file mode 100644 index 1bcf272cfb756d99451a3005567ea4d4c9059067..0000000000000000000000000000000000000000 --- a/spaces/cscan/CodeFormer/CodeFormer/basicsr/losses/losses.py +++ /dev/null @@ -1,455 +0,0 @@ -import math -import lpips -import torch -from torch import autograd as autograd -from torch import nn as nn -from torch.nn import functional as F - -from basicsr.archs.vgg_arch import VGGFeatureExtractor -from basicsr.utils.registry import LOSS_REGISTRY -from .loss_util import weighted_loss - -_reduction_modes = ['none', 'mean', 'sum'] - - -@weighted_loss -def l1_loss(pred, target): - return F.l1_loss(pred, target, reduction='none') - - -@weighted_loss -def mse_loss(pred, target): - return F.mse_loss(pred, target, reduction='none') - - -@weighted_loss -def charbonnier_loss(pred, target, eps=1e-12): - return torch.sqrt((pred - target)**2 + eps) - - -@LOSS_REGISTRY.register() -class L1Loss(nn.Module): - """L1 (mean absolute error, MAE) loss. - - Args: - loss_weight (float): Loss weight for L1 loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - super(L1Loss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. - """ - return self.loss_weight * l1_loss(pred, target, weight, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class MSELoss(nn.Module): - """MSE (L2) loss. 
- - Args: - loss_weight (float): Loss weight for MSE loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - super(MSELoss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. - """ - return self.loss_weight * mse_loss(pred, target, weight, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class CharbonnierLoss(nn.Module): - """Charbonnier loss (one variant of Robust L1Loss, a differentiable - variant of L1Loss). - - Described in "Deep Laplacian Pyramid Networks for Fast and Accurate - Super-Resolution". - - Args: - loss_weight (float): Loss weight for L1 loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - eps (float): A value used to control the curvature near zero. - Default: 1e-12. - """ - - def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12): - super(CharbonnierLoss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - self.eps = eps - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. - """ - return self.loss_weight * charbonnier_loss(pred, target, weight, eps=self.eps, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class WeightedTVLoss(L1Loss): - """Weighted TV loss. - - Args: - loss_weight (float): Loss weight. Default: 1.0. - """ - - def __init__(self, loss_weight=1.0): - super(WeightedTVLoss, self).__init__(loss_weight=loss_weight) - - def forward(self, pred, weight=None): - y_diff = super(WeightedTVLoss, self).forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=weight[:, :, :-1, :]) - x_diff = super(WeightedTVLoss, self).forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=weight[:, :, :, :-1]) - - loss = x_diff + y_diff - - return loss - - -@LOSS_REGISTRY.register() -class PerceptualLoss(nn.Module): - """Perceptual loss with commonly used style loss. - - Args: - layer_weights (dict): The weight for each layer of vgg feature. - Here is an example: {'conv5_4': 1.}, which means the conv5_4 - feature layer (before relu5_4) will be extracted with weight - 1.0 in calculting losses. - vgg_type (str): The type of vgg network used as feature extractor. - Default: 'vgg19'. - use_input_norm (bool): If True, normalize the input image in vgg. - Default: True. - range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. - Default: False. - perceptual_weight (float): If `perceptual_weight > 0`, the perceptual - loss will be calculated and the loss will multiplied by the - weight. Default: 1.0. 
- style_weight (float): If `style_weight > 0`, the style loss will be - calculated and the loss will multiplied by the weight. - Default: 0. - criterion (str): Criterion used for perceptual loss. Default: 'l1'. - """ - - def __init__(self, - layer_weights, - vgg_type='vgg19', - use_input_norm=True, - range_norm=False, - perceptual_weight=1.0, - style_weight=0., - criterion='l1'): - super(PerceptualLoss, self).__init__() - self.perceptual_weight = perceptual_weight - self.style_weight = style_weight - self.layer_weights = layer_weights - self.vgg = VGGFeatureExtractor( - layer_name_list=list(layer_weights.keys()), - vgg_type=vgg_type, - use_input_norm=use_input_norm, - range_norm=range_norm) - - self.criterion_type = criterion - if self.criterion_type == 'l1': - self.criterion = torch.nn.L1Loss() - elif self.criterion_type == 'l2': - self.criterion = torch.nn.L2loss() - elif self.criterion_type == 'mse': - self.criterion = torch.nn.MSELoss(reduction='mean') - elif self.criterion_type == 'fro': - self.criterion = None - else: - raise NotImplementedError(f'{criterion} criterion has not been supported.') - - def forward(self, x, gt): - """Forward function. - - Args: - x (Tensor): Input tensor with shape (n, c, h, w). - gt (Tensor): Ground-truth tensor with shape (n, c, h, w). - - Returns: - Tensor: Forward results. - """ - # extract vgg features - x_features = self.vgg(x) - gt_features = self.vgg(gt.detach()) - - # calculate perceptual loss - if self.perceptual_weight > 0: - percep_loss = 0 - for k in x_features.keys(): - if self.criterion_type == 'fro': - percep_loss += torch.norm(x_features[k] - gt_features[k], p='fro') * self.layer_weights[k] - else: - percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k] - percep_loss *= self.perceptual_weight - else: - percep_loss = None - - # calculate style loss - if self.style_weight > 0: - style_loss = 0 - for k in x_features.keys(): - if self.criterion_type == 'fro': - style_loss += torch.norm( - self._gram_mat(x_features[k]) - self._gram_mat(gt_features[k]), p='fro') * self.layer_weights[k] - else: - style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat( - gt_features[k])) * self.layer_weights[k] - style_loss *= self.style_weight - else: - style_loss = None - - return percep_loss, style_loss - - def _gram_mat(self, x): - """Calculate Gram matrix. - - Args: - x (torch.Tensor): Tensor with shape of (n, c, h, w). - - Returns: - torch.Tensor: Gram matrix. 
- """ - n, c, h, w = x.size() - features = x.view(n, c, w * h) - features_t = features.transpose(1, 2) - gram = features.bmm(features_t) / (c * h * w) - return gram - - -@LOSS_REGISTRY.register() -class LPIPSLoss(nn.Module): - def __init__(self, - loss_weight=1.0, - use_input_norm=True, - range_norm=False,): - super(LPIPSLoss, self).__init__() - self.perceptual = lpips.LPIPS(net="vgg", spatial=False).eval() - self.loss_weight = loss_weight - self.use_input_norm = use_input_norm - self.range_norm = range_norm - - if self.use_input_norm: - # the mean is for image with range [0, 1] - self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)) - # the std is for image with range [0, 1] - self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)) - - def forward(self, pred, target): - if self.range_norm: - pred = (pred + 1) / 2 - target = (target + 1) / 2 - if self.use_input_norm: - pred = (pred - self.mean) / self.std - target = (target - self.mean) / self.std - lpips_loss = self.perceptual(target.contiguous(), pred.contiguous()) - return self.loss_weight * lpips_loss.mean() - - -@LOSS_REGISTRY.register() -class GANLoss(nn.Module): - """Define GAN loss. - - Args: - gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'. - real_label_val (float): The value for real label. Default: 1.0. - fake_label_val (float): The value for fake label. Default: 0.0. - loss_weight (float): Loss weight. Default: 1.0. - Note that loss_weight is only for generators; and it is always 1.0 - for discriminators. - """ - - def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0): - super(GANLoss, self).__init__() - self.gan_type = gan_type - self.loss_weight = loss_weight - self.real_label_val = real_label_val - self.fake_label_val = fake_label_val - - if self.gan_type == 'vanilla': - self.loss = nn.BCEWithLogitsLoss() - elif self.gan_type == 'lsgan': - self.loss = nn.MSELoss() - elif self.gan_type == 'wgan': - self.loss = self._wgan_loss - elif self.gan_type == 'wgan_softplus': - self.loss = self._wgan_softplus_loss - elif self.gan_type == 'hinge': - self.loss = nn.ReLU() - else: - raise NotImplementedError(f'GAN type {self.gan_type} is not implemented.') - - def _wgan_loss(self, input, target): - """wgan loss. - - Args: - input (Tensor): Input tensor. - target (bool): Target label. - - Returns: - Tensor: wgan loss. - """ - return -input.mean() if target else input.mean() - - def _wgan_softplus_loss(self, input, target): - """wgan loss with soft plus. softplus is a smooth approximation to the - ReLU function. - - In StyleGAN2, it is called: - Logistic loss for discriminator; - Non-saturating loss for generator. - - Args: - input (Tensor): Input tensor. - target (bool): Target label. - - Returns: - Tensor: wgan loss. - """ - return F.softplus(-input).mean() if target else F.softplus(input).mean() - - def get_target_label(self, input, target_is_real): - """Get target label. - - Args: - input (Tensor): Input tensor. - target_is_real (bool): Whether the target is real or fake. - - Returns: - (bool | Tensor): Target tensor. Return bool for wgan, otherwise, - return Tensor. - """ - - if self.gan_type in ['wgan', 'wgan_softplus']: - return target_is_real - target_val = (self.real_label_val if target_is_real else self.fake_label_val) - return input.new_ones(input.size()) * target_val - - def forward(self, input, target_is_real, is_disc=False): - """ - Args: - input (Tensor): The input for the loss module, i.e., the network - prediction. 
- target_is_real (bool): Whether the targe is real or fake. - is_disc (bool): Whether the loss for discriminators or not. - Default: False. - - Returns: - Tensor: GAN loss value. - """ - if self.gan_type == 'hinge': - if is_disc: # for discriminators in hinge-gan - input = -input if target_is_real else input - loss = self.loss(1 + input).mean() - else: # for generators in hinge-gan - loss = -input.mean() - else: # other gan types - target_label = self.get_target_label(input, target_is_real) - loss = self.loss(input, target_label) - - # loss_weight is always 1.0 for discriminators - return loss if is_disc else loss * self.loss_weight - - -def r1_penalty(real_pred, real_img): - """R1 regularization for discriminator. The core idea is to - penalize the gradient on real data alone: when the - generator distribution produces the true data distribution - and the discriminator is equal to 0 on the data manifold, the - gradient penalty ensures that the discriminator cannot create - a non-zero gradient orthogonal to the data manifold without - suffering a loss in the GAN game. - - Ref: - Eq. 9 in Which training methods for GANs do actually converge. - """ - grad_real = autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0] - grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean() - return grad_penalty - - -def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01): - noise = torch.randn_like(fake_img) / math.sqrt(fake_img.shape[2] * fake_img.shape[3]) - grad = autograd.grad(outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0] - path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1)) - - path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length) - - path_penalty = (path_lengths - path_mean).pow(2).mean() - - return path_penalty, path_lengths.detach().mean(), path_mean.detach() - - -def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None): - """Calculate gradient penalty for wgan-gp. - - Args: - discriminator (nn.Module): Network for the discriminator. - real_data (Tensor): Real input data. - fake_data (Tensor): Fake input data. - weight (Tensor): Weight tensor. Default: None. - - Returns: - Tensor: A tensor for gradient penalty. - """ - - batch_size = real_data.size(0) - alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1)) - - # interpolate between real_data and fake_data - interpolates = alpha * real_data + (1. 
- alpha) * fake_data - interpolates = autograd.Variable(interpolates, requires_grad=True) - - disc_interpolates = discriminator(interpolates) - gradients = autograd.grad( - outputs=disc_interpolates, - inputs=interpolates, - grad_outputs=torch.ones_like(disc_interpolates), - create_graph=True, - retain_graph=True, - only_inputs=True)[0] - - if weight is not None: - gradients = gradients * weight - - gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean() - if weight is not None: - gradients_penalty /= torch.mean(weight) - - return gradients_penalty diff --git a/spaces/csuhan/LLaMA-Adapter/app.py b/spaces/csuhan/LLaMA-Adapter/app.py deleted file mode 100644 index 70e93b3f4263bbc5ae6cf555851a40f83feacfdb..0000000000000000000000000000000000000000 --- a/spaces/csuhan/LLaMA-Adapter/app.py +++ /dev/null @@ -1,278 +0,0 @@ -import json -import os -import glob -import sys -import time -from pathlib import Path -from typing import Tuple - -from huggingface_hub import hf_hub_download -from PIL import Image -import gradio as gr -import torch -from fairscale.nn.model_parallel.initialize import initialize_model_parallel - -from llama import LLaMA, ModelArgs, Tokenizer, Transformer, VisionModel - -os.environ['CUDA_LAUNCH_BLOCKING'] = '1' - -PROMPT_DICT = { - "prompt_input": ( - "Below is an instruction that describes a task, paired with an input that provides further context. " - "Write a response that appropriately completes the request.\n\n" - "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:" - ), - "prompt_no_input": ( - "Below is an instruction that describes a task. " - "Write a response that appropriately completes the request.\n\n" - "### Instruction:\n{instruction}\n\n### Response:" - ), -} - - -def setup_model_parallel() -> Tuple[int, int]: - os.environ['RANK'] = '0' - os.environ['WORLD_SIZE'] = '1' - os.environ['MP'] = '1' - os.environ['MASTER_ADDR'] = '127.0.0.1' - os.environ['MASTER_PORT'] = '2223' - local_rank = int(os.environ.get("LOCAL_RANK", -1)) - world_size = int(os.environ.get("WORLD_SIZE", -1)) - - torch.distributed.init_process_group("nccl") - initialize_model_parallel(world_size) - torch.cuda.set_device(local_rank) - - # seed must be the same in all processes - torch.manual_seed(1) - return local_rank, world_size - - -def load( - ckpt0_path: str, - ckpt1_path: str, - param_path: str, - tokenizer_path: str, - instruct_adapter_path: str, - caption_adapter_path: str, - local_rank: int, - world_size: int, - max_seq_len: int, - max_batch_size: int, -) -> LLaMA: - start_time = time.time() - print("Loading") - instruct_adapter_checkpoint = torch.load( - instruct_adapter_path, map_location="cpu") - caption_adapter_checkpoint = torch.load( - caption_adapter_path, map_location="cpu") - with open(param_path, "r") as f: - params = json.loads(f.read()) - - model_args: ModelArgs = ModelArgs( - max_seq_len=max_seq_len, max_batch_size=max_batch_size, **params - ) - model_args.adapter_layer = int( - instruct_adapter_checkpoint['adapter_query.weight'].shape[0] / model_args.adapter_len) - model_args.cap_adapter_layer = int( - caption_adapter_checkpoint['cap_adapter_query.weight'].shape[0] / model_args.cap_adapter_len) - - tokenizer = Tokenizer(model_path=tokenizer_path) - model_args.vocab_size = tokenizer.n_words - torch.set_default_tensor_type(torch.cuda.HalfTensor) - model = Transformer(model_args) - - # To reduce memory usuage - ckpt0 = torch.load(ckpt0_path, map_location='cuda') - model.load_state_dict(ckpt0, strict=False) - del ckpt0 - torch.cuda.empty_cache() - - 
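    # The base weights are published as two shards (consolidated.00_part0.pth and
    # consolidated.00_part1.pth, downloaded below); each shard is loaded with
    # strict=False so the keys held by the other shard are tolerated, then freed
    # with del + empty_cache() before the next one is read, keeping peak memory
    # close to the size of a single shard.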
ckpt1 = torch.load(ckpt1_path, map_location='cuda') - model.load_state_dict(ckpt1, strict=False) - del ckpt1 - torch.cuda.empty_cache() - - vision_model = VisionModel(model_args) - - torch.set_default_tensor_type(torch.FloatTensor) - model.load_state_dict(instruct_adapter_checkpoint, strict=False) - model.load_state_dict(caption_adapter_checkpoint, strict=False) - vision_model.load_state_dict(caption_adapter_checkpoint, strict=False) - - generator = LLaMA(model, tokenizer, vision_model) - print(f"Loaded in {time.time() - start_time:.2f} seconds") - return generator - - -def instruct_generate( - instruct: str, - input: str = 'none', - max_gen_len=512, - temperature: float = 0.1, - top_p: float = 0.75, -): - if input == 'none': - prompt = PROMPT_DICT['prompt_no_input'].format_map( - {'instruction': instruct, 'input': ''}) - else: - prompt = PROMPT_DICT['prompt_input'].format_map( - {'instruction': instruct, 'input': input}) - - results = generator.generate( - [prompt], max_gen_len=max_gen_len, temperature=temperature, top_p=top_p - ) - result = results[0].strip() - print(result) - return result - - -def caption_generate( - img: str, - max_gen_len=512, - temperature: float = 0.1, - top_p: float = 0.75, -): - imgs = [Image.open(img).convert('RGB')] - prompts = ["Generate caption of this image :",] * len(imgs) - - results = generator.generate( - prompts, imgs=imgs, max_gen_len=max_gen_len, temperature=temperature, top_p=top_p - ) - result = results[0].strip() - print(result) - return result - - -def download_llama_adapter(instruct_adapter_path, caption_adapter_path): - if not os.path.exists(instruct_adapter_path): - os.system( - f"wget -q -O {instruct_adapter_path} https://github.com/OpenGVLab/LLaMA-Adapter/releases/download/v.1.0.0/llama_adapter_len10_layer30_release.pth") - - if not os.path.exists(caption_adapter_path): - os.system( - f"wget -q -O {caption_adapter_path} https://github.com/OpenGVLab/LLaMA-Adapter/releases/download/v.1.0.0/llama_adapter_len10_layer30_caption_vit_l.pth") - - -# ckpt_path = "/data1/llma/7B/consolidated.00.pth" -# param_path = "/data1/llma/7B/params.json" -# tokenizer_path = "/data1/llma/tokenizer.model" -ckpt0_path = hf_hub_download( - repo_id="csuhan/llama_storage", filename="consolidated.00_part0.pth") -ckpt1_path = hf_hub_download( - repo_id="csuhan/llama_storage", filename="consolidated.00_part1.pth") -param_path = hf_hub_download( - repo_id="nyanko7/LLaMA-7B", filename="params.json") -tokenizer_path = hf_hub_download( - repo_id="nyanko7/LLaMA-7B", filename="tokenizer.model") -instruct_adapter_path = "llama_adapter_len10_layer30_release.pth" -caption_adapter_path = "llama_adapter_len10_layer30_caption_vit_l.pth" -max_seq_len = 512 -max_batch_size = 1 - -# download models -# download_llama_adapter(instruct_adapter_path, caption_adapter_path) - - -local_rank, world_size = setup_model_parallel() -if local_rank > 0: - sys.stdout = open(os.devnull, "w") - -generator = load( - ckpt0_path, ckpt1_path, param_path, tokenizer_path, instruct_adapter_path, caption_adapter_path, local_rank, world_size, max_seq_len, max_batch_size -) - - -def create_instruct_demo(): - with gr.Blocks() as instruct_demo: - with gr.Row(): - with gr.Column(): - instruction = gr.Textbox(lines=2, label="Instruction") - input = gr.Textbox( - lines=2, label="Context input", placeholder='none') - max_len = gr.Slider(minimum=1, maximum=512, - value=128, label="Max length") - with gr.Accordion(label='Advanced options', open=False): - temp = gr.Slider(minimum=0, maximum=1, - value=0.1, 
label="Temperature") - top_p = gr.Slider(minimum=0, maximum=1, - value=0.75, label="Top p") - - run_botton = gr.Button("Run") - - with gr.Column(): - outputs = gr.Textbox(lines=10, label="Output") - - inputs = [instruction, input, max_len, temp, top_p] - - examples = [ - "Tell me about alpacas.", - "Write a Python program that prints the first 10 Fibonacci numbers.", - "Write a conversation between the sun and pluto.", - "Write a theory to explain why cat never existed", - ] - examples = [ - [x, "none", 128, 0.1, 0.75] - for x in examples] - - gr.Examples( - examples=examples, - inputs=inputs, - outputs=outputs, - fn=instruct_generate, - cache_examples=os.getenv('SYSTEM') == 'spaces' - ) - run_botton.click(fn=instruct_generate, inputs=inputs, outputs=outputs) - return instruct_demo - - -def create_caption_demo(): - with gr.Blocks() as instruct_demo: - with gr.Row(): - with gr.Column(): - img = gr.Image(label='Input', type='filepath') - max_len = gr.Slider(minimum=1, maximum=512, - value=64, label="Max length") - with gr.Accordion(label='Advanced options', open=False): - temp = gr.Slider(minimum=0, maximum=1, - value=0.1, label="Temperature") - top_p = gr.Slider(minimum=0, maximum=1, - value=0.75, label="Top p") - - run_botton = gr.Button("Run") - - with gr.Column(): - outputs = gr.Textbox(lines=10, label="Output") - - inputs = [img, max_len, temp, top_p] - - examples = glob.glob("caption_demo/*.jpg") - examples = [ - [x, 64, 0.1, 0.75] - for x in examples] - - gr.Examples( - examples=examples, - inputs=inputs, - outputs=outputs, - fn=caption_generate, - cache_examples=os.getenv('SYSTEM') == 'spaces' - ) - run_botton.click(fn=caption_generate, inputs=inputs, outputs=outputs) - return instruct_demo - - -description = """ -# LLaMA-Adapter 🚀 -The official demo for **LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention**. -Please refer to our [arXiv paper](https://arxiv.org/abs/2303.16199) and [github](https://github.com/ZrrSkywalker/LLaMA-Adapter) for more details. -""" - -with gr.Blocks(css='style.css') as demo: - gr.Markdown(description) - with gr.TabItem("Instruction-Following"): - create_instruct_demo() - with gr.TabItem("Image Captioning"): - create_caption_demo() - -demo.queue(api_open=True, concurrency_count=1).launch() diff --git a/spaces/curt-tigges/anime-image-labeller/app.py b/spaces/curt-tigges/anime-image-labeller/app.py deleted file mode 100644 index 3471531b2b91a7e5f6c91ec8d91cfd89c00f1997..0000000000000000000000000000000000000000 --- a/spaces/curt-tigges/anime-image-labeller/app.py +++ /dev/null @@ -1,51 +0,0 @@ -import gradio as gr -import fastbook -fastbook.setup_book() -from fastbook import * - -""" -Get the prediction labels and their accuracies, then return the results as a dictionary. 
- -[obj] - tensor matrix containing the predicted accuracy given from the model -[learn] - fastai learner needed to get the labels -[thresh] - minimum accuracy threshold to returning results -""" -def get_pred_classes(obj, learn, thresh): - labels = [] - # get list of classes from csv--replace - with open('classes.txt', 'r') as f: - for line in f: - labels.append(line.strip('\n')) - - predictions = {} - x=0 - for item in obj: - acc= round(item.item(), 3) - if acc > thresh: - predictions[labels[x]] = round(acc, 3) - x+=1 - - predictions =sorted(predictions.items(), key=lambda x: x[1], reverse=True) - - return predictions - -def get_x(r): return 'images'/r['img_name'] -def get_y(r): return [t for t in r['tags'].split(' ') if t in pop_tags] - -learn = load_learner(fname='model-large-basic-10e.pkl') - -def predict_single_img(imf, thresh=0.2, learn=learn): - - img = PILImage.create(imf) - - #img.show() #show image - _, _, pred_pct = learn.predict(img) #predict while ignoring first 2 array inputs - img.show() #show image - return str(get_pred_classes(pred_pct, learn, thresh)) - -#predict_single_img('test/mask.jpeg') - -iface = gr.Interface(fn=predict_single_img, - inputs=["image","number"], - outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/damian0815/Erasing-Concepts-In-Diffusion/README.md b/spaces/damian0815/Erasing-Concepts-In-Diffusion/README.md deleted file mode 100644 index e84fe526daf7cb3931ff800af48fddf3fe6dd616..0000000000000000000000000000000000000000 --- a/spaces/damian0815/Erasing-Concepts-In-Diffusion/README.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Erasing Concepts from Diffusion Models -emoji: 💡 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit ---- - -# A GUI with custom model support, validation, and sample generation for "Erasing Concepts from Diffusion Models" - -Enables xformers, 8 bit AdamW via bitsandbytes, and AMP - editing SD1.5 models works with 16GB VRAM, and 2.5 models including the ESD-u training works with 24GB VRAM. - -## Quick start - -To run on vast.ai, use eg `pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel` - you need `-devel` for 8bit AdamW to work. - -On the dev machine: -``` -pip install -r requirements.txt -python app.py -``` - -then use the Gradio interface at port 7860. - -# Erasing Concepts from Diffusion Models - - Project Website [https://erasing.baulab.info](https://erasing.baulab.info)
    - Arxiv Preprint [https://arxiv.org/pdf/2303.07345.pdf](https://arxiv.org/pdf/2303.07345.pdf)
    - Fine-tuned Weights [https://erasing.baulab.info/weights/esd_models/](https://erasing.baulab.info/weights/esd_models/)
    -
    - -
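For readers grabbing the fine-tuned weights linked above, a minimal sketch of dropping an ESD UNet into a stock Stable Diffusion pipeline might look like the following; the checkpoint file name and the assumption that it stores a plain UNet `state_dict` are illustrative guesses, not something this README specifies.

```python
# Minimal sketch, assuming the downloaded .pt file is a diffusers-format UNet state_dict.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
esd_unet = torch.load("esd-vangogh-unet.pt", map_location="cpu")  # hypothetical file name
pipe.unet.load_state_dict(esd_unet, strict=False)  # strict=False in case only the edited layers were saved
pipe = pipe.to("cuda")

image = pipe("a painting of a wheat field under a starry sky").images[0]
image.save("erased-style-sample.png")
```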
    - -Motivated by recent advancements in text-to-image diffusion, we study erasure of specific concepts from the model's weights. While Stable Diffusion has shown promise in producing explicit or realistic artwork, it has raised concerns regarding its potential for misuse. We propose a fine-tuning method that can erase a visual concept from a pre-trained diffusion model, given only the name of the style and using negative guidance as a teacher. We benchmark our method against previous approaches that remove sexually explicit content and demonstrate its effectiveness, performing on par with Safe Latent Diffusion and censored training. - -To evaluate artistic style removal, we conduct experiments erasing five modern artists from the network and conduct a user study to assess the human perception of the removed styles. Unlike previous methods, our approach can remove concepts from a diffusion model permanently rather than modifying the output at the inference time, so it cannot be circumvented even if a user has access to model weights - -Given only a short text description of an undesired visual concept and no additional data, our method fine-tunes model weights to erase the targeted concept. Our method can avoid NSFW content, stop imitation of a specific artist's style, or even erase a whole object class from model output, while preserving the model's behavior and capabilities on other topics. - -## Demo vs github - -This demo uses an updated implementation from the original Erasing codebase the publication is based from. - -## Running locally - -1.) Create an environment using the packages included in the requirements.txt file - -2.) Run `python app.py` - -3.) Open the application in browser at `http://127.0.0.1:7860/` - -4.) Train, evaluate, and save models using our method - -## Citing our work -The preprint can be cited as follows -``` -@article{gandikota2023erasing, - title={Erasing Concepts from Diffusion Models}, - author={Rohit Gandikota and Joanna Materzy\'nska and Jaden Fiotto-Kaufman and David Bau}, - journal={arXiv preprint arXiv:2303.07345}, - year={2023} -} -``` \ No newline at end of file diff --git a/spaces/dantosxd/gorilla-llm-gorilla-mpt-7b-hf-v0/app.py b/spaces/dantosxd/gorilla-llm-gorilla-mpt-7b-hf-v0/app.py deleted file mode 100644 index 4766368fca968f88fe2ba2293e9f85ffa4b825bd..0000000000000000000000000000000000000000 --- a/spaces/dantosxd/gorilla-llm-gorilla-mpt-7b-hf-v0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gorilla-llm/gorilla-mpt-7b-hf-v0").launch() \ No newline at end of file diff --git a/spaces/dawdqd/ChuanhuChatGPT/modules/overwrites.py b/spaces/dawdqd/ChuanhuChatGPT/modules/overwrites.py deleted file mode 100644 index a4ef6167eb7ce75ed8b88024ad1187b24f2fc191..0000000000000000000000000000000000000000 --- a/spaces/dawdqd/ChuanhuChatGPT/modules/overwrites.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import annotations -import logging - -from typing import List, Tuple -from gradio_client import utils as client_utils -from gradio import utils -import inspect - -from modules.presets import * -from modules.index_func import * - - -def postprocess( - self, - y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple], - ) -> List[List[str | Dict | None]]: - """ - Parameters: - y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. 
It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. - Returns: - List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. - """ - if y is None: - return [] - processed_messages = [] - for message_pair in y: - assert isinstance( - message_pair, (tuple, list) - ), f"Expected a list of lists or list of tuples. Received: {message_pair}" - assert ( - len(message_pair) == 2 - ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}" - - processed_messages.append( - [ - self._postprocess_chat_messages(message_pair[0], "user"), - self._postprocess_chat_messages(message_pair[1], "bot"), - ] - ) - return processed_messages - -def postprocess_chat_messages( - self, chat_message: str | tuple | list | None, role: str - ) -> str | dict | None: - if chat_message is None: - return None - elif isinstance(chat_message, (tuple, list)): - file_uri = chat_message[0] - if utils.validate_url(file_uri): - filepath = file_uri - else: - filepath = self.make_temp_copy_if_needed(file_uri) - - mime_type = client_utils.get_mimetype(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } - elif isinstance(chat_message, str): - # chat_message = inspect.cleandoc(chat_message) - # escape html spaces - # chat_message = chat_message.replace(" ", " ") - if role == "bot": - chat_message = convert_bot_before_marked(chat_message) - elif role == "user": - chat_message = convert_user_before_marked(chat_message) - return chat_message - else: - raise ValueError(f"Invalid message for Chatbot component: {chat_message}") - - - -def add_classes_to_gradio_component(comp): - """ - this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others - code from stable-diffusion-webui - """ - - comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] - - if getattr(comp, 'multiselect', False): - comp.elem_classes.append('multiselect') - - -def IOComponent_init(self, *args, **kwargs): - res = original_IOComponent_init(self, *args, **kwargs) - add_classes_to_gradio_component(self) - - return res - -original_IOComponent_init = gr.components.IOComponent.__init__ -gr.components.IOComponent.__init__ = IOComponent_init - - -def BlockContext_init(self, *args, **kwargs): - res = original_BlockContext_init(self, *args, **kwargs) - add_classes_to_gradio_component(self) - - return res - -original_BlockContext_init = gr.blocks.BlockContext.__init__ -gr.blocks.BlockContext.__init__ = BlockContext_init - diff --git a/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/external-scripts.js b/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/external-scripts.js deleted file mode 100644 index 8d0352669045537af5698b1824dbc1dba21df478..0000000000000000000000000000000000000000 --- a/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/external-scripts.js +++ /dev/null @@ -1,2 +0,0 @@ - -// external javascript here diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/dateutil/tz/win.py 
b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/dateutil/tz/win.py deleted file mode 100644 index cde07ba792c40903f0c334839140173b39fd8124..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/dateutil/tz/win.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module provides an interface to the native time zone data on Windows, -including :py:class:`datetime.tzinfo` implementations. - -Attempting to import this module on a non-Windows platform will raise an -:py:obj:`ImportError`. -""" -# This code was originally contributed by Jeffrey Harris. -import datetime -import struct - -from six.moves import winreg -from six import text_type - -try: - import ctypes - from ctypes import wintypes -except ValueError: - # ValueError is raised on non-Windows systems for some horrible reason. - raise ImportError("Running tzwin on non-Windows system") - -from ._common import tzrangebase - -__all__ = ["tzwin", "tzwinlocal", "tzres"] - -ONEWEEK = datetime.timedelta(7) - -TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" -TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" -TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" - - -def _settzkeyname(): - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - try: - winreg.OpenKey(handle, TZKEYNAMENT).Close() - TZKEYNAME = TZKEYNAMENT - except WindowsError: - TZKEYNAME = TZKEYNAME9X - handle.Close() - return TZKEYNAME - - -TZKEYNAME = _settzkeyname() - - -class tzres(object): - """ - Class for accessing ``tzres.dll``, which contains timezone name related - resources. - - .. versionadded:: 2.5.0 - """ - p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char - - def __init__(self, tzres_loc='tzres.dll'): - # Load the user32 DLL so we can load strings from tzres - user32 = ctypes.WinDLL('user32') - - # Specify the LoadStringW function - user32.LoadStringW.argtypes = (wintypes.HINSTANCE, - wintypes.UINT, - wintypes.LPWSTR, - ctypes.c_int) - - self.LoadStringW = user32.LoadStringW - self._tzres = ctypes.WinDLL(tzres_loc) - self.tzres_loc = tzres_loc - - def load_name(self, offset): - """ - Load a timezone name from a DLL offset (integer). - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.load_name(112)) - 'Eastern Standard Time' - - :param offset: - A positive integer value referring to a string from the tzres dll. - - .. note:: - - Offsets found in the registry are generally of the form - ``@tzres.dll,-114``. The offset in this case is 114, not -114. - - """ - resource = self.p_wchar() - lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) - nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) - return resource[:nchar] - - def name_from_string(self, tzname_str): - """ - Parse strings as returned from the Windows registry into the time zone - name as defined in the registry. - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.name_from_string('@tzres.dll,-251')) - 'Dateline Daylight Time' - >>> print(tzr.name_from_string('Eastern Standard Time')) - 'Eastern Standard Time' - - :param tzname_str: - A timezone name string as returned from a Windows registry key. - - :return: - Returns the localized timezone string from tzres.dll if the string - is of the form `@tzres.dll,-offset`, else returns the input string. 
- """ - if not tzname_str.startswith('@'): - return tzname_str - - name_splt = tzname_str.split(',-') - try: - offset = int(name_splt[1]) - except: - raise ValueError("Malformed timezone string.") - - return self.load_name(offset) - - -class tzwinbase(tzrangebase): - """tzinfo class based on win32's timezones available in the registry.""" - def __init__(self): - raise NotImplementedError('tzwinbase is an abstract base class') - - def __eq__(self, other): - # Compare on all relevant dimensions, including name. - if not isinstance(other, tzwinbase): - return NotImplemented - - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._stddayofweek == other._stddayofweek and - self._dstdayofweek == other._dstdayofweek and - self._stdweeknumber == other._stdweeknumber and - self._dstweeknumber == other._dstweeknumber and - self._stdhour == other._stdhour and - self._dsthour == other._dsthour and - self._stdminute == other._stdminute and - self._dstminute == other._dstminute and - self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr) - - @staticmethod - def list(): - """Return a list of all time zones known to the system.""" - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZKEYNAME) as tzkey: - result = [winreg.EnumKey(tzkey, i) - for i in range(winreg.QueryInfoKey(tzkey)[0])] - return result - - def display(self): - """ - Return the display name of the time zone. - """ - return self._display - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. - - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. - """ - - if not self.hasdst: - return None - - dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, - self._dsthour, self._dstminute, - self._dstweeknumber) - - dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, - self._stdhour, self._stdminute, - self._stdweeknumber) - - # Ambiguous dates default to the STD side - dstoff -= self._dst_base_offset - - return dston, dstoff - - def _get_hasdst(self): - return self._dstmonth != 0 - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -class tzwin(tzwinbase): - """ - Time zone object created from the zone info in the Windows registry - - These are similar to :py:class:`dateutil.tz.tzrange` objects in that - the time zone data is provided in the format of a single offset rule - for either 0 or 2 time zone transitions per year. - - :param: name - The name of a Windows time zone key, e.g. "Eastern Standard Time". - The full list of keys can be retrieved with :func:`tzwin.list`. 
- """ - - def __init__(self, name): - self._name = name - - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - keydict = valuestodict(tzkey) - - self._std_abbr = keydict["Std"] - self._dst_abbr = keydict["Dlt"] - - self._display = keydict["Display"] - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=3l16h", keydict["TZI"]) - stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 - dstoffset = stdoffset-tup[2] # + DaylightBias * -1 - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs - # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[4:9] - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[12:17] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwin(%s)" % repr(self._name) - - def __reduce__(self): - return (self.__class__, (self._name,)) - - -class tzwinlocal(tzwinbase): - """ - Class representing the local time zone information in the Windows registry - - While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` - module) to retrieve time zone information, ``tzwinlocal`` retrieves the - rules directly from the Windows registry and creates an object like - :class:`dateutil.tz.tzwin`. - - Because Windows does not have an equivalent of :func:`time.tzset`, on - Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the - time zone settings *at the time that the process was started*, meaning - changes to the machine's time zone settings during the run of a program - on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. - Because ``tzwinlocal`` reads the registry directly, it is unaffected by - this issue. - """ - def __init__(self): - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: - keydict = valuestodict(tzlocalkey) - - self._std_abbr = keydict["StandardName"] - self._dst_abbr = keydict["DaylightName"] - - try: - tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, - sn=self._std_abbr) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - _keydict = valuestodict(tzkey) - self._display = _keydict["Display"] - except OSError: - self._display = None - - stdoffset = -keydict["Bias"]-keydict["StandardBias"] - dstoffset = stdoffset-keydict["DaylightBias"] - - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # For reasons unclear, in this particular key, the day of week has been - # moved to the END of the SYSTEMTIME structure. 
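        # "StandardStart"/"DaylightStart" are SYSTEMTIME-like blocks of eight 16-bit
        # values: index 0 (the year field) is not used here, indices 1-4 give month,
        # week-of-month (5 = last), hour and minute, and index 7 holds the day of
        # week (Sunday = 0) instead of its usual third position.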
- tup = struct.unpack("=8h", keydict["StandardStart"]) - - (self._stdmonth, - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[1:5] - - self._stddayofweek = tup[7] - - tup = struct.unpack("=8h", keydict["DaylightStart"]) - - (self._dstmonth, - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[1:5] - - self._dstdayofweek = tup[7] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwinlocal()" - - def __str__(self): - # str will return the standard name, not the daylight name. - return "tzwinlocal(%s)" % repr(self._std_abbr) - - def __reduce__(self): - return (self.__class__, ()) - - -def picknthweekday(year, month, dayofweek, hour, minute, whichweek): - """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ - first = datetime.datetime(year, month, 1, hour, minute) - - # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), - # Because 7 % 7 = 0 - weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) - wd = weekdayone + ((whichweek - 1) * ONEWEEK) - if (wd.month != month): - wd -= ONEWEEK - - return wd - - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dout = {} - size = winreg.QueryInfoKey(key)[1] - tz_res = None - - for i in range(size): - key_name, value, dtype = winreg.EnumValue(key, i) - if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: - # If it's a DWORD (32-bit integer), it's stored as unsigned - convert - # that to a proper signed integer - if value & (1 << 31): - value = value - (1 << 32) - elif dtype == winreg.REG_SZ: - # If it's a reference to the tzres DLL, load the actual string - if value.startswith('@tzres'): - tz_res = tz_res or tzres() - value = tz_res.name_from_string(value) - - value = value.rstrip('\x00') # Remove trailing nulls - - dout[key_name] = value - - return dout diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-1af20794.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-1af20794.css deleted file mode 100644 index 365e58d88e7f8e9c541e689f1fc99edd253df80e..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-1af20794.css +++ /dev/null @@ -1 +0,0 @@ -.rangeSlider{--pip:var(--range-pip, lightslategray);--pip-text:var(--range-pip-text, var(--pip));--pip-active:var(--range-pip-active, darkslategrey);--pip-active-text:var(--range-pip-active-text, var(--pip-active));--pip-hover:var(--range-pip-hover, darkslategrey);--pip-hover-text:var(--range-pip-hover-text, var(--pip-hover));--pip-in-range:var(--range-pip-in-range, var(--pip-active));--pip-in-range-text:var(--range-pip-in-range-text, var(--pip-active-text))}.rangePips{position:absolute;height:1em;left:0;right:0;bottom:-1em}.rangePips.vertical{height:auto;width:1em;inset:0 auto 0 100%}.rangePips .pip{height:.4em;position:absolute;top:.25em;width:1px;white-space:nowrap}.rangePips.vertical .pip{height:1px;width:.4em;left:.25em;top:auto;bottom:auto}.rangePips .pipVal{position:absolute;top:.4em;transform:translate(-50%,25%)}.rangePips.vertical .pipVal{position:absolute;top:0;left:.4em;transform:translate(25%,-50%)}.rangePips .pip{transition:all .15s ease}.rangePips .pipVal{transition:all .15s ease,font-weight 0s 
linear}.rangePips .pip{color:#789;color:var(--pip-text);background-color:#789;background-color:var(--pip)}.rangePips .pip.selected{color:#2f4f4f;color:var(--pip-active-text);background-color:#2f4f4f;background-color:var(--pip-active)}.rangePips.hoverable:not(.disabled) .pip:hover{color:#2f4f4f;color:var(--pip-hover-text);background-color:#2f4f4f;background-color:var(--pip-hover)}.rangePips .pip.in-range{color:#2f4f4f;color:var(--pip-in-range-text);background-color:#2f4f4f;background-color:var(--pip-in-range)}.rangePips .pip.selected{height:.75em}.rangePips.vertical .pip.selected{height:1px;width:.75em}.rangePips .pip.selected .pipVal{font-weight:700;top:.75em}.rangePips.vertical .pip.selected .pipVal{top:0;left:.75em}.rangePips.hoverable:not(.disabled) .pip:not(.selected):hover{transition:none}.rangePips.hoverable:not(.disabled) .pip:not(.selected):hover .pipVal{transition:none;font-weight:700}.rangeSlider{--slider:var(--range-slider, #d7dada);--handle-inactive:var(--range-handle-inactive, #99a2a2);--handle:var(--range-handle, #838de7);--handle-focus:var(--range-handle-focus, #4a40d4);--handle-border:var(--range-handle-border, var(--handle));--range-inactive:var(--range-range-inactive, var(--handle-inactive));--range:var(--range-range, var(--handle-focus));--float-inactive:var(--range-float-inactive, var(--handle-inactive));--float:var(--range-float, var(--handle-focus));--float-text:var(--range-float-text, white)}.rangeSlider{position:relative;border-radius:100px;height:.5em;margin:1em;transition:opacity .2s ease;user-select:none}.rangeSlider *{user-select:none}.rangeSlider.pips{margin-bottom:1.8em}.rangeSlider.pip-labels{margin-bottom:2.8em}.rangeSlider.vertical{display:inline-block;border-radius:100px;width:.5em;min-height:200px}.rangeSlider.vertical.pips{margin-right:1.8em;margin-bottom:1em}.rangeSlider.vertical.pip-labels{margin-right:2.8em;margin-bottom:1em}.rangeSlider .rangeHandle{position:absolute;display:block;height:1.4em;width:1.4em;top:.25em;bottom:auto;transform:translateY(-50%) translate(-50%);z-index:2}.rangeSlider.reversed .rangeHandle{transform:translateY(-50%) translate(50%)}.rangeSlider.vertical .rangeHandle{left:.25em;top:auto;transform:translateY(50%) translate(-50%)}.rangeSlider.vertical.reversed .rangeHandle{transform:translateY(-50%) translate(-50%)}.rangeSlider .rangeNub,.rangeSlider .rangeHandle:before{position:absolute;left:0;top:0;display:block;border-radius:10em;height:100%;width:100%;transition:box-shadow .2s ease}.rangeSlider .rangeHandle:before{content:"";inset:1px;height:auto;width:auto;box-shadow:0 0 0 0 var(--handle-border);opacity:0}.rangeSlider.hoverable:not(.disabled) .rangeHandle:hover:before{box-shadow:0 0 0 8px var(--handle-border);opacity:.2}.rangeSlider.hoverable:not(.disabled) .rangeHandle.press:before,.rangeSlider.hoverable:not(.disabled) .rangeHandle.press:hover:before{box-shadow:0 0 0 12px var(--handle-border);opacity:.4}.rangeSlider.range:not(.min):not(.max) .rangeNub{border-radius:10em 10em 10em 1.6em}.rangeSlider.range .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(-135deg)}.rangeSlider.range .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(45deg)}.rangeSlider.range.reversed .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(45deg)}.rangeSlider.range.reversed .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(-135deg)}.rangeSlider.range.vertical .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(135deg)}.rangeSlider.range.vertical .rangeHandle:nth-of-type(2) 
.rangeNub{transform:rotate(-45deg)}.rangeSlider.range.vertical.reversed .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(-45deg)}.rangeSlider.range.vertical.reversed .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(135deg)}.rangeSlider .rangeFloat{display:block;position:absolute;left:50%;top:-.5em;transform:translate(-50%,-100%);font-size:1em;text-align:center;opacity:0;pointer-events:none;white-space:nowrap;transition:all .2s ease;font-size:.9em;padding:.2em .4em;border-radius:.2em}.rangeSlider .rangeHandle.active .rangeFloat,.rangeSlider.hoverable .rangeHandle:hover .rangeFloat{opacity:1;top:-.2em;transform:translate(-50%,-100%)}.rangeSlider .rangeBar{position:absolute;display:block;transition:background .2s ease;border-radius:1em;height:.5em;top:0;user-select:none;z-index:1}.rangeSlider.vertical .rangeBar{width:.5em;height:auto}.rangeSlider{background-color:#d7dada;background-color:var(--slider)}.rangeSlider .rangeBar{background-color:#99a2a2;background-color:var(--range-inactive)}.rangeSlider.focus .rangeBar{background-color:#838de7;background-color:var(--range)}.rangeSlider .rangeNub{background-color:#99a2a2;background-color:var(--handle-inactive)}.rangeSlider.focus .rangeNub{background-color:#838de7;background-color:var(--handle)}.rangeSlider .rangeHandle.active .rangeNub{background-color:#4a40d4;background-color:var(--handle-focus)}.rangeSlider .rangeFloat{color:#fff;color:var(--float-text);background-color:#99a2a2;background-color:var(--float-inactive)}.rangeSlider.focus .rangeFloat{background-color:#4a40d4;background-color:var(--float)}.rangeSlider.disabled{opacity:.5}.rangeSlider.disabled .rangeNub{background-color:#d7dada;background-color:var(--slider)}.mic-wrap.svelte-1thnwz{padding:var(--size-2)}.record-icon.svelte-1thnwz{display:flex;position:relative;margin-right:var(--size-2);width:6px;height:6px}.dot.svelte-1thnwz{display:inline-flex;position:relative;border-radius:var(--radius-full);background:var(--color-red-500);width:6px;height:6px}.pinger.svelte-1thnwz{display:inline-flex;position:absolute;opacity:.9;animation:svelte-1thnwz-ping 1s cubic-bezier(0,0,.2,1) infinite;border-radius:var(--radius-full);background:var(--color-red-500);width:var(--size-full);height:var(--size-full)}@keyframes svelte-1thnwz-ping{75%,to{transform:scale(2);opacity:0}}audio.svelte-1thnwz{padding:var(--size-2);width:var(--size-full);height:var(--size-14)}audio.svelte-pq78xp{padding:var(--size-2);width:var(--size-full);height:var(--size-14)}.icon-buttons.svelte-pq78xp{display:flex;position:absolute;top:6px;right:6px;gap:var(--size-1)} diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion_safe/__init__.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion_safe/__init__.py deleted file mode 100644 index 5aecfeac112e53b2fc49278c1acaa95a6c0c7257..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion_safe/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -from dataclasses import dataclass -from enum import Enum -from typing import List, Optional, Union - -import numpy as np -import PIL -from PIL import Image - -from ...utils import BaseOutput, is_torch_available, is_transformers_available - - -@dataclass -class SafetyConfig(object): - WEAK = { - "sld_warmup_steps": 15, - "sld_guidance_scale": 20, - "sld_threshold": 0.0, - "sld_momentum_scale": 0.0, - "sld_mom_beta": 0.0, - } - MEDIUM = { - "sld_warmup_steps": 10, - "sld_guidance_scale": 1000, - "sld_threshold": 0.01, - 
"sld_momentum_scale": 0.3, - "sld_mom_beta": 0.4, - } - STRONG = { - "sld_warmup_steps": 7, - "sld_guidance_scale": 2000, - "sld_threshold": 0.025, - "sld_momentum_scale": 0.5, - "sld_mom_beta": 0.7, - } - MAX = { - "sld_warmup_steps": 0, - "sld_guidance_scale": 5000, - "sld_threshold": 1.0, - "sld_momentum_scale": 0.5, - "sld_mom_beta": 0.7, - } - - -@dataclass -class StableDiffusionSafePipelineOutput(BaseOutput): - """ - Output class for Safe Stable Diffusion pipelines. - - Args: - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, - num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. - nsfw_content_detected (`List[bool]`) - List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, or `None` if safety checking could not be performed. - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images that were flagged by the safety checker any may contain "not-safe-for-work" - (nsfw) content, or `None` if no safety check was performed or no images were flagged. - applied_safety_concept (`str`) - The safety concept that was applied for safety guidance, or `None` if safety guidance was disabled - """ - - images: Union[List[PIL.Image.Image], np.ndarray] - nsfw_content_detected: Optional[List[bool]] - unsafe_images: Optional[Union[List[PIL.Image.Image], np.ndarray]] - applied_safety_concept: Optional[str] - - -if is_transformers_available() and is_torch_available(): - from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe - from .safety_checker import SafeStableDiffusionSafetyChecker diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py b/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py deleted file mode 100644 index 1b517bdec5703495afeee26a1c8ed4cb98561d7c..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py +++ /dev/null @@ -1,309 +0,0 @@ -# Copyright 2023 Katherine Crowson and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput, logging, randn_tensor -from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -@dataclass -# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerAncestralDiscrete -class EulerAncestralDiscreteSchedulerOutput(BaseOutput): - """ - Output class for the scheduler's step function output. 
- - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - The predicted denoised sample (x_{0}) based on the model output from the current timestep. - `pred_original_sample` can be used to preview progress or for guidance. - """ - - prev_sample: torch.FloatTensor - pred_original_sample: Optional[torch.FloatTensor] = None - - -# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar -def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999) -> torch.Tensor: - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. - - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - - def alpha_bar(time_step): - return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return torch.tensor(betas, dtype=torch.float32) - - -class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): - """ - Ancestral sampling with Euler method steps. Based on the original k-diffusion implementation by Katherine Crowson: - https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L72 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear` or `scaled_linear`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. 
- prediction_type (`str`, default `epsilon`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - - """ - - _compatibles = [e.name for e in KarrasDiffusionSchedulers] - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[Union[np.ndarray, List[float]]] = None, - prediction_type: str = "epsilon", - ): - if trained_betas is not None: - self.betas = torch.tensor(trained_betas, dtype=torch.float32) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. - self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) - self.sigmas = torch.from_numpy(sigmas) - - # standard deviation of the initial noise distribution - self.init_noise_sigma = self.sigmas.max() - - # setable values - self.num_inference_steps = None - timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() - self.timesteps = torch.from_numpy(timesteps) - self.is_scale_input_called = False - - def scale_model_input( - self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] - ) -> torch.FloatTensor: - """ - Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. - - Args: - sample (`torch.FloatTensor`): input sample - timestep (`float` or `torch.FloatTensor`): the current timestep in the diffusion chain - - Returns: - `torch.FloatTensor`: scaled input sample - """ - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - step_index = (self.timesteps == timestep).nonzero().item() - sigma = self.sigmas[step_index] - sample = sample / ((sigma**2 + 1) ** 0.5) - self.is_scale_input_called = True - return sample - - def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): - """ - Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - device (`str` or `torch.device`, optional): - the device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
- """ - self.num_inference_steps = num_inference_steps - - timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy() - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) - sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) - self.sigmas = torch.from_numpy(sigmas).to(device=device) - if str(device).startswith("mps"): - # mps does not support float64 - self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) - else: - self.timesteps = torch.from_numpy(timesteps).to(device=device) - - def step( - self, - model_output: torch.FloatTensor, - timestep: Union[float, torch.FloatTensor], - sample: torch.FloatTensor, - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - ) -> Union[EulerAncestralDiscreteSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`float`): current timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - generator (`torch.Generator`, optional): Random number generator. - return_dict (`bool`): option for returning tuple rather than EulerAncestralDiscreteSchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.EulerAncestralDiscreteSchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.EulerAncestralDiscreteSchedulerOutput`] if `return_dict` is True, otherwise - a `tuple`. When returning a tuple, the first element is the sample tensor. - - """ - - if ( - isinstance(timestep, int) - or isinstance(timestep, torch.IntTensor) - or isinstance(timestep, torch.LongTensor) - ): - raise ValueError( - ( - "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" - " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass" - " one of the `scheduler.timesteps` as a timestep." - ), - ) - - if not self.is_scale_input_called: - logger.warning( - "The `scale_model_input` function should be called before `step` to ensure correct denoising. " - "See `StableDiffusionPipeline` for a usage example." - ) - - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - - step_index = (self.timesteps == timestep).nonzero().item() - sigma = self.sigmas[step_index] - - # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise - if self.config.prediction_type == "epsilon": - pred_original_sample = sample - sigma * model_output - elif self.config.prediction_type == "v_prediction": - # * c_out + input * c_skip - pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) - elif self.config.prediction_type == "sample": - raise NotImplementedError("prediction_type not implemented yet: sample") - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" - ) - - sigma_from = self.sigmas[step_index] - sigma_to = self.sigmas[step_index + 1] - sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5 - sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5 - - # 2. 
Convert to an ODE derivative - derivative = (sample - pred_original_sample) / sigma - - dt = sigma_down - sigma - - prev_sample = sample + derivative * dt - - device = model_output.device - noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator) - - prev_sample = prev_sample + noise * sigma_up - - if not return_dict: - return (prev_sample,) - - return EulerAncestralDiscreteSchedulerOutput( - prev_sample=prev_sample, pred_original_sample=pred_original_sample - ) - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.FloatTensor, - ) -> torch.FloatTensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - self.sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) - if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): - # mps does not support float64 - self.timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) - timesteps = timesteps.to(original_samples.device, dtype=torch.float32) - else: - self.timesteps = self.timesteps.to(original_samples.device) - timesteps = timesteps.to(original_samples.device) - - schedule_timesteps = self.timesteps - step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] - - sigma = self.sigmas[step_indices].flatten() - while len(sigma.shape) < len(original_samples.shape): - sigma = sigma.unsqueeze(-1) - - noisy_samples = original_samples + noise * sigma - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/declare-lab/tango/diffusers/tests/schedulers/test_scheduler_dpm_multi.py b/spaces/declare-lab/tango/diffusers/tests/schedulers/test_scheduler_dpm_multi.py deleted file mode 100644 index 295bbe882746793b09b196f054e392e22415d455..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/tests/schedulers/test_scheduler_dpm_multi.py +++ /dev/null @@ -1,245 +0,0 @@ -import tempfile - -import torch - -from diffusers import ( - DEISMultistepScheduler, - DPMSolverMultistepScheduler, - DPMSolverSinglestepScheduler, - UniPCMultistepScheduler, -) - -from .test_schedulers import SchedulerCommonTest - - -class DPMSolverMultistepSchedulerTest(SchedulerCommonTest): - scheduler_classes = (DPMSolverMultistepScheduler,) - forward_default_kwargs = (("num_inference_steps", 25),) - - def get_scheduler_config(self, **kwargs): - config = { - "num_train_timesteps": 1000, - "beta_start": 0.0001, - "beta_end": 0.02, - "beta_schedule": "linear", - "solver_order": 2, - "prediction_type": "epsilon", - "thresholding": False, - "sample_max_value": 1.0, - "algorithm_type": "dpmsolver++", - "solver_type": "midpoint", - "lower_order_final": False, - } - - config.update(**kwargs) - return config - - def check_over_configs(self, time_step=0, **config): - kwargs = dict(self.forward_default_kwargs) - num_inference_steps = kwargs.pop("num_inference_steps", None) - sample = self.dummy_sample - residual = 0.1 * sample - dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] - - for scheduler_class in self.scheduler_classes: - scheduler_config = self.get_scheduler_config(**config) - scheduler = scheduler_class(**scheduler_config) - scheduler.set_timesteps(num_inference_steps) - # copy over dummy past residuals - scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] - - with tempfile.TemporaryDirectory() as tmpdirname: - 
scheduler.save_config(tmpdirname) - new_scheduler = scheduler_class.from_pretrained(tmpdirname) - new_scheduler.set_timesteps(num_inference_steps) - # copy over dummy past residuals - new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] - - output, new_output = sample, sample - for t in range(time_step, time_step + scheduler.config.solver_order + 1): - output = scheduler.step(residual, t, output, **kwargs).prev_sample - new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample - - assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" - - def test_from_save_pretrained(self): - pass - - def check_over_forward(self, time_step=0, **forward_kwargs): - kwargs = dict(self.forward_default_kwargs) - num_inference_steps = kwargs.pop("num_inference_steps", None) - sample = self.dummy_sample - residual = 0.1 * sample - dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] - - for scheduler_class in self.scheduler_classes: - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - scheduler.set_timesteps(num_inference_steps) - - # copy over dummy past residuals (must be after setting timesteps) - scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] - - with tempfile.TemporaryDirectory() as tmpdirname: - scheduler.save_config(tmpdirname) - new_scheduler = scheduler_class.from_pretrained(tmpdirname) - # copy over dummy past residuals - new_scheduler.set_timesteps(num_inference_steps) - - # copy over dummy past residual (must be after setting timesteps) - new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] - - output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample - new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample - - assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" - - def full_loop(self, scheduler=None, **config): - if scheduler is None: - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config(**config) - scheduler = scheduler_class(**scheduler_config) - - num_inference_steps = 10 - model = self.dummy_model() - sample = self.dummy_sample_deter - scheduler.set_timesteps(num_inference_steps) - - for i, t in enumerate(scheduler.timesteps): - residual = model(sample, t) - sample = scheduler.step(residual, t, sample).prev_sample - - return sample - - def test_step_shape(self): - kwargs = dict(self.forward_default_kwargs) - - num_inference_steps = kwargs.pop("num_inference_steps", None) - - for scheduler_class in self.scheduler_classes: - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - - sample = self.dummy_sample - residual = 0.1 * sample - - if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): - scheduler.set_timesteps(num_inference_steps) - elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): - kwargs["num_inference_steps"] = num_inference_steps - - # copy over dummy past residuals (must be done after set_timesteps) - dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] - scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] - - time_step_0 = scheduler.timesteps[5] - time_step_1 = scheduler.timesteps[6] - - output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample - output_1 = 
scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample - - self.assertEqual(output_0.shape, sample.shape) - self.assertEqual(output_0.shape, output_1.shape) - - def test_timesteps(self): - for timesteps in [25, 50, 100, 999, 1000]: - self.check_over_configs(num_train_timesteps=timesteps) - - def test_thresholding(self): - self.check_over_configs(thresholding=False) - for order in [1, 2, 3]: - for solver_type in ["midpoint", "heun"]: - for threshold in [0.5, 1.0, 2.0]: - for prediction_type in ["epsilon", "sample"]: - self.check_over_configs( - thresholding=True, - prediction_type=prediction_type, - sample_max_value=threshold, - algorithm_type="dpmsolver++", - solver_order=order, - solver_type=solver_type, - ) - - def test_prediction_type(self): - for prediction_type in ["epsilon", "v_prediction"]: - self.check_over_configs(prediction_type=prediction_type) - - def test_solver_order_and_type(self): - for algorithm_type in ["dpmsolver", "dpmsolver++"]: - for solver_type in ["midpoint", "heun"]: - for order in [1, 2, 3]: - for prediction_type in ["epsilon", "sample"]: - self.check_over_configs( - solver_order=order, - solver_type=solver_type, - prediction_type=prediction_type, - algorithm_type=algorithm_type, - ) - sample = self.full_loop( - solver_order=order, - solver_type=solver_type, - prediction_type=prediction_type, - algorithm_type=algorithm_type, - ) - assert not torch.isnan(sample).any(), "Samples have nan numbers" - - def test_lower_order_final(self): - self.check_over_configs(lower_order_final=True) - self.check_over_configs(lower_order_final=False) - - def test_inference_steps(self): - for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: - self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) - - def test_full_loop_no_noise(self): - sample = self.full_loop() - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_mean.item() - 0.3301) < 1e-3 - - def test_full_loop_no_noise_thres(self): - sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_mean.item() - 0.6405) < 1e-3 - - def test_full_loop_with_v_prediction(self): - sample = self.full_loop(prediction_type="v_prediction") - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_mean.item() - 0.2251) < 1e-3 - - def test_switch(self): - # make sure that iterating over schedulers with same config names gives same results - # for defaults - scheduler = DPMSolverMultistepScheduler(**self.get_scheduler_config()) - sample = self.full_loop(scheduler=scheduler) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_mean.item() - 0.3301) < 1e-3 - - scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) - scheduler = UniPCMultistepScheduler.from_config(scheduler.config) - scheduler = DEISMultistepScheduler.from_config(scheduler.config) - scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) - - sample = self.full_loop(scheduler=scheduler) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_mean.item() - 0.3301) < 1e-3 - - def test_fp16_support(self): - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) - scheduler = scheduler_class(**scheduler_config) - - num_inference_steps = 10 - model = self.dummy_model() - sample = self.dummy_sample_deter.half() - scheduler.set_timesteps(num_inference_steps) - - for i, t in 
enumerate(scheduler.timesteps): - residual = model(sample, t) - sample = scheduler.step(residual, t, sample).prev_sample - - assert sample.dtype == torch.float16 diff --git a/spaces/deprem-ml/deprem-ocr/app.py b/spaces/deprem-ml/deprem-ocr/app.py deleted file mode 100644 index 34b234b2f7d95945778bca40b32bf4aac87a6019..0000000000000000000000000000000000000000 --- a/spaces/deprem-ml/deprem-ocr/app.py +++ /dev/null @@ -1,153 +0,0 @@ -from PIL import ImageFilter, Image -from easyocr import Reader -import gradio as gr -import numpy as np -import openai -import ast -from transformers import pipeline -import os - -from openai_api import OpenAI_API -import utils - -openai.api_key = os.getenv("API_KEY") -reader = Reader(["tr"]) - - -def get_text(input_img): - img = Image.fromarray(input_img) - detailed = np.asarray(img.filter(ImageFilter.DETAIL)) - result = reader.readtext(detailed, detail=0, paragraph=True) - return " ".join(result) - - -# Submit button -def get_parsed_address(input_img): - - address_full_text = get_text(input_img) - return ner_response(address_full_text) - - -def save_deta_db(input): - eval_result = ast.literal_eval(input) - utils.write_db(eval_result) - return - - -def update_component(): - return gr.update(value="Gönderildi, teşekkürler.", visible=True) - - -def clear_textbox(value): - return gr.update(value="") - - -def text_dict(input): - eval_result = ast.literal_eval(input) - return ( - str(eval_result["il"]), - str(eval_result["ilce"]), - str(eval_result["mahalle"]), - str(eval_result["sokak"]), - str(eval_result["Apartman/site"]), - str(eval_result["no"]), - str(eval_result["ad-soyad"]), - str(eval_result["dis kapi no"]), - ) - - -def ner_response(ocr_input): - - ner_pipe = pipeline("token-classification","deprem-ml/deprem-ner", aggregation_strategy="first") - predictions = ner_pipe(ocr_input) - resp = {} - - for item in predictions: - print(item) - key = item["entity_group"] - resp[key] = item["word"] - - resp["input"] = ocr_input - dict_keys = ["il", "ilce", "mahalle", "sokak", "Apartman/site", "no", "ad-soyad", "dis kapi no"] - for key in dict_keys: - if key not in resp.keys(): - resp[key] = "" - return resp - - -# User Interface -with gr.Blocks() as demo: - gr.Markdown( - """ - # Enkaz Bildirme Uygulaması - """ - ) - gr.Markdown( - "Bu uygulamada ekran görüntüsü sürükleyip bırakarak AFAD'a enkaz bildirimi yapabilirsiniz. Mesajı metin olarak da girebilirsiniz, tam adresi ayrıştırıp döndürür. API olarak kullanmak isterseniz sayfanın en altında use via api'ya tıklayın." 
- ) - with gr.Row(): - with gr.Column(): - img_area = gr.Image(label="Ekran Görüntüsü yükleyin 👇") - img_area_button = gr.Button(value="Görüntüyü İşle", label="Submit") - - with gr.Column(): - text_area = gr.Textbox(label="Metin yükleyin 👇 ", lines=8) - text_area_button = gr.Button(value="Metni Yükle", label="Submit") - - open_api_text = gr.Textbox(label="Tam Adres") - - with gr.Column(): - with gr.Row(): - il = gr.Textbox(label="İl", interactive=True, show_progress=False) - ilce = gr.Textbox(label="İlçe", interactive=True, show_progress=False) - with gr.Row(): - mahalle = gr.Textbox( - label="Mahalle", interactive=True, show_progress=False - ) - sokak = gr.Textbox( - label="Sokak/Cadde/Bulvar", interactive=True, show_progress=False - ) - with gr.Row(): - no = gr.Textbox(label="Telefon", interactive=True, show_progress=False) - with gr.Row(): - ad_soyad = gr.Textbox( - label="İsim Soyisim", interactive=True, show_progress=False - ) - apartman = gr.Textbox(label="apartman", interactive=True, show_progress=False) - with gr.Row(): - dis_kapi_no = gr.Textbox(label="Kapı No", interactive=True, show_progress=False) - - img_area_button.click( - get_parsed_address, - inputs=img_area, - outputs=open_api_text, - api_name="upload-image", - ) - - text_area_button.click( - ner_response, text_area, open_api_text, api_name="upload-text" - ) - - - open_api_text.change( - text_dict, - open_api_text, - [il, ilce, mahalle, sokak, no, apartman, ad_soyad, dis_kapi_no], - ) - ocr_button = gr.Button(value="Sadece OCR kullan") - ocr_button.click( - get_text, - inputs=img_area, - outputs=text_area, - api_name="get-ocr-output", - ) - submit_button = gr.Button(value="Veriyi Birimlere Yolla") - submit_button.click(save_deta_db, open_api_text) - done_text = gr.Textbox(label="Done", value="Not Done", visible=False) - submit_button.click(update_component, outputs=done_text) - for txt in [il, ilce, mahalle, sokak, apartman, no, ad_soyad, dis_kapi_no]: - submit_button.click(fn=clear_textbox, inputs=txt, outputs=txt) - - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Chegg Premium Account For Free.md b/spaces/diacanFperku/AutoGPT/Chegg Premium Account For Free.md deleted file mode 100644 index f935ad1edee8dd8a5365e337eaf3c6309d3554c7..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Chegg Premium Account For Free.md +++ /dev/null @@ -1,119 +0,0 @@ - -

    How to Get a Chegg Premium Account for Free and Enjoy Unlimited Benefits

    - -

    If you are a student who wants to save money on textbooks, homework help, online courses, and more, you might be interested in getting a Chegg premium account for free. Chegg is a popular online education platform that offers various services for students of all levels. With a Chegg premium account, you can access millions of textbooks for rent or purchase, get 24/7 expert assistance with your assignments, learn new skills with interactive courses, and find opportunities for internships and scholarships.

    - -

    However, a Chegg premium account is not cheap. It costs $14.95 per month for Chegg Study, $9.95 per month for Chegg Math Solver, $9.95 per month for Chegg Writing, and $19.95 per month for Chegg Study Pack. If you want to use all these features, you will have to pay a hefty sum every month. But don't worry, there are some ways to get a Chegg premium account for free or at a lower cost. In this article, we will show you how to do that.

    -

    chegg premium account for free


    Downloadhttps://gohhs.com/2uFUsq



    - -

    Method One: Use Chegg Trial Period

    - -

    One of the easiest ways to get a Chegg premium account for free is to use the trial period offered by Chegg. Chegg allows new users to try out their services for free for 7 days. During this period, you can access all the features of Chegg without paying anything. You just need to sign up with your email address and password, and provide your payment details. You can cancel your subscription anytime before the trial ends and you won't be charged.

    - -

    To sign up for the Chegg trial period, follow these steps:

    - -
      -
    1. Go to Chegg.com and click on "Try Chegg Study".
    2. Choose the plan that suits your needs and click on "Start your free trial".
    3. Enter your email address and password and click on "Create account".
    4. Enter your payment details and click on "Start your free trial".
    5. Enjoy your free access to Chegg premium features for 7 days.
    - -

    Remember to cancel your subscription before the trial ends if you don't want to be charged. To cancel your subscription, follow these steps:

    -

    - -
      -
    1. Go to Chegg.com and log in to your account.
    2. Click on your profile icon at the top right corner and select "My account".
    3. Click on "Orders" and then on "Subscriptions".
    4. Find the plan that you want to cancel and click on "Cancel subscription".
    5. Confirm your cancellation and you're done.
    - -

    Method Two: Use Free Chegg Accounts and Passwords

    - -

    Another way to get a Chegg premium account for free is to use free Chegg accounts and passwords that are available on the internet. There are many websites that provide working Chegg accounts and passwords for free. You can use these accounts to log in to Chegg and access the premium features without paying anything. However, you should be careful when using these accounts as they might not be safe or reliable. Some of them might be hacked or stolen from other users, and some of them might not work or be expired.

    - -

    To use free Chegg accounts and passwords, follow these steps:

    - -
      -
    1. Search for websites that provide free Chegg accounts and passwords on Google or other search engines.
    2. Select a website that looks trustworthy and has positive reviews from other users.
    3. Copy one of the Chegg accounts and passwords from the website.
    4. Go to Chegg.com and click on "Sign in".
    5. Paste the email address and password that you copied and click on "Sign in".
    6. If the account works, you can access the premium features of Chegg for free.
    - -

    If the account doesn't work or is already in use by someone else, you can try another one until you find one that works. However, you should not change the password or personal information of the account as it might belong to someone else. You should also not share the account with anyone else as it might get banned or suspended by Chegg.

    - -

    Method Three: Use Coupon Codes and Discounts

    - -

    A third way to get a Chegg premium account for free or at a lower cost is to use coupon codes and discounts that are offered by Chegg or other websites. Coupon codes are special codes that you can enter at checkout to get a discount on your purchase. Discounts are special offers that reduce the price of your purchase without requiring any code. You can find coupon codes and discounts for Chegg on various websites such as RetailMeNot, CouponCabin, Groupon, etc.

    - -

    To use coupon codes and discounts for Chegg, follow these steps:

    - -
      -
    1. Search for websites that provide coupon codes and discounts for Chegg on Google or other search engines.
    2. Select a website that looks trustworthy and has positive reviews from other users.
    3. Browse through the available coupon codes and discounts and choose one that suits your needs.
    4. Copy the coupon code or click on the discount link.
    5. Go to Chegg.com and select the plan that you want to purchase.
    6. Paste the coupon code at checkout or apply the discount automatically if you clicked on the link.
    7. Enjoy your reduced price or free access to Chegg premium features.
    - -

    Note that some coupon codes and discounts might have expiration dates or terms and conditions that limit their usage. You should check these details before using them.

    - -

    Conclusion

    - -

    Chegg is a great online education platform that offers various services for students of all levels. However, a Chegg premium account can be expensive for some students who want to save money on their education expenses. Fortunately, there are some ways to get a Chegg premium account for free or at a lower cost by using the trial period, free accounts and passwords, or coupon codes and discounts. These methods can help you access all the features of Chegg without breaking your bank.

    - -

    We hope this article was helpful for you in getting a chegg premium account for free. If you have any questions or suggestions, feel free to leave them in the comments section below.

    -

    Method Four: Use Textsheet Alternatives

    - -

    A fourth way to get a Chegg premium account for free or at a lower cost is to use Textsheet alternatives. Textsheet was a popular website that provided free Chegg answers and solutions to students. However, it was shut down by Chegg due to copyright infringement. Since then, many other websites have emerged that offer similar services to Textsheet. These websites use Chegg API or other methods to scrape Chegg answers and solutions and provide them to users for free or at a nominal fee.

    - -

    To use Textsheet alternatives, follow these steps:

    - -
      -
    1. Search for websites that provide Textsheet alternatives on Google or other search engines.
    2. Select a website that looks trustworthy and has positive reviews from other users.
    3. Enter the Chegg question URL or paste the question text on the website.
    4. Click on the submit button or press enter.
    5. Wait for the website to fetch the Chegg answer or solution.
    6. View the Chegg answer or solution for free or after paying a small fee.
    - -

    Some of the popular Textsheet alternatives are Litanswers, Slader, CourseHero, Studylib, etc. However, you should be careful when using these websites as they might not be legal or safe. Some of them might contain malware or viruses, and some of them might violate Chegg's terms of service. You should also not rely on these websites for accurate or complete answers or solutions as they might be outdated or incorrect.

    - -

    Method Five: Use Online Forums and Communities

    - -

    A fifth way to get a Chegg premium account for free or at a lower cost is to use online forums and communities where students help each other with their homework and assignments. There are many online platforms where you can post your Chegg questions and get answers or solutions from other students who have Chegg accounts. You can also help other students with their questions and earn credits or rewards that you can use to get Chegg answers or solutions.

    - -

    To use online forums and communities, follow these steps:

    - -
      -
    1. Search for online platforms that provide homework help on Google or other search engines.
    2. Select a platform that looks trustworthy and has positive reviews from other users.
    3. Create an account on the platform using your email address and password.
    4. Post your Chegg question on the platform and wait for other users to respond.
    5. View the Chegg answer or solution provided by other users for free or after paying a small fee.
    6. Help other users with their questions and earn credits or rewards that you can use to get more Chegg answers or solutions.
    - -

    Some of the popular online platforms that provide homework help are Reddit, Quora, HomeworkMarket, Chegg Study Community, etc. However, you should be careful when using these platforms as they might not be legal or safe. Some of them might contain spam or scams, and some of them might violate Chegg's terms of service. You should also not rely on these platforms for accurate or complete answers or solutions as they might be outdated or incorrect.

    - -

    Conclusion

    - -

    Chegg is a great online education platform that offers various services for students of all levels. However, a Chegg premium account can be expensive for some students who want to save money on their education expenses. Fortunately, there are some ways to get a Chegg premium account for free or at a lower cost by using the trial period, free accounts and passwords, coupon codes and discounts, Textsheet alternatives, or online forums and communities. These methods can help you access all the features of Chegg without breaking your bank.

    - -

    We hope this article was helpful for you in getting a chegg premium account for free. If you have any questions or suggestions, feel free to leave them in the comments section below.

    -


    -
    -
    \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/HD Online Player (movavi Video Converter 12 Clave De Activacion -softpile -kiber -moviedox -egydown.md b/spaces/diacanFperku/AutoGPT/HD Online Player (movavi Video Converter 12 Clave De Activacion -softpile -kiber -moviedox -egydown.md deleted file mode 100644 index 07b6c2de623ab662fb15982d23785d02abd8c290..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/HD Online Player (movavi Video Converter 12 Clave De Activacion -softpile -kiber -moviedox -egydown.md +++ /dev/null @@ -1,29 +0,0 @@ -
    -

    How to Use Movavi Video Converter 12 to Play HD Videos Online

    -

    Movavi Video Converter 12 is a powerful program that allows you to convert videos to various formats, including HD. With Movavi Video Converter 12, you can also play HD videos online using the built-in HD Online Player. In this article, we will show you how to use Movavi Video Converter 12 to play HD videos online and how to activate the software with a valid clave de activacion (activation key).

    -

    Step 1: Download and Install Movavi Video Converter 12

    -

    To use Movavi Video Converter 12, you need to download and install the software on your computer. You can download Movavi Video Converter 12 from the official website[^1^] or from other trusted sources. The installation process is simple and fast. Just follow the instructions on the screen and choose the language and destination folder for the program.

    -

    HD Online Player (movavi video converter 12 clave de activacion -softpile -kiber -moviedox -egydown


    Download Filehttps://gohhs.com/2uFUme



    -

    Step 2: Add Videos to Movavi Video Converter 12

    -

    After installing Movavi Video Converter 12, launch the program and click on the Add Media button. Then, choose Add Video and browse your computer for the videos you want to convert and play online. You can add multiple videos at once and preview them in the built-in player.

    -

    Step 3: Choose Output Format and Quality

    -

    Next, you need to choose the output format and quality for your videos. Click on the Output format button and select one of the online video formats, such as MP4, FLV, WebM, or AVI. You can also choose a preset for a specific device or platform, such as YouTube, Facebook, iPhone, or Android. To adjust the quality settings, click on the cogwheel icon next to the output format and choose one of the options: Low Quality, Economy, Standard, High Quality, or Original Size.

    -

    Step 4: Activate Movavi Video Converter 12 with Clave de Activacion

    -

    Before you can use Movavi Video Converter 12 to play HD videos online, you need to activate the software with a valid clave de activacion (activation key). A clave de activacion is a special code that unlocks all the features of Movavi Video Converter 12. To get a clave de activacion, you need to purchase a license from the official website[^1^] or from other authorized sellers. You can also try a free trial version of Movavi Video Converter 12 for 7 days[^1^]. To activate Movavi Video Converter 12 with a clave de activacion, follow these steps:

    -
      -
    • Click on the Menu button in the upper right corner of the program window and select Activate Software.
    • Enter your email address and your clave de activacion in the corresponding fields.
    • Click on Activate.
    • Restart Movavi Video Converter 12.
    -

    Note: Do not use illegal or pirated claves de activacion that are posted on some websites. These claves de activacion may not work properly or may cause problems with your computer. They may also violate the law and expose you to legal risks[^1^]. Always use legal and official claves de activacion from Movavi or its partners.

    -

    Step 5: Play HD Videos Online with HD Online Player

    -

    Now that you have activated Movavi Video Converter 12 with a clave de activacion, you can use it to play HD videos online with HD Online Player. HD Online Player is a feature of Movavi Video Converter 12 that allows you to stream your converted videos directly from your computer to any web browser. To use HD Online Player, follow these steps:

    -

    -
      -
    • Click on the Share button in the lower right corner of the program window and select Upload Online.
    • Select HD Online Player as the destination for your videos.
    • Click on Start.
    • A new window will open with a link to your online video player. Copy this link and paste it into any web browser on any device.
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Mcd001.ps2 WWE Smackdown - Here Comes The Pain! (PCSX2 Memory Card File For PlayStation 2) [SAVED GAME STATE - EVERYTHINK UNLOCKED].rar.md b/spaces/diacanFperku/AutoGPT/Mcd001.ps2 WWE Smackdown - Here Comes The Pain! (PCSX2 Memory Card File For PlayStation 2) [SAVED GAME STATE - EVERYTHINK UNLOCKED].rar.md deleted file mode 100644 index bf156c23c8ce83bba093059f19ee7b054087506b..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Mcd001.ps2 WWE Smackdown - Here Comes The Pain! (PCSX2 Memory Card File For PlayStation 2) [SAVED GAME STATE - EVERYTHINK UNLOCKED].rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Mcd001.ps2 WWE Smackdown - Here Comes The Pain! (PCSX2 Memory Card File for PlayStation 2) [SAVED GAME STATE - EVERYTHINK UNLOCKED].rar


      Download Filehttps://gohhs.com/2uFTY3



      -
      -- here's my new game data! [game state - no cheat codes!].zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - my game data (no in-game cheat codes and memory cheat codes).zip - my game data (no in-game cheat codes and memory cheat codes).rar - 4fefd39f24
      -
      -
      -

      diff --git a/spaces/diacanFperku/AutoGPT/Psychology From Inquiry To Understanding (4th Edition) Books.pdf VERIFIED.md b/spaces/diacanFperku/AutoGPT/Psychology From Inquiry To Understanding (4th Edition) Books.pdf VERIFIED.md deleted file mode 100644 index e2d8ac28cdd41d82c21f35f50c51d846efef2f35..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Psychology From Inquiry To Understanding (4th Edition) Books.pdf VERIFIED.md +++ /dev/null @@ -1,62 +0,0 @@ -

      Psychology: From Inquiry To Understanding (4th Edition) Books.pdf


      Download Filehttps://gohhs.com/2uFUci



      -
      -psychology: from inquiry to understanding (4th edition) pdf. - -Download MPA in Psychology Psychology Download PDF Ebook. Bachelor of Science in Psychology: Graduate Handbook (a comprehensive guide to becoming a graduate of the University of California-San Diego). Enter the world of psychology through the use of these links and videos to learn more about the different disciplines in this field. - -Psychology: download pdf psychology: from inquiry to understanding (4th edition) psychology: from inquiry to understanding (4th edition) pdf. September 3, 2013. The Psychology Association (PSYCH-UK) is the national organisation of the UK profession of psychology.{ - - "name": "Pylons", - - "version": "0.2.4", - - "desc": "Pylons is an MVC framework with unit testing and embedded logging", - - "main": "src/pylons.js", - - "scripts": - - "test": "make test" - - , - - "repository": { - - "type": "git", - - "url": "git://github.com/Pylons/Pylons.git" - - "dependencies": { - - "Django": "0.4.3", - - "Django-Pelican": "0.1.3", - - "Django-Pylons": "0.4.3", - - "DjangoTemplates": "0.3.5", - - "Django-Pagination": "0.4.1", - - "DjangoSession": "0.3.1", - - "DynamicWidgets": "0.2.4", - - "Django-Simple-Form": "0.3.1", - - "Django-Filebrowser": "0.2.0", - - "Django-Html-Table": "0.4.0", - - "FormToolkit": "1.1.2", - - "repoze.who": "0.2", - - "SimpleHTTPServer": "0.5.0", - - "Sortable": "0.1.3", - - "email": "0.2.6", 4fefd39f24
      -
      -
      -

      diff --git a/spaces/digitalxingtong/Azuma-Bert-VITS2/short_audio_transcribe.py b/spaces/digitalxingtong/Azuma-Bert-VITS2/short_audio_transcribe.py deleted file mode 100644 index f1e8b30671f2c2f2fa3c93feb1f4edd3fbe2f545..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Azuma-Bert-VITS2/short_audio_transcribe.py +++ /dev/null @@ -1,122 +0,0 @@ -import whisper -import os -import json -import torchaudio -import argparse -import torch - -lang2token = { - 'zh': "[ZH]", - 'ja': "[JA]", - "en": "[EN]", - } -def transcribe_one(audio_path): - # load audio and pad/trim it to fit 30 seconds - audio = whisper.load_audio(audio_path) - audio = whisper.pad_or_trim(audio) - - # make log-Mel spectrogram and move to the same device as the model - mel = whisper.log_mel_spectrogram(audio).to(model.device) - - # detect the spoken language - _, probs = model.detect_language(mel) - print(f"Detected language: {max(probs, key=probs.get)}") - lang = max(probs, key=probs.get) - # decode the audio - options = whisper.DecodingOptions(beam_size=5) - result = whisper.decode(model, mel, options) - - # print the recognized text - print(result.text) - return lang, result.text -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--languages", default="CJE") - parser.add_argument("--whisper_size", default="medium") - args = parser.parse_args() - if args.languages == "CJE": - lang2token = { - 'zh': "[ZH]", - 'ja': "[JA]", - "en": "[EN]", - } - elif args.languages == "CJ": - lang2token = { - 'zh': "[ZH]", - 'ja': "[JA]", - } - elif args.languages == "C": - lang2token = { - 'zh': "[ZH]", - } - assert (torch.cuda.is_available()), "Please enable GPU in order to run Whisper!" - model = whisper.load_model(args.whisper_size) - parent_dir = "./custom_character_voice/" - speaker_names = list(os.walk(parent_dir))[0][1] - speaker_annos = [] - total_files = sum([len(files) for r, d, files in os.walk(parent_dir)]) - # resample audios - # 2023/4/21: Get the target sampling rate - with open("./configs/config.json", 'r', encoding='utf-8') as f: - hps = json.load(f) - target_sr = hps['data']['sampling_rate'] - processed_files = 0 - for speaker in speaker_names: - for i, wavfile in enumerate(list(os.walk(parent_dir + speaker))[0][2]): - # try to load file as audio - if wavfile.startswith("processed_"): - continue - try: - wav, sr = torchaudio.load(parent_dir + speaker + "/" + wavfile, frame_offset=0, num_frames=-1, normalize=True, - channels_first=True) - wav = wav.mean(dim=0).unsqueeze(0) - if sr != target_sr: - wav = torchaudio.transforms.Resample(orig_freq=sr, new_freq=target_sr)(wav) - if wav.shape[1] / sr > 20: - print(f"{wavfile} too long, ignoring\n") - save_path = parent_dir + speaker + "/" + f"processed_{i}.wav" - torchaudio.save(save_path, wav, target_sr, channels_first=True) - # transcribe text - lang, text = transcribe_one(save_path) - if lang not in list(lang2token.keys()): - print(f"{lang} not supported, ignoring\n") - continue - text = "ZH|" + text + "\n"# - #text = lang2token[lang] + text + lang2token[lang] + "\n" - speaker_annos.append(save_path + "|" + speaker + "|" + text) - - processed_files += 1 - print(f"Processed: {processed_files}/{total_files}") - except: - continue - - # # clean annotation - # import argparse - # import text - # from utils import load_filepaths_and_text - # for i, line in enumerate(speaker_annos): - # path, sid, txt = line.split("|") - # cleaned_text = text._clean_text(txt, ["cjke_cleaners2"]) - # cleaned_text += "\n" if not 
cleaned_text.endswith("\n") else "" - # speaker_annos[i] = path + "|" + sid + "|" + cleaned_text - # write into annotation - if len(speaker_annos) == 0: - print("Warning: no short audios found, this IS expected if you have only uploaded long audios, videos or video links.") - print("this IS NOT expected if you have uploaded a zip file of short audios. Please check your file structure or make sure your audio language is supported.") - with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f: - for line in speaker_annos: - f.write(line) - - # import json - # # generate new config - # with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f: - # hps = json.load(f) - # # modify n_speakers - # hps['data']["n_speakers"] = 1000 + len(speaker2id) - # # add speaker names - # for speaker in speaker_names: - # hps['speakers'][speaker] = speaker2id[speaker] - # # save modified config - # with open("./configs/modified_finetune_speaker.json", 'w', encoding='utf-8') as f: - # json.dump(hps, f, indent=2) - # print("finished") diff --git a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/text/english_bert_mock.py b/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/text/english_bert_mock.py deleted file mode 100644 index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/text/english_bert_mock.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch - - -def get_bert_feature(norm_text, word2ph): - return torch.zeros(1024, sum(word2ph)) diff --git a/spaces/digitalxingtong/Xingtong-2dall-Bert-VITS2/train_ms.py b/spaces/digitalxingtong/Xingtong-2dall-Bert-VITS2/train_ms.py deleted file mode 100644 index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-2dall-Bert-VITS2/train_ms.py +++ /dev/null @@ -1,402 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -import shutil -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - -torch.backends.cudnn.benchmark = True -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = True -torch.set_float32_matmul_precision('medium') -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." 
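    # Single-node multi-GPU training: one worker process per visible GPU is spawned via
    # mp.spawn below; workers rendezvous over localhost:65280 (gloo backend on Windows,
    # NCCL otherwise) before the models are wrapped in DistributedDataParallel.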
- - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '65280' - - hps = utils.get_hparams() - if not hps.cont: - shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth') - shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth') - shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth') - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - batch_size=1, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: - print("Using noise scaled MAS for VITS2") - use_noise_scaled_mas = True - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - use_noise_scaled_mas = False - mas_noise_scale_initial = 0.0 - noise_scale_delta = 0.0 - if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: - print("Using duration discriminator for VITS2") - use_duration_discriminator = True - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: - if hps.data.n_speakers == 0: - raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") - use_spk_conditioned_encoder = True - else: - print("Using normal encoder for VITS1") - use_spk_conditioned_encoder = False - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial = mas_noise_scale_initial, - noise_scale_delta = noise_scale_delta, - **hps.model).cuda(rank) - - freeze_enc = getattr(hps.model, "freeze_enc", False) - if freeze_enc: - print("freeze encoder !!!") - for param in net_g.enc_p.parameters(): - param.requires_grad = False - - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - 
eps=hps.train.eps) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - - pretrain_dir = None - if pretrain_dir is None: - try: - if net_dur_disc is not None: - _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) - _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer=not hps.cont) - _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer=not hps.cont) - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - else: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, - optim_g, True) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, - optim_d, True) - - - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - if net_dur_disc is not None: - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - else: - scheduler_dur_disc = None - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), 
spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update( - {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - if net_dur_disc is not None: - utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 5) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict.update({ - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - }) - audio_dict.update({ - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]] - }) - image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - 
audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - -if __name__ == "__main__": - main() diff --git a/spaces/diivien/Music-Popularity-Prediction/README.md b/spaces/diivien/Music-Popularity-Prediction/README.md deleted file mode 100644 index 5b795991c5272fd1a14c9e363f48f86077b0232c..0000000000000000000000000000000000000000 --- a/spaces/diivien/Music-Popularity-Prediction/README.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Music Popularity Prediction -emoji: 🚀 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.28.1 -app_file: app.py -pinned: false -python_version: 3.10.6 ---- - -# Music Popularity Prediction - -This repository contains a data science project that aims to predict the popularity of music using machine learning techniques. - -Check out the demo [here](https://huggingface.co/spaces/diivien/Music-Popularity-Prediction)! - -## Dataset - -This project uses the [Spotify Tracks Dataset](https://www.kaggle.com/datasets/maharshipandya/-spotify-tracks-dataset) available on Kaggle. This dataset contains information about Spotify tracks over a range of 125 different genres. Each track has several audio features associated with it, such as popularity, explicitness, danceability, energy, key, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, and time signature. - -You can download the dataset from the Kaggle website and use it to follow along with the analysis in this project. - -## Overview - -This repository contains a data science project that aims to predict the popularity of music using machine learning techniques. The project is a binary classification problem where the goal is to predict whether a song will be popular or not. The dataset used in this project is imbalanced, meaning that one class is significantly more common than the other. - -The project consists of three main parts: Data Cleaning, Exploratory Data Analysis, and Model Building. - -### Data Cleaning - -In the [Data Cleaning](https://github.com/diivien/Music-Popularity-Prediction/blob/master/Data%20Cleaning.ipynb) notebook, I clean and preprocess the data to prepare it for analysis. This involves several steps such as: - -- Removing unique columns -- Dropping null values -- Dropping duplicated rows (same artists and track name) -- Dropping artists and track name columns -- Dropping invalid tempo and time signature according to Spotify API -- Saving the cleaned dataset into a CSV file - -To get started with the data cleaning process, you can follow the instructions in the Data Cleaning notebook. This will guide you through the steps involved in cleaning and preprocessing the data. - -### Exploratory Data Analysis - -In the [Exploratory Data Analysis](https://github.com/diivien/Music-Popularity-Prediction/blob/master/Exploratory%20Data%20Analysis.ipynb) notebook, we explore the data and gain insights into the relationships between the features and the target variable. 
This involves generating various visualizations such as: - -- Correlation heatmaps to examine the relationships between pairs of continuous features -- Histograms to check the distribution of continuous features -- Bar charts to visualize categorical features -- Scatter plots to examine the relationships between pairs of continuous features -- Box plots to examine the distribution of continuous features by category -- Stacked bar charts to visualize conditional distributions - -These visualizations help us understand the data better and inform our decisions when building machine learning models. - -To get started with the exploratory data analysis process, you can follow the instructions in the Exploratory Data Analysis notebook. This will guide you through the steps involved in exploring and visualizing the data. - -### Model Building - -In the [Model Building](https://github.com/diivien/Music-Popularity-Prediction/blob/master/Model%20Building.ipynb) notebook, I build and evaluate machine learning models to predict music popularity. The models used in this analysis include Linear SVC, Random Forest Classifier, LightGBM, and CatBoost. As part of this process, I perform several preprocessing steps such as scaling the data using a MinMax scaler and encoding categorical variables using a target encoder. I also use SMOTE-NC in an imbalanced-learn pipeline to prevent data leakage. - -To tune the hyperparameters of our models, I use Optuna for multi-objective optimization and generate a Pareto front plot to determine the best hyperparameters. - -To evaluate the performance of our models, I use several metrics that are appropriate for imbalanced datasets, such as F1 score, balanced accuracy, and PR AUC. - -To get started with the model building process, you can follow the instructions in the Model Building notebook. This will guide you through the steps involved in building and evaluating machine learning models to predict music popularity. - -## Future Work - -I am currently working on several improvements and extensions to this project. Some include: - -- Testing a neural network classifier to see if it can improve the accuracy of our predictions -- Deploying an app on Gradio to make it easier for users to interact with our models and make predictions - - -## Citations - -If you use any of the following libraries in your project, please cite them as follows: - -- imbalanced-learn: Lemaître, G., Nogueira, F., & Aridas, C. K. (2017). Imbalanced-learn: A Python Toolbox to Tackle the Curse of Imbalanced Datasets in Machine Learning. Journal of Machine Learning Research, 18(17), 1-5. -- Matplotlib: Hunter, J. D. (2007). Matplotlib: A 2D graphics environment. Computing in Science & Engineering, 9(3), 90-95. -- Seaborn: Waskom, M., Botvinnik, O., O’Kane, D., Hobson, P., Lukauskas, S., Gemperline, D. C., ... & de Ruiter, J. (2021). seaborn: statistical data visualization. Journal of Open Source Software, 6(60), 3021. -- Joblib: Buitinck, L., Louppe, G., Blondel, M., Pedregosa, F., Mueller, A., Grisel, O., ... & Duchesnay, E. (2013). API design for machine learning software: experiences from the scikit-learn project. arXiv preprint arXiv:1309.0238. -- Feature-engine: Sole-Ribalta A. (2020) Feature-engine: A Python Package for Feature Engineering and Preprocessing in Machine Learning. In: Martínez-Villaseñor L., Batyrshin I., Mendoza O., Kuri-Morales Á. (eds) Advances in Artificial Intelligence - IBERAMIA 2020. IBERAMIA 2020. Lecture Notes in Computer Science, vol 12422. Springer, Cham. 
-- LightGBM: Ke, G., Meng, Q., Finley, T., Wang, T., Chen, W., Ma, W., ... & Liu, T. Y. (2017). LightGBM: A highly efficient gradient boosting decision tree. Advances in Neural Information Processing Systems. -- CatBoost: Prokhorenkova L.O., Gusev G.L., Vorobev A.V., Dorogush A.V., Gulin A.A.(2018). CatBoost: unbiased boosting with categorical features. Advances in Neural Information Processing Systems. -- Category Encoders: Micci-Barreca D (2001) A preprocessing scheme for high-cardinality categorical attributes in classification and prediction problems. ACM SIGKDD Explorations Newsletter 3(1):27–32 -- NumPy: Harris CR et al.(2020) Array programming with NumPy. Nature 585(7825):357–362 -- SDV (Synthetic Data Vault): Patki N et al.(2016) The Synthetic Data Vault. IEEE International Conference on Data Science and Advanced Analytics -- Optuna: Akiba T et al.(2019) Optuna: A Next-generation Hyperparameter Optimization Framework. Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining -- PyTorch: Paszke A et al.(2019) PyTorch: An Imperative Style High-performance Deep Learning Library. Advances in Neural Information Processing Systems -- SciKeras: Varma P et al.(2020) SciKeras: a high-level Scikit-Learn compatible API for TensorFlow's Keras module diff --git a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/docs/Low-VRAM-guide.md b/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/docs/Low-VRAM-guide.md deleted file mode 100644 index 1dc86f9c7f764a886c454f7f76a2a89a77140655..0000000000000000000000000000000000000000 --- a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/docs/Low-VRAM-guide.md +++ /dev/null @@ -1,51 +0,0 @@ -If you GPU is not large enough to fit a model, try these in the following order: - -### Load the model in 8-bit mode - -``` -python server.py --load-in-8bit -``` - -This reduces the memory usage by half with no noticeable loss in quality. Only newer GPUs support 8-bit mode. - -### Split the model across your GPU and CPU - -``` -python server.py --auto-devices -``` - -If you can load the model with this command but it runs out of memory when you try to generate text, try increasingly limiting the amount of memory allocated to the GPU until the error stops happening: - -``` -python server.py --auto-devices --gpu-memory 10 -python server.py --auto-devices --gpu-memory 9 -python server.py --auto-devices --gpu-memory 8 -... -``` - -where the number is in GiB. - -For finer control, you can also specify the unit in MiB explicitly: - -``` -python server.py --auto-devices --gpu-memory 8722MiB -python server.py --auto-devices --gpu-memory 4725MiB -python server.py --auto-devices --gpu-memory 3500MiB -... -``` - -Additionally, you can also set the `--no-cache` value to reduce the GPU usage while generating text at a performance cost. This may allow you to set a higher value for `--gpu-memory`, resulting in a net performance gain. - -### Send layers to a disk cache - -As a desperate last measure, you can split the model across your GPU, CPU, and disk: - -``` -python server.py --auto-devices --disk -``` - -With this, I am able to load a 30b model into my RTX 3090, but it takes 10 seconds to generate 1 word. - -### DeepSpeed (experimental) - -An experimental alternative to all of the above is to use DeepSpeed: [guide](DeepSpeed.md). 
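          The flags described in this guide can also be combined. A minimal sketch of one such invocation, assuming a card with roughly 8 GiB of free VRAM (the exact `--gpu-memory` value is an assumption you should tune for your own hardware):

          ```
          python server.py --auto-devices --gpu-memory 8 --no-cache
          ```

          Here `--no-cache` trades generation speed for memory, which is what may let the `--gpu-memory` limit sit slightly higher than it otherwise could.
          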
diff --git a/spaces/dragonSwing/video2slide/post_process.py b/spaces/dragonSwing/video2slide/post_process.py deleted file mode 100644 index 3d841a5347dff4b2a931f354bbea76533488d96b..0000000000000000000000000000000000000000 --- a/spaces/dragonSwing/video2slide/post_process.py +++ /dev/null @@ -1,77 +0,0 @@ -import imagehash -import os -from collections import deque -from PIL import Image -from tqdm import tqdm - - -def find_similar_images( - base_dir, hash_size=8, hashfunc=imagehash.dhash, queue_len=5, threshold=4 -): - snapshots_files = sorted(os.listdir(base_dir)) - - hash_dict = {} - hash_queue = deque([], maxlen=queue_len) - duplicates = [] - num_duplicates = 0 - - print("---" * 5, "Finding similar files", "---" * 5) - - with tqdm(snapshots_files) as t: - for file in t: - read_file = Image.open(os.path.join(base_dir, file)) - comp_hash = hashfunc(read_file, hash_size=hash_size) - duplicate = False - - if comp_hash not in hash_dict: - hash_dict[comp_hash] = file - # Compare with hash queue to find out potential duplicates - for img_hash in hash_queue: - if img_hash - comp_hash <= threshold: - duplicate = True - break - - if not duplicate: - hash_queue.append(comp_hash) - else: - duplicate = True - - if duplicate: - duplicates.append(file) - num_duplicates += 1 - t.set_postfix_str(f"Duplicate files: {num_duplicates}") - - return hash_dict, duplicates - - -def remove_duplicates( - base_dir, hash_size=8, hashfunc=imagehash.dhash, queue_len=5, threshold=4 -): - _, duplicates = find_similar_images( - base_dir, - hash_size=hash_size, - hashfunc=hashfunc, - queue_len=queue_len, - threshold=threshold, - ) - - if not len(duplicates): - print("No duplicates found!") - else: - print("Removing duplicates...") - - for dup_file in duplicates: - file_path = os.path.join(base_dir, dup_file) - - if os.path.exists(file_path): - os.remove(file_path) - else: - print("Filepath: ", file_path, "does not exists.") - - print("All duplicates removed!") - - print("***" * 10, "\n") - - -if __name__ == "__main__": - remove_duplicates("sample_1") diff --git a/spaces/ehcalabres/EMOVoice/README.md b/spaces/ehcalabres/EMOVoice/README.md deleted file mode 100644 index 41d1d133d8c95ae2896b4b4d0d88164e9ae8daf9..0000000000000000000000000000000000000000 --- a/spaces/ehcalabres/EMOVoice/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: EMOVoice -emoji: 😍 -colorFrom: green -colorTo: red -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
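          For the `post_process.py` module removed above, the heart of the duplicate detection is the perceptual-hash comparison `img_hash - comp_hash <= threshold`. A minimal sketch of that comparison on its own, outside the queue bookkeeping (the file names are hypothetical):

          ```python
          import imagehash
          from PIL import Image

          # Two consecutive slide snapshots (hypothetical file names).
          hash_a = imagehash.dhash(Image.open("snapshot_001.png"), hash_size=8)
          hash_b = imagehash.dhash(Image.open("snapshot_002.png"), hash_size=8)

          # Subtracting two ImageHash objects returns their Hamming distance;
          # find_similar_images treats a distance <= 4 (its default threshold) as a duplicate.
          print(hash_a - hash_b)
          ```

          The hash size and threshold mirror the defaults in `find_similar_images`; a larger `hash_size` captures finer detail, so for the same threshold fewer pairs are flagged as duplicates.
          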
diff --git a/spaces/ehristoforu/runwayml-stable-diffusion-v1-5k/README.md b/spaces/ehristoforu/runwayml-stable-diffusion-v1-5k/README.md deleted file mode 100644 index 2de2727dd53656c2399112b35c9f5733e9478b07..0000000000000000000000000000000000000000 --- a/spaces/ehristoforu/runwayml-stable-diffusion-v1-5k/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Runwayml Stable Diffusion V1 5k -emoji: 🌍 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/emc348/faces-through-time/models/e4e/stylegan2/op/__init__.py b/spaces/emc348/faces-through-time/models/e4e/stylegan2/op/__init__.py deleted file mode 100644 index d0918d92285955855be89f00096b888ee5597ce3..0000000000000000000000000000000000000000 --- a/spaces/emc348/faces-through-time/models/e4e/stylegan2/op/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .fused_act import FusedLeakyReLU, fused_leaky_relu -from .upfirdn2d import upfirdn2d diff --git a/spaces/erastorgueva-nv/NeMo-Forced-Aligner/utils/constants.py b/spaces/erastorgueva-nv/NeMo-Forced-Aligner/utils/constants.py deleted file mode 100644 index 51ce934be4793a33b907f2791bb60c04276d922c..0000000000000000000000000000000000000000 --- a/spaces/erastorgueva-nv/NeMo-Forced-Aligner/utils/constants.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -BLANK_TOKEN = "" - -SPACE_TOKEN = "" - -V_NEGATIVE_NUM = -3.4e38 # this is just above the most negative number in torch.float32 diff --git a/spaces/exnav29/Real_Estate_Bot/README.md b/spaces/exnav29/Real_Estate_Bot/README.md deleted file mode 100644 index 2970f398a88ee97b4da03c3ce0b56b526d99a439..0000000000000000000000000000000000000000 --- a/spaces/exnav29/Real_Estate_Bot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Real Estate Bot -emoji: 📈 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: cc-by-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/falterWliame/Face_Mask_Detection/HACK DVDFab 13.0.9.7 (x64) !!BETTER!! Crack.md b/spaces/falterWliame/Face_Mask_Detection/HACK DVDFab 13.0.9.7 (x64) !!BETTER!! Crack.md deleted file mode 100644 index a51ad74bb001375a832471cc95563d2ec302aefb..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/HACK DVDFab 13.0.9.7 (x64) !!BETTER!! Crack.md +++ /dev/null @@ -1,9 +0,0 @@ - -

      dvdfab crack is also a perfect solution for copying discs. it provides you the option to choose the dvd source and the output files for each and every disc. you can also get the configuration and operation data of the movies as well as subtitle information. in addition, it offers you the choice to edit the files after conversion. it additionally permits you to generate the subtitles from the converted data if they are not available.

      -

      HACK DVDFab 13.0.9.7 (x64) Crack


          



      -

      you can also use dvdfab torrent a free dvd encryption software that offers you one of the best dvd ripping and conversion results. it offers you some new features such as "fastest converting speed, rip with multi-core cpu and nvidia cuda technology. with a few clicks, you can easily customize the output files. it can also convert blu-ray discs. you can also use the free trial to test dvdfab.

      -

      my response: technically, dvdfab is in the same boat as anydvd; however, dvdfab is a lot cheaper (and, therefore, a lot more attractive). and, although anydvd can do everything that dvdfab can do, dvdfab (and its other available utilities) can do all of that and much, much more. plus, it can decrypt and rip blu-ray discs, which anydvd cannot. if i were a lawyer and dvdfab were to sue anydvd, i would be going after the lawyers who wrote the anydvd code, not the developers of dvdfab.

      -

      my response: if dvdfab has a weakness, it is the fact that it is a bit limited in its features. dvdfab does have a feature set that is quite limited, which is why i keep saying that the developers do need to make sure that they do not start copying the features of their competitors; since, if they do that, then they will be on the hook for any lawsuits that are filed against them. however, dvdfab does the basic job that it was designed to do, and it does it very well.

      -

          
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Real Gangster Crime and Rule the Streets of New Vegas.md b/spaces/fatiXbelha/sd/Download Real Gangster Crime and Rule the Streets of New Vegas.md deleted file mode 100644 index 3dfe87435aed1024c8bcbbf21cb48a7d70b3e01b..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Real Gangster Crime and Rule the Streets of New Vegas.md +++ /dev/null @@ -1,112 +0,0 @@ - -

      How to Download Real Gangster Crime Dinheiro Infinito

      -

      If you are a fan of action, adventure, and simulation games, you might have heard of Real Gangster Crime, a popular game developed by Naxeex Studio. In this game, you can experience the thrill of being a gangster in a crime-ridden city, where you can fight, steal, drive, and explore. But what if you want to have unlimited money and resources in the game, without having to spend real money or complete difficult missions? That's where dinheiro infinito comes in. Dinheiro infinito is a Portuguese term that means infinite money, and it refers to a modded version of the game that gives you access to unlimited cash, weapons, vehicles, clothes, and more. In this article, we will show you how to download real gangster crime dinheiro infinito for Android and PC devices, so you can enjoy the game to the fullest.

      -

      What is Real Gangster Crime?

      -

      Real Gangster Crime is an action game that combines elements of adventure and simulation in one grand theft game. It's perfect for fans of police chases, gang wars, crime simulators, and realistic games. In this game, you play as a real gangster who is trying to climb the ranks of the criminal underworld and become the kingpin. You can engage in various missions and quests, from stealing cars to participating in high-speed police chases to heists and robberies. As you progress through the game, you can unlock new and more difficult missions, which will require you to use all your skills and resources to succeed. You can also explore the open world of the city at your own pace, look for hidden collectibles and easter eggs, and discover new locations and activities. You can drive cool cars and motorcycles, confront enemies with a giant assault robot, or even fly a helicopter. You can also customize your character with a variety of clothing, shoes, hats, masks, and other accessories. You can choose from a wide range of weapons, from pistols to rocket launchers and from laser rifles to a strong steel suit. Real Gangster Crime is a free action game that offers you a realistic and immersive gangster simulator experience.

      -

      download real gangster crime dinheiro infinito


          



      -

      Features of the game

      -
        -
      • Realistic gameplay and graphics
      • -
      • Open world environment with many locations and activities
      • -
      • Various missions and quests with different objectives and rewards
      • -
      • Huge arsenal of weapons and vehicles
      • -
      • Character customization options
      • -
      • Offline mode available
      • -
      -

      Why download dinheiro infinito?

      -

      While Real Gangster Crime is a fun and exciting game, it can also be frustrating not having enough money to purchase the weapons, vehicles, clothes, and other gear you need. You might also find some missions too hard or too boring to complete. That's why some players prefer to download dinheiro infinito, which is a modded version of the game that gives you unlimited money and resources. With dinheiro infinito, you can buy anything you want in the game without worrying about running out of cash. You can also unlock all the missions and quests without having to complete them. You can enjoy the game without any limitations or restrictions.

      -

      How to download dinheiro infinito for Android

      -

      If you want to download real gangster crime dinheiro infinito for your Android device, you will need to follow these steps:

      -

      Step 1: Find a reliable source

      -

          The first thing you need to do is find a reliable source that offers the download link for the dinheiro infinito APK file. You can search online for websites or blogs that provide the link, but be careful not to download from untrustworthy or malicious sources that might harm your device or steal your data. You can also check the reviews and ratings of other users who have downloaded the file before you. One of the sources that we recommend is [this website], which has a high reputation and positive feedback from users.
          

      -

      Step 2: Enable unknown sources

      -

      Before you can install the dinheiro infinito APK file on your Android device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and toggle it on. You might see a warning message that says installing apps from unknown sources can be risky, but don't worry, as long as you download from a reliable source, you should be fine.

      -

      Step 3: Install the APK file

      -

          Once you have enabled unknown sources, you can proceed to install the dinheiro infinito APK file on your device. To do this, go to the download folder where you saved the file and tap on it. You might see a pop-up window that asks for your permission to install the app; tap on install and wait for the process to finish. You might also see another pop-up window that asks whether you want to open the app or finish; just tap on done for now.
          

      -

      Step 4: Enjoy unlimited money and resources

      -

      Now that you have installed the dinheiro infinito APK file on your device, you can enjoy unlimited money and resources in Real Gangster Crime. To do this, just open the app and start playing. You will see that you have unlimited cash in your account, and you can buy anything you want in the game. You can also unlock all the missions and quests without having to complete them. You can have fun with the gangster life without any limitations or restrictions.

      -

          

      -

      How to download dinheiro infinito for PC

      -

      If you want to download real gangster crime dinheiro infinito for your PC, you will need to follow these steps:

      -

      Step 1: Download an emulator

      -

          An emulator is software that allows you to run Android apps on your PC. There are many emulators available online, but some of the most popular are BlueStacks, NoxPlayer, and LDPlayer. Choose an emulator that suits your preferences and system requirements, and make sure to download it from a reputable source. To download an emulator, go to its official website and follow the instructions to install it on your PC.
          

      -

      Step 2: Install the emulator and the APK file

      -

      Once you have downloaded and installed an emulator on your PC, you need to install the dinheiro infinito APK file on it. To do this, just drag and drop the APK file into the emulator's window, or use the built-in browser to find and download it from [this website]. The emulator will automatically detect and install the APK file on its system.

      -

      Step 3: Launch the game and customize the settings

      -

      Now that you have installed the dinheiro infinito APK file on your emulator, you can launch the game and customize the settings according to your preferences. To do this, just open the emulator and click on Real Gangster Crime icon. You will see a welcome screen that asks you to choose your language and agree to the terms of service. After that, you can adjust the graphics quality, sound effects, controls, and other options in the settings menu.

      -

      Step 4: Have fun with the gangster life

      -

          Now that you have launched the game and customized the settings, you can have fun with the gangster life on your PC. You will see that you have unlimited money and resources in Real Gangster Crime, and you can buy anything you want in the game. You can also unlock all the missions and quests without having to complete them. You can explore the city, fight, steal, drive, and fly as you please. You can enjoy the game with high-quality graphics, sound effects, and controls on your PC.
          

      -

      Conclusion

      -

      Real Gangster Crime is an action game that lets you experience the thrill of being a gangster in a crime-ridden city. It offers you a realistic and immersive gangster simulator experience, with various missions, quests, locations, activities, weapons, vehicles, and customization options. However, if you want to have unlimited money and resources in the game, without having to spend real money or complete difficult missions, you can download dinheiro infinito, which is a modded version of the game that gives you access to unlimited cash, weapons, vehicles, clothes, and more. In this article, we showed you how to download real gangster crime dinheiro infinito for Android and PC devices, so you can enjoy the game to the fullest. We hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below.

      -

      FAQs

      -
        -
      • Is dinheiro infinito safe to download?
      • -

        Dinheiro infinito is safe to download as long as you download it from a reliable source that offers the download link for the dinheiro infinito APK file. You should also enable unknown sources in your settings before installing the APK file on your device. However, you should be aware that downloading dinheiro infinito might violate the terms of service of Real Gangster Crime, and you might face some risks or consequences if you use it online or with other players.

        -
      • Is dinheiro infinito compatible with all devices?
      • -

        Dinheiro infinito is compatible with most Android and PC devices that meet the minimum system requirements of Real Gangster Crime. However, some devices might not support the modded version of the game, or might experience some glitches or errors while running it. If you encounter any problems while downloading or installing dinheiro infinito, you should try to update your device's software or contact the source of the download link for assistance.

        -
      • Can I update dinheiro infinito?
      • -

        Dinheiro infinito is usually updated whenever Real Gangster Crime releases a new version of the game. However, you should not update dinheiro infinito from the Google Play Store or any other official source of Real Gangster Crime, as this might overwrite or delete the modded features of dinheiro infinito. Instead, you should check the source of the download link for dinheiro infinito for any updates or new versions of the modded game.

        -
      • Can I play online with dinheiro infinito?
      • -

        Dinheiro infinito is mainly designed for offline mode, where you can play without any internet connection or interference from other players. However, some sources of dinheiro infinito might offer online mode as well, where you can play with other players who have downloaded dinheiro infinito as well. However, you should be careful not to play online with players who have not downloaded dinheiro infinito, as this might cause some conflicts or issues with the game's servers or security systems.

        -
      • Can I uninstall dinheiro infinito?
      • -

        If you want to uninstall dinheiro infinito from your device, you can do so by following these steps:

        -
          -
        1. Go to your device's settings, then apps, then Real Gangster Crime.
        2. -
        3. Tap on uninstall and confirm your choice.
        4. -
        5. Delete the dinheiro infinito APK file from your download folder.
        6. -
        -

        This will remove dinheiro infinito from your device completely. If you want to reinstall Real Gangster Crime from the Google Play Store or any other official source of the game, you can do so by following these steps:

        -
          -
        1. Go to the Google Play Store or any other official source of Real Gangster Crime.
        2. -
        3. Search for Real Gangster Crime and tap on install.
        4. -
        5. Wait for the installation process to finish and open the game.
        6. -
        -

        This will reinstall Real Gangster Crime on your device normally.

          
        -
        -
        \ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/models/transfo_xl_denoise/generate.py b/spaces/fclong/summary/fengshen/models/transfo_xl_denoise/generate.py deleted file mode 100644 index 5b768ff1baf6477735ac14fec9df58f7cd2724c6..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/models/transfo_xl_denoise/generate.py +++ /dev/null @@ -1,66 +0,0 @@ -import torch -import torch.nn.functional as F -from fengshen.models.transfo_xl_denoise.tokenization_transfo_xl_denoise import TransfoXLDenoiseTokenizer -from fengshen.models.transfo_xl_denoise.modeling_transfo_xl_denoise import TransfoXLDenoiseModel -from fengshen.utils import top_k_logits, get_masks_and_position_ids - - -def get_batch(context_tokens, mem_length, batch_size=1): - tokens = context_tokens - tokens = tokens.view(batch_size, -1).contiguous() - # Get the masks and postition ids. - attention_mask, position_ids = get_masks_and_position_ids(tokens, mem_length=mem_length) - return tokens, attention_mask, position_ids - - -def denoise_generate(model, - tokenizer, - input_text, - device=0, - mem_length=512, - temperature=1., - top_p=0.9, - eod_token=50000): - ''' Generate with fixed prompt pretrained ''' - prompt = f"“{input_text}”改写后是“" - res = [] - counter = 0 - tokens, attention_mask, position_ids = get_batch( - torch.LongTensor(tokenizer.encode(prompt)), mem_length, batch_size=1) - tokens, attention_mask, position_ids = tokens.cuda( - device), attention_mask.cuda(device), position_ids.cuda(device) - org_context_length = tokens.shape[-1] - model = model.cuda(device) - while counter < 100: - if counter == 0: - mems = [] # empty at the begining - output = model(input_ids=tokens, attention_mask=attention_mask, - position_ids=position_ids, hidden_states=mems) - logits, mems = output.logits, output.hidden_states - else: - index = org_context_length + counter - output = model(input_ids=tokens[:, index - 1: index], position_ids=tokens.new_ones((1, 1)) * (index - 1), - attention_mask=tokens.new_ones(1, 1, 1, mem_length + 1, device=device, - dtype=torch.float), hidden_states=mems) - logits, mems = output.logits, output.hidden_states - logits = logits[:, -1] - logits /= temperature - logits = top_k_logits(logits, top_k=0, top_p=top_p) - log_probs = F.softmax(logits, dim=-1) - prev = torch.multinomial(log_probs, num_samples=1)[0] - is_end = prev == eod_token - if is_end: - break - tokens = torch.cat((tokens, prev.view(1, 1)), dim=1) - counter += 1 - res.append(tokenizer.decode(tokens.view(-1).contiguous().tolist())) - return res - - -if __name__ == "__main__": - device = 1 - tokenizer = TransfoXLDenoiseTokenizer.from_pretrained('IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B') - model = TransfoXLDenoiseModel.from_pretrained('IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B') - input_text = "凡是有成就的人, 都很严肃地对待生命自己的" - res = denoise_generate(model, tokenizer, input_text) - print(res) diff --git a/spaces/fffffu/bing/src/components/ui/tooltip.tsx b/spaces/fffffu/bing/src/components/ui/tooltip.tsx deleted file mode 100644 index af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000 --- a/spaces/fffffu/bing/src/components/ui/tooltip.tsx +++ /dev/null @@ -1,30 +0,0 @@ -'use client' - -import * as React from 'react' -import * as TooltipPrimitive from '@radix-ui/react-tooltip' - -import { cn } from '@/lib/utils' - -const TooltipProvider = TooltipPrimitive.Provider - -const Tooltip = TooltipPrimitive.Root - -const TooltipTrigger = TooltipPrimitive.Trigger - -const TooltipContent = 
React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - -)) -TooltipContent.displayName = TooltipPrimitive.Content.displayName - -export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/zlib.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/zlib.d.ts deleted file mode 100644 index 1d7f0c0e507405e9584cd7158cbbea92234afa84..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/zlib.d.ts +++ /dev/null @@ -1,517 +0,0 @@ -/** - * The `zlib` module provides compression functionality implemented using Gzip, - * Deflate/Inflate, and Brotli. - * - * To access it: - * - * ```js - * const zlib = require('zlib'); - * ``` - * - * Compression and decompression are built around the Node.js `Streams API`. - * - * Compressing or decompressing a stream (such as a file) can be accomplished by - * piping the source stream through a `zlib` `Transform` stream into a destination - * stream: - * - * ```js - * const { createGzip } = require('zlib'); - * const { pipeline } = require('stream'); - * const { - * createReadStream, - * createWriteStream - * } = require('fs'); - * - * const gzip = createGzip(); - * const source = createReadStream('input.txt'); - * const destination = createWriteStream('input.txt.gz'); - * - * pipeline(source, gzip, destination, (err) => { - * if (err) { - * console.error('An error occurred:', err); - * process.exitCode = 1; - * } - * }); - * - * // Or, Promisified - * - * const { promisify } = require('util'); - * const pipe = promisify(pipeline); - * - * async function do_gzip(input, output) { - * const gzip = createGzip(); - * const source = createReadStream(input); - * const destination = createWriteStream(output); - * await pipe(source, gzip, destination); - * } - * - * do_gzip('input.txt', 'input.txt.gz') - * .catch((err) => { - * console.error('An error occurred:', err); - * process.exitCode = 1; - * }); - * ``` - * - * It is also possible to compress or decompress data in a single step: - * - * ```js - * const { deflate, unzip } = require('zlib'); - * - * const input = '.................................'; - * deflate(input, (err, buffer) => { - * if (err) { - * console.error('An error occurred:', err); - * process.exitCode = 1; - * } - * console.log(buffer.toString('base64')); - * }); - * - * const buffer = Buffer.from('eJzT0yMAAGTvBe8=', 'base64'); - * unzip(buffer, (err, buffer) => { - * if (err) { - * console.error('An error occurred:', err); - * process.exitCode = 1; - * } - * console.log(buffer.toString()); - * }); - * - * // Or, Promisified - * - * const { promisify } = require('util'); - * const do_unzip = promisify(unzip); - * - * do_unzip(buffer) - * .then((buf) => console.log(buf.toString())) - * .catch((err) => { - * console.error('An error occurred:', err); - * process.exitCode = 1; - * }); - * ``` - * @since v0.5.8 - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/zlib.js) - */ -declare module 'zlib' { - import * as stream from 'node:stream'; - interface ZlibOptions { - /** - * @default constants.Z_NO_FLUSH - */ - flush?: number | undefined; - /** - * @default constants.Z_FINISH - */ - finishFlush?: number | undefined; - /** - * @default 16*1024 - */ - chunkSize?: number | undefined; - windowBits?: number | undefined; - level?: number | undefined; // compression only - memLevel?: 
number | undefined; // compression only - strategy?: number | undefined; // compression only - dictionary?: NodeJS.ArrayBufferView | ArrayBuffer | undefined; // deflate/inflate only, empty dictionary by default - info?: boolean | undefined; - maxOutputLength?: number | undefined; - } - interface BrotliOptions { - /** - * @default constants.BROTLI_OPERATION_PROCESS - */ - flush?: number | undefined; - /** - * @default constants.BROTLI_OPERATION_FINISH - */ - finishFlush?: number | undefined; - /** - * @default 16*1024 - */ - chunkSize?: number | undefined; - params?: - | { - /** - * Each key is a `constants.BROTLI_*` constant. - */ - [key: number]: boolean | number; - } - | undefined; - maxOutputLength?: number | undefined; - } - interface Zlib { - /** @deprecated Use bytesWritten instead. */ - readonly bytesRead: number; - readonly bytesWritten: number; - shell?: boolean | string | undefined; - close(callback?: () => void): void; - flush(kind?: number, callback?: () => void): void; - flush(callback?: () => void): void; - } - interface ZlibParams { - params(level: number, strategy: number, callback: () => void): void; - } - interface ZlibReset { - reset(): void; - } - interface BrotliCompress extends stream.Transform, Zlib {} - interface BrotliDecompress extends stream.Transform, Zlib {} - interface Gzip extends stream.Transform, Zlib {} - interface Gunzip extends stream.Transform, Zlib {} - interface Deflate extends stream.Transform, Zlib, ZlibReset, ZlibParams {} - interface Inflate extends stream.Transform, Zlib, ZlibReset {} - interface DeflateRaw extends stream.Transform, Zlib, ZlibReset, ZlibParams {} - interface InflateRaw extends stream.Transform, Zlib, ZlibReset {} - interface Unzip extends stream.Transform, Zlib {} - /** - * Creates and returns a new `BrotliCompress` object. - * @since v11.7.0, v10.16.0 - */ - function createBrotliCompress(options?: BrotliOptions): BrotliCompress; - /** - * Creates and returns a new `BrotliDecompress` object. - * @since v11.7.0, v10.16.0 - */ - function createBrotliDecompress(options?: BrotliOptions): BrotliDecompress; - /** - * Creates and returns a new `Gzip` object. - * See `example`. - * @since v0.5.8 - */ - function createGzip(options?: ZlibOptions): Gzip; - /** - * Creates and returns a new `Gunzip` object. - * @since v0.5.8 - */ - function createGunzip(options?: ZlibOptions): Gunzip; - /** - * Creates and returns a new `Deflate` object. - * @since v0.5.8 - */ - function createDeflate(options?: ZlibOptions): Deflate; - /** - * Creates and returns a new `Inflate` object. - * @since v0.5.8 - */ - function createInflate(options?: ZlibOptions): Inflate; - /** - * Creates and returns a new `DeflateRaw` object. - * - * An upgrade of zlib from 1.2.8 to 1.2.11 changed behavior when `windowBits`is set to 8 for raw deflate streams. zlib would automatically set `windowBits`to 9 if was initially set to 8\. Newer - * versions of zlib will throw an exception, - * so Node.js restored the original behavior of upgrading a value of 8 to 9, - * since passing `windowBits = 9` to zlib actually results in a compressed stream - * that effectively uses an 8-bit window only. - * @since v0.5.8 - */ - function createDeflateRaw(options?: ZlibOptions): DeflateRaw; - /** - * Creates and returns a new `InflateRaw` object. - * @since v0.5.8 - */ - function createInflateRaw(options?: ZlibOptions): InflateRaw; - /** - * Creates and returns a new `Unzip` object. 
- * @since v0.5.8 - */ - function createUnzip(options?: ZlibOptions): Unzip; - type InputType = string | ArrayBuffer | NodeJS.ArrayBufferView; - type CompressCallback = (error: Error | null, result: Buffer) => void; - /** - * @since v11.7.0, v10.16.0 - */ - function brotliCompress(buf: InputType, options: BrotliOptions, callback: CompressCallback): void; - function brotliCompress(buf: InputType, callback: CompressCallback): void; - namespace brotliCompress { - function __promisify__(buffer: InputType, options?: BrotliOptions): Promise; - } - /** - * Compress a chunk of data with `BrotliCompress`. - * @since v11.7.0, v10.16.0 - */ - function brotliCompressSync(buf: InputType, options?: BrotliOptions): Buffer; - /** - * @since v11.7.0, v10.16.0 - */ - function brotliDecompress(buf: InputType, options: BrotliOptions, callback: CompressCallback): void; - function brotliDecompress(buf: InputType, callback: CompressCallback): void; - namespace brotliDecompress { - function __promisify__(buffer: InputType, options?: BrotliOptions): Promise; - } - /** - * Decompress a chunk of data with `BrotliDecompress`. - * @since v11.7.0, v10.16.0 - */ - function brotliDecompressSync(buf: InputType, options?: BrotliOptions): Buffer; - /** - * @since v0.6.0 - */ - function deflate(buf: InputType, callback: CompressCallback): void; - function deflate(buf: InputType, options: ZlibOptions, callback: CompressCallback): void; - namespace deflate { - function __promisify__(buffer: InputType, options?: ZlibOptions): Promise; - } - /** - * Compress a chunk of data with `Deflate`. - * @since v0.11.12 - */ - function deflateSync(buf: InputType, options?: ZlibOptions): Buffer; - /** - * @since v0.6.0 - */ - function deflateRaw(buf: InputType, callback: CompressCallback): void; - function deflateRaw(buf: InputType, options: ZlibOptions, callback: CompressCallback): void; - namespace deflateRaw { - function __promisify__(buffer: InputType, options?: ZlibOptions): Promise; - } - /** - * Compress a chunk of data with `DeflateRaw`. - * @since v0.11.12 - */ - function deflateRawSync(buf: InputType, options?: ZlibOptions): Buffer; - /** - * @since v0.6.0 - */ - function gzip(buf: InputType, callback: CompressCallback): void; - function gzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void; - namespace gzip { - function __promisify__(buffer: InputType, options?: ZlibOptions): Promise; - } - /** - * Compress a chunk of data with `Gzip`. - * @since v0.11.12 - */ - function gzipSync(buf: InputType, options?: ZlibOptions): Buffer; - /** - * @since v0.6.0 - */ - function gunzip(buf: InputType, callback: CompressCallback): void; - function gunzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void; - namespace gunzip { - function __promisify__(buffer: InputType, options?: ZlibOptions): Promise; - } - /** - * Decompress a chunk of data with `Gunzip`. - * @since v0.11.12 - */ - function gunzipSync(buf: InputType, options?: ZlibOptions): Buffer; - /** - * @since v0.6.0 - */ - function inflate(buf: InputType, callback: CompressCallback): void; - function inflate(buf: InputType, options: ZlibOptions, callback: CompressCallback): void; - namespace inflate { - function __promisify__(buffer: InputType, options?: ZlibOptions): Promise; - } - /** - * Decompress a chunk of data with `Inflate`. 
- * @since v0.11.12 - */ - function inflateSync(buf: InputType, options?: ZlibOptions): Buffer; - /** - * @since v0.6.0 - */ - function inflateRaw(buf: InputType, callback: CompressCallback): void; - function inflateRaw(buf: InputType, options: ZlibOptions, callback: CompressCallback): void; - namespace inflateRaw { - function __promisify__(buffer: InputType, options?: ZlibOptions): Promise; - } - /** - * Decompress a chunk of data with `InflateRaw`. - * @since v0.11.12 - */ - function inflateRawSync(buf: InputType, options?: ZlibOptions): Buffer; - /** - * @since v0.6.0 - */ - function unzip(buf: InputType, callback: CompressCallback): void; - function unzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void; - namespace unzip { - function __promisify__(buffer: InputType, options?: ZlibOptions): Promise; - } - /** - * Decompress a chunk of data with `Unzip`. - * @since v0.11.12 - */ - function unzipSync(buf: InputType, options?: ZlibOptions): Buffer; - namespace constants { - const BROTLI_DECODE: number; - const BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: number; - const BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: number; - const BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: number; - const BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: number; - const BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: number; - const BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: number; - const BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: number; - const BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: number; - const BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: number; - const BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: number; - const BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: number; - const BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: number; - const BROTLI_DECODER_ERROR_FORMAT_DISTANCE: number; - const BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: number; - const BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: number; - const BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: number; - const BROTLI_DECODER_ERROR_FORMAT_PADDING_1: number; - const BROTLI_DECODER_ERROR_FORMAT_PADDING_2: number; - const BROTLI_DECODER_ERROR_FORMAT_RESERVED: number; - const BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: number; - const BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: number; - const BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: number; - const BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: number; - const BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: number; - const BROTLI_DECODER_ERROR_UNREACHABLE: number; - const BROTLI_DECODER_NEEDS_MORE_INPUT: number; - const BROTLI_DECODER_NEEDS_MORE_OUTPUT: number; - const BROTLI_DECODER_NO_ERROR: number; - const BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: number; - const BROTLI_DECODER_PARAM_LARGE_WINDOW: number; - const BROTLI_DECODER_RESULT_ERROR: number; - const BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: number; - const BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: number; - const BROTLI_DECODER_RESULT_SUCCESS: number; - const BROTLI_DECODER_SUCCESS: number; - const BROTLI_DEFAULT_MODE: number; - const BROTLI_DEFAULT_QUALITY: number; - const BROTLI_DEFAULT_WINDOW: number; - const BROTLI_ENCODE: number; - const BROTLI_LARGE_MAX_WINDOW_BITS: number; - const BROTLI_MAX_INPUT_BLOCK_BITS: number; - const BROTLI_MAX_QUALITY: number; - const BROTLI_MAX_WINDOW_BITS: number; - const BROTLI_MIN_INPUT_BLOCK_BITS: number; - const BROTLI_MIN_QUALITY: number; - const BROTLI_MIN_WINDOW_BITS: number; - const BROTLI_MODE_FONT: number; - const BROTLI_MODE_GENERIC: number; - const BROTLI_MODE_TEXT: number; - const 
BROTLI_OPERATION_EMIT_METADATA: number; - const BROTLI_OPERATION_FINISH: number; - const BROTLI_OPERATION_FLUSH: number; - const BROTLI_OPERATION_PROCESS: number; - const BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: number; - const BROTLI_PARAM_LARGE_WINDOW: number; - const BROTLI_PARAM_LGBLOCK: number; - const BROTLI_PARAM_LGWIN: number; - const BROTLI_PARAM_MODE: number; - const BROTLI_PARAM_NDIRECT: number; - const BROTLI_PARAM_NPOSTFIX: number; - const BROTLI_PARAM_QUALITY: number; - const BROTLI_PARAM_SIZE_HINT: number; - const DEFLATE: number; - const DEFLATERAW: number; - const GUNZIP: number; - const GZIP: number; - const INFLATE: number; - const INFLATERAW: number; - const UNZIP: number; - // Allowed flush values. - const Z_NO_FLUSH: number; - const Z_PARTIAL_FLUSH: number; - const Z_SYNC_FLUSH: number; - const Z_FULL_FLUSH: number; - const Z_FINISH: number; - const Z_BLOCK: number; - const Z_TREES: number; - // Return codes for the compression/decompression functions. - // Negative values are errors, positive values are used for special but normal events. - const Z_OK: number; - const Z_STREAM_END: number; - const Z_NEED_DICT: number; - const Z_ERRNO: number; - const Z_STREAM_ERROR: number; - const Z_DATA_ERROR: number; - const Z_MEM_ERROR: number; - const Z_BUF_ERROR: number; - const Z_VERSION_ERROR: number; - // Compression levels. - const Z_NO_COMPRESSION: number; - const Z_BEST_SPEED: number; - const Z_BEST_COMPRESSION: number; - const Z_DEFAULT_COMPRESSION: number; - // Compression strategy. - const Z_FILTERED: number; - const Z_HUFFMAN_ONLY: number; - const Z_RLE: number; - const Z_FIXED: number; - const Z_DEFAULT_STRATEGY: number; - const Z_DEFAULT_WINDOWBITS: number; - const Z_MIN_WINDOWBITS: number; - const Z_MAX_WINDOWBITS: number; - const Z_MIN_CHUNK: number; - const Z_MAX_CHUNK: number; - const Z_DEFAULT_CHUNK: number; - const Z_MIN_MEMLEVEL: number; - const Z_MAX_MEMLEVEL: number; - const Z_DEFAULT_MEMLEVEL: number; - const Z_MIN_LEVEL: number; - const Z_MAX_LEVEL: number; - const Z_DEFAULT_LEVEL: number; - const ZLIB_VERNUM: number; - } - // Allowed flush values. - /** @deprecated Use `constants.Z_NO_FLUSH` */ - const Z_NO_FLUSH: number; - /** @deprecated Use `constants.Z_PARTIAL_FLUSH` */ - const Z_PARTIAL_FLUSH: number; - /** @deprecated Use `constants.Z_SYNC_FLUSH` */ - const Z_SYNC_FLUSH: number; - /** @deprecated Use `constants.Z_FULL_FLUSH` */ - const Z_FULL_FLUSH: number; - /** @deprecated Use `constants.Z_FINISH` */ - const Z_FINISH: number; - /** @deprecated Use `constants.Z_BLOCK` */ - const Z_BLOCK: number; - /** @deprecated Use `constants.Z_TREES` */ - const Z_TREES: number; - // Return codes for the compression/decompression functions. - // Negative values are errors, positive values are used for special but normal events. - /** @deprecated Use `constants.Z_OK` */ - const Z_OK: number; - /** @deprecated Use `constants.Z_STREAM_END` */ - const Z_STREAM_END: number; - /** @deprecated Use `constants.Z_NEED_DICT` */ - const Z_NEED_DICT: number; - /** @deprecated Use `constants.Z_ERRNO` */ - const Z_ERRNO: number; - /** @deprecated Use `constants.Z_STREAM_ERROR` */ - const Z_STREAM_ERROR: number; - /** @deprecated Use `constants.Z_DATA_ERROR` */ - const Z_DATA_ERROR: number; - /** @deprecated Use `constants.Z_MEM_ERROR` */ - const Z_MEM_ERROR: number; - /** @deprecated Use `constants.Z_BUF_ERROR` */ - const Z_BUF_ERROR: number; - /** @deprecated Use `constants.Z_VERSION_ERROR` */ - const Z_VERSION_ERROR: number; - // Compression levels. 
- /** @deprecated Use `constants.Z_NO_COMPRESSION` */ - const Z_NO_COMPRESSION: number; - /** @deprecated Use `constants.Z_BEST_SPEED` */ - const Z_BEST_SPEED: number; - /** @deprecated Use `constants.Z_BEST_COMPRESSION` */ - const Z_BEST_COMPRESSION: number; - /** @deprecated Use `constants.Z_DEFAULT_COMPRESSION` */ - const Z_DEFAULT_COMPRESSION: number; - // Compression strategy. - /** @deprecated Use `constants.Z_FILTERED` */ - const Z_FILTERED: number; - /** @deprecated Use `constants.Z_HUFFMAN_ONLY` */ - const Z_HUFFMAN_ONLY: number; - /** @deprecated Use `constants.Z_RLE` */ - const Z_RLE: number; - /** @deprecated Use `constants.Z_FIXED` */ - const Z_FIXED: number; - /** @deprecated Use `constants.Z_DEFAULT_STRATEGY` */ - const Z_DEFAULT_STRATEGY: number; - /** @deprecated */ - const Z_BINARY: number; - /** @deprecated */ - const Z_TEXT: number; - /** @deprecated */ - const Z_ASCII: number; - /** @deprecated */ - const Z_UNKNOWN: number; - /** @deprecated */ - const Z_DEFLATED: number; -} -declare module 'node:zlib' { - export * from 'zlib'; -} diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/path-to-regexp/History.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/path-to-regexp/History.md deleted file mode 100644 index 7f6587846f67047b7f9ecddbb176abd25dc3741d..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/path-to-regexp/History.md +++ /dev/null @@ -1,36 +0,0 @@ -0.1.7 / 2015-07-28 -================== - - * Fixed regression with escaped round brackets and matching groups. - -0.1.6 / 2015-06-19 -================== - - * Replace `index` feature by outputting all parameters, unnamed and named. - -0.1.5 / 2015-05-08 -================== - - * Add an index property for position in match result. - -0.1.4 / 2015-03-05 -================== - - * Add license information - -0.1.3 / 2014-07-06 -================== - - * Better array support - * Improved support for trailing slash in non-ending mode - -0.1.0 / 2014-03-06 -================== - - * add options.end - -0.0.2 / 2013-02-10 -================== - - * Update to match current express - * add .license property to component.json diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/safe-buffer/index.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/safe-buffer/index.js deleted file mode 100644 index f8d3ec98852f449b44b7d89fc82bae737c69f3fc..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/safe-buffer/index.js +++ /dev/null @@ -1,65 +0,0 @@ -/*! safe-buffer. MIT License. 
Feross Aboukhadijeh */ -/* eslint-disable node/no-deprecated-api */ -var buffer = require('buffer') -var Buffer = buffer.Buffer - -// alternative to using Object.keys for old browsers -function copyProps (src, dst) { - for (var key in src) { - dst[key] = src[key] - } -} -if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) { - module.exports = buffer -} else { - // Copy properties from require('buffer') - copyProps(buffer, exports) - exports.Buffer = SafeBuffer -} - -function SafeBuffer (arg, encodingOrOffset, length) { - return Buffer(arg, encodingOrOffset, length) -} - -SafeBuffer.prototype = Object.create(Buffer.prototype) - -// Copy static methods from Buffer -copyProps(Buffer, SafeBuffer) - -SafeBuffer.from = function (arg, encodingOrOffset, length) { - if (typeof arg === 'number') { - throw new TypeError('Argument must not be a number') - } - return Buffer(arg, encodingOrOffset, length) -} - -SafeBuffer.alloc = function (size, fill, encoding) { - if (typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - var buf = Buffer(size) - if (fill !== undefined) { - if (typeof encoding === 'string') { - buf.fill(fill, encoding) - } else { - buf.fill(fill) - } - } else { - buf.fill(0) - } - return buf -} - -SafeBuffer.allocUnsafe = function (size) { - if (typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - return Buffer(size) -} - -SafeBuffer.allocUnsafeSlow = function (size) { - if (typeof size !== 'number') { - throw new TypeError('Argument must be a number') - } - return buffer.SlowBuffer(size) -} diff --git a/spaces/fffiloni/lama-video-watermark-remover/fetch_data/eval_sampler.py b/spaces/fffiloni/lama-video-watermark-remover/fetch_data/eval_sampler.py deleted file mode 100644 index bf2d70d875a44b5a74daeec9b4ba747600287f2a..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/lama-video-watermark-remover/fetch_data/eval_sampler.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import random - - -val_files_path = os.path.abspath('.') + '/places_standard_dataset/original/val/' -val_files = [val_files_path + image for image in os.listdir(val_files_path)] - -print(f'found {len(val_files)} images in {val_files_path}') - -random.shuffle(val_files) -val_files_random = val_files[0:2000] - -list_of_random_val_files = os.path.abspath('.') \ -+ '/places_standard_dataset/original/eval_random_files.txt' - -print(f'copying 2000 random images to {list_of_random_val_files}') -with open(list_of_random_val_files, 'w') as fw: - for filename in val_files_random: - fw.write(filename+'\n') -print('...done') - diff --git a/spaces/fracapuano/AISandbox/summarization/__init__.py b/spaces/fracapuano/AISandbox/summarization/__init__.py deleted file mode 100644 index 23124821c65d272c61ab7b5a7b51f46a875d3f07..0000000000000000000000000000000000000000 --- a/spaces/fracapuano/AISandbox/summarization/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .summarization import * \ No newline at end of file diff --git a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Better.py b/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Better.py deleted file mode 100644 index e95bf36ac645428a2a70246da52d83d74c008ec8..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/Better.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import json -import requests -from typing import Dict, get_type_hints - -url = 'https://openai-proxy-api.vercel.app/v1/' -model = { - 'gpt-3.5-turbo', - 'gpt-3.5-turbo-0613' - 
'gpt-3.5-turbo-16k', - 'gpt-3.5-turbo-16k-0613', - 'gpt-4', -} - -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - headers = { - 'Content-Type': 'application/json', - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58', - 'Referer': 'https://chat.ylokh.xyz/', - 'Origin': 'https://chat.ylokh.xyz', - 'Connection': 'keep-alive', - } - - json_data = { - 'messages': messages, - 'temperature': 1.0, - 'model': model, - 'stream': stream, - } - - response = requests.post( - 'https://openai-proxy-api.vercel.app/v1/chat/completions', headers=headers, json=json_data, stream=True - ) - - for token in response.iter_lines(): - decoded = token.decode('utf-8') - if decoded.startswith('data: '): - data_str = decoded.replace('data: ', '') - data = json.loads(data_str) - if 'choices' in data and 'delta' in data['choices'][0]: - delta = data['choices'][0]['delta'] - content = delta.get('content', '') - finish_reason = delta.get('finish_reason', '') - - if finish_reason == 'stop': - break - if content: - yield content - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/geloku/ai-academy/app.py b/spaces/geloku/ai-academy/app.py deleted file mode 100644 index 315b4d0d57a28de4829e465e6afa8777847faadf..0000000000000000000000000000000000000000 --- a/spaces/geloku/ai-academy/app.py +++ /dev/null @@ -1,26 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../app.ipynb. - -# %% auto 0 -__all__ = ['learn', 'categories', 'image', 'label', 'examples', 'intf', 'classify_image'] - -# %% ../app.ipynb 1 -from fastai.vision.all import * -import gradio as gr - -# %% ../app.ipynb 2 -learn = load_learner('model.pk1') - -# %% ../app.ipynb 3 -categories = ('bird', 'fish', 'mammal') - -def classify_image(img): - pred, idx, probs = learn.predict(img) - return dict(zip(categories, map(float, probs))) - -# %% ../app.ipynb 5 -image = gr.inputs.Image(shape=(192,192)) -label = gr.outputs.Label() -examples = ['bird.jpg', 'fish.jpg', 'mammal.jpg'] - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False) diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/__init__.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/__init__.py deleted file mode 100644 index 52e4b48d383a84a055dcd7f6236f6e8e58eab924..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .base_module import BaseModule, ModuleList, Sequential -from .base_runner import BaseRunner -from .builder import RUNNERS, build_runner -from .checkpoint import (CheckpointLoader, _load_checkpoint, - _load_checkpoint_with_prefix, load_checkpoint, - load_state_dict, save_checkpoint, weights_to_cpu) -from .default_constructor import DefaultRunnerConstructor -from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info, - init_dist, master_only) -from .epoch_based_runner import EpochBasedRunner, Runner -from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model -from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook, - DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook, - Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook, - GradientCumulativeOptimizerHook, Hook, IterTimerHook, - LoggerHook, LrUpdaterHook, MlflowLoggerHook, - NeptuneLoggerHook, OptimizerHook, PaviLoggerHook, - SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook, - WandbLoggerHook) -from .iter_based_runner import IterBasedRunner, IterLoader -from .log_buffer import LogBuffer -from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS, - DefaultOptimizerConstructor, build_optimizer, - build_optimizer_constructor) -from .priority import Priority, get_priority -from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed - -__all__ = [ - 'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner', 'LogBuffer', - 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', - 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'LoggerHook', - 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', - 'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook', - 'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict', - 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority', - 'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict', - 'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS', - 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer', - 'build_optimizer_constructor', 'IterLoader', 'set_random_seed', - 'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook', - 'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS', 'allreduce_grads', - 'allreduce_params', 'LossScaler', 'CheckpointLoader', 'BaseModule', - '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook', 'Sequential', - 'ModuleList', 'GradientCumulativeOptimizerHook', - 'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor' -] diff --git a/spaces/ggwvits/vits-uma-genshin-honkai/attentions.py b/spaces/ggwvits/vits-uma-genshin-honkai/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/ggwvits/vits-uma-genshin-honkai/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - 
self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 
1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/glyszt/vt/vtoonify/model/encoder/encoders/psp_encoders.py b/spaces/glyszt/vt/vtoonify/model/encoder/encoders/psp_encoders.py deleted file mode 100644 index f69d38200b6be4997673ae38ed481fd21f88b419..0000000000000000000000000000000000000000 --- a/spaces/glyszt/vt/vtoonify/model/encoder/encoders/psp_encoders.py +++ /dev/null @@ -1,186 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn -from torch.nn import Linear, Conv2d, BatchNorm2d, PReLU, Sequential, Module - -from model.encoder.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE -from model.stylegan.model import EqualLinear - - -class GradualStyleBlock(Module): - def __init__(self, in_c, out_c, spatial): - super(GradualStyleBlock, self).__init__() - self.out_c = out_c - self.spatial = spatial - num_pools = int(np.log2(spatial)) - modules = [] - modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1), - nn.LeakyReLU()] - for i in range(num_pools - 1): - modules += [ - Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1), - nn.LeakyReLU() - ] - self.convs = nn.Sequential(*modules) - self.linear = EqualLinear(out_c, out_c, lr_mul=1) - - def forward(self, x): - x = self.convs(x) - x = x.view(-1, self.out_c) - x = self.linear(x) - return x - - -class GradualStyleEncoder(Module): - def __init__(self, num_layers, mode='ir', opts=None): - super(GradualStyleEncoder, self).__init__() - assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' - assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - modules = [] - for block in blocks: - for bottleneck in 
block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - self.styles = nn.ModuleList() - self.style_count = opts.n_styles - self.coarse_ind = 3 - self.middle_ind = 7 - for i in range(self.style_count): - if i < self.coarse_ind: - style = GradualStyleBlock(512, 512, 16) - elif i < self.middle_ind: - style = GradualStyleBlock(512, 512, 32) - else: - style = GradualStyleBlock(512, 512, 64) - self.styles.append(style) - self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0) - self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0) - - def _upsample_add(self, x, y): - '''Upsample and add two feature maps. - Args: - x: (Variable) top feature map to be upsampled. - y: (Variable) lateral feature map. - Returns: - (Variable) added feature map. - Note in PyTorch, when input size is odd, the upsampled feature map - with `F.upsample(..., scale_factor=2, mode='nearest')` - maybe not equal to the lateral feature map size. - e.g. - original input size: [N,_,15,15] -> - conv2d feature map size: [N,_,8,8] -> - upsampled feature map size: [N,_,16,16] - So we choose bilinear upsample which supports arbitrary output sizes. - ''' - _, _, H, W = y.size() - return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y - - def forward(self, x): - x = self.input_layer(x) - - latents = [] - modulelist = list(self.body._modules.values()) - for i, l in enumerate(modulelist): - x = l(x) - if i == 6: - c1 = x - elif i == 20: - c2 = x - elif i == 23: - c3 = x - - for j in range(self.coarse_ind): - latents.append(self.styles[j](c3)) - - p2 = self._upsample_add(c3, self.latlayer1(c2)) - for j in range(self.coarse_ind, self.middle_ind): - latents.append(self.styles[j](p2)) - - p1 = self._upsample_add(p2, self.latlayer2(c1)) - for j in range(self.middle_ind, self.style_count): - latents.append(self.styles[j](p1)) - - out = torch.stack(latents, dim=1) - return out - - -class BackboneEncoderUsingLastLayerIntoW(Module): - def __init__(self, num_layers, mode='ir', opts=None): - super(BackboneEncoderUsingLastLayerIntoW, self).__init__() - print('Using BackboneEncoderUsingLastLayerIntoW') - assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' - assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1)) - self.linear = EqualLinear(512, 512, lr_mul=1) - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - def forward(self, x): - x = self.input_layer(x) - x = self.body(x) - x = self.output_pool(x) - x = x.view(-1, 512) - x = self.linear(x) - return x - - -class BackboneEncoderUsingLastLayerIntoWPlus(Module): - def __init__(self, num_layers, mode='ir', opts=None): - super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__() - print('Using BackboneEncoderUsingLastLayerIntoWPlus') - assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' - assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - 
unit_module = bottleneck_IR_SE - self.n_styles = opts.n_styles - self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - self.output_layer_2 = Sequential(BatchNorm2d(512), - torch.nn.AdaptiveAvgPool2d((7, 7)), - Flatten(), - Linear(512 * 7 * 7, 512)) - self.linear = EqualLinear(512, 512 * self.n_styles, lr_mul=1) - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - def forward(self, x): - x = self.input_layer(x) - x = self.body(x) - x = self.output_layer_2(x) - x = self.linear(x) - x = x.view(-1, self.n_styles, 512) - return x diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Ebooks Free Download Iphone The College Dropout.md b/spaces/gotiQspiryo/whisper-ui/examples/Ebooks Free Download Iphone The College Dropout.md deleted file mode 100644 index 98fc25255366b80efbdb00491d6bdd86c9d6d7ae..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Ebooks Free Download Iphone The College Dropout.md +++ /dev/null @@ -1,5 +0,0 @@ -
        -

        In the event of loss, immediately call the OTS Service Desk at 832.813.6600 (toll-free 866.614.5014) to file a report. When you report the device lost, or the college has sufficient cause to believe the device is no longer in your possession, the device may be rendered nonfunctional, and the built-in tracking mechanism may be enabled by the college to allow for recovery of the device.

        -

        Ebooks free download iphone The College Dropout


        Download Zip ✯✯✯ https://urlgoal.com/2uyM3t



        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/guetLzy/Real-ESRGAN-Demo/tests/test_dataset.py b/spaces/guetLzy/Real-ESRGAN-Demo/tests/test_dataset.py deleted file mode 100644 index 715b4082645c131d43d728ae8f65bcc2430aa8c9..0000000000000000000000000000000000000000 --- a/spaces/guetLzy/Real-ESRGAN-Demo/tests/test_dataset.py +++ /dev/null @@ -1,151 +0,0 @@ -import pytest -import yaml - -from realesrgan.data.realesrgan_dataset import RealESRGANDataset -from realesrgan.data.realesrgan_paired_dataset import RealESRGANPairedDataset - - -def test_realesrgan_dataset(): - - with open('tests/data/test_realesrgan_dataset.yml', mode='r') as f: - opt = yaml.load(f, Loader=yaml.FullLoader) - - dataset = RealESRGANDataset(opt) - assert dataset.io_backend_opt['type'] == 'disk' # io backend - assert len(dataset) == 2 # whether to read correct meta info - assert dataset.kernel_list == [ - 'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso' - ] # correct initialization the degradation configurations - assert dataset.betag_range2 == [0.5, 4] - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 400, 400) - assert result['kernel1'].shape == (21, 21) - assert result['kernel2'].shape == (21, 21) - assert result['sinc_kernel'].shape == (21, 21) - assert result['gt_path'] == 'tests/data/gt/baboon.png' - - # ------------------ test lmdb backend -------------------- # - opt['dataroot_gt'] = 'tests/data/gt.lmdb' - opt['io_backend']['type'] = 'lmdb' - - dataset = RealESRGANDataset(opt) - assert dataset.io_backend_opt['type'] == 'lmdb' # io backend - assert len(dataset.paths) == 2 # whether to read correct meta info - assert dataset.kernel_list == [ - 'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso' - ] # correct initialization the degradation configurations - assert dataset.betag_range2 == [0.5, 4] - - # test __getitem__ - result = dataset.__getitem__(1) - # check returned keys - expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 400, 400) - assert result['kernel1'].shape == (21, 21) - assert result['kernel2'].shape == (21, 21) - assert result['sinc_kernel'].shape == (21, 21) - assert result['gt_path'] == 'comic' - - # ------------------ test with sinc_prob = 0 -------------------- # - opt['dataroot_gt'] = 'tests/data/gt.lmdb' - opt['io_backend']['type'] = 'lmdb' - opt['sinc_prob'] = 0 - opt['sinc_prob2'] = 0 - opt['final_sinc_prob'] = 0 - dataset = RealESRGANDataset(opt) - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 400, 400) - assert result['kernel1'].shape == (21, 21) - assert result['kernel2'].shape == (21, 21) - assert result['sinc_kernel'].shape == (21, 21) - assert result['gt_path'] == 'baboon' - - # ------------------ lmdb backend should have paths ends with lmdb -------------------- # - with pytest.raises(ValueError): - opt['dataroot_gt'] = 'tests/data/gt' - opt['io_backend']['type'] = 'lmdb' - dataset = RealESRGANDataset(opt) - - -def test_realesrgan_paired_dataset(): - - 
with open('tests/data/test_realesrgan_paired_dataset.yml', mode='r') as f: - opt = yaml.load(f, Loader=yaml.FullLoader) - - dataset = RealESRGANPairedDataset(opt) - assert dataset.io_backend_opt['type'] == 'disk' # io backend - assert len(dataset) == 2 # whether to read correct meta info - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path', 'lq_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 128, 128) - assert result['lq'].shape == (3, 32, 32) - assert result['gt_path'] == 'tests/data/gt/baboon.png' - assert result['lq_path'] == 'tests/data/lq/baboon.png' - - # ------------------ test lmdb backend -------------------- # - opt['dataroot_gt'] = 'tests/data/gt.lmdb' - opt['dataroot_lq'] = 'tests/data/lq.lmdb' - opt['io_backend']['type'] = 'lmdb' - - dataset = RealESRGANPairedDataset(opt) - assert dataset.io_backend_opt['type'] == 'lmdb' # io backend - assert len(dataset) == 2 # whether to read correct meta info - - # test __getitem__ - result = dataset.__getitem__(1) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path', 'lq_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 128, 128) - assert result['lq'].shape == (3, 32, 32) - assert result['gt_path'] == 'comic' - assert result['lq_path'] == 'comic' - - # ------------------ test paired_paths_from_folder -------------------- # - opt['dataroot_gt'] = 'tests/data/gt' - opt['dataroot_lq'] = 'tests/data/lq' - opt['io_backend'] = dict(type='disk') - opt['meta_info'] = None - - dataset = RealESRGANPairedDataset(opt) - assert dataset.io_backend_opt['type'] == 'disk' # io backend - assert len(dataset) == 2 # whether to read correct meta info - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path', 'lq_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 128, 128) - assert result['lq'].shape == (3, 32, 32) - - # ------------------ test normalization -------------------- # - dataset.mean = [0.5, 0.5, 0.5] - dataset.std = [0.5, 0.5, 0.5] - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path', 'lq_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 128, 128) - assert result['lq'].shape == (3, 32, 32) diff --git a/spaces/gulabpatel/Real-ESRGAN/scripts/generate_multiscale_DF2K.py b/spaces/gulabpatel/Real-ESRGAN/scripts/generate_multiscale_DF2K.py deleted file mode 100644 index d4f5d8324b1624e4cb6163754703b8dac2d188fd..0000000000000000000000000000000000000000 --- a/spaces/gulabpatel/Real-ESRGAN/scripts/generate_multiscale_DF2K.py +++ /dev/null @@ -1,48 +0,0 @@ -import argparse -import glob -import os -from PIL import Image - - -def main(args): - # For DF2K, we consider the following three scales, - # and the smallest image whose shortest edge is 400 - scale_list = [0.75, 0.5, 1 / 3] - shortest_edge = 400 - - path_list = sorted(glob.glob(os.path.join(args.input, '*'))) - for path in path_list: - print(path) - basename = os.path.splitext(os.path.basename(path))[0] - - img = Image.open(path) - width, height = img.size - for idx, scale in enumerate(scale_list): - print(f'\t{scale:.2f}') - rlt = img.resize((int(width * scale), int(height * scale)), 
resample=Image.LANCZOS) - rlt.save(os.path.join(args.output, f'{basename}T{idx}.png')) - - # save the smallest image which the shortest edge is 400 - if width < height: - ratio = height / width - width = shortest_edge - height = int(width * ratio) - else: - ratio = width / height - height = shortest_edge - width = int(height * ratio) - rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS) - rlt.save(os.path.join(args.output, f'{basename}T{idx+1}.png')) - - -if __name__ == '__main__': - """Generate multi-scale versions for GT images with LANCZOS resampling. - It is now used for DF2K dataset (DIV2K + Flickr 2K) - """ - parser = argparse.ArgumentParser() - parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder') - parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder') - args = parser.parse_args() - - os.makedirs(args.output, exist_ok=True) - main(args) diff --git a/spaces/gylleus/icongen/torch_utils/ops/bias_act.h b/spaces/gylleus/icongen/torch_utils/ops/bias_act.h deleted file mode 100644 index a32187e1fb7e3bae509d4eceaf900866866875a4..0000000000000000000000000000000000000000 --- a/spaces/gylleus/icongen/torch_utils/ops/bias_act.h +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -//------------------------------------------------------------------------ -// CUDA kernel parameters. - -struct bias_act_kernel_params -{ - const void* x; // [sizeX] - const void* b; // [sizeB] or NULL - const void* xref; // [sizeX] or NULL - const void* yref; // [sizeX] or NULL - const void* dy; // [sizeX] or NULL - void* y; // [sizeX] - - int grad; - int act; - float alpha; - float gain; - float clamp; - - int sizeX; - int sizeB; - int stepB; - int loopX; -}; - -//------------------------------------------------------------------------ -// CUDA kernel selection. - -template void* choose_bias_act_kernel(const bias_act_kernel_params& p); - -//------------------------------------------------------------------------ diff --git a/spaces/gyrojeff/YuzuMarker.FontDetection/linux_venv_setup.sh b/spaces/gyrojeff/YuzuMarker.FontDetection/linux_venv_setup.sh deleted file mode 100644 index 012ac087448c6a91183408cdcdc3028b536aa25f..0000000000000000000000000000000000000000 --- a/spaces/gyrojeff/YuzuMarker.FontDetection/linux_venv_setup.sh +++ /dev/null @@ -1,34 +0,0 @@ -sudo apt update && sudo apt install python3-venv -y - -# init virtual env -python3 -m venv venv - -source venv/bin/activate - -# install pillow deps -sudo apt-get install libtiff5-dev libjpeg8-dev libopenjp2-7-dev zlib1g-dev \ - libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python3-tk \ - libharfbuzz-dev libfribidi-dev libxcb1-dev -y - -pip install meson ninja - -# clone pillow -git clone https://github.com/python-pillow/Pillow - -cd Pillow/depends -# make sudo happy -sed -i 's/sudo/sudo -E env PATH=$PATH/g' install_raqm.sh -# install script -chmod +x ./install_raqm.sh && ./install_raqm.sh - -#clean up -cd ../.. 
-rm -rf Pillow - -# install everything -pip install -r requirements_generate_font_dataset.txt - -# download wordlist (added since my cluster has bad public network connection) -wget https://www.mit.edu/~ecprice/wordlist.10000 -mv wordlist.10000 wordlist.txt - diff --git a/spaces/h2oai/wave-tour/examples/db_todo.py b/spaces/h2oai/wave-tour/examples/db_todo.py deleted file mode 100644 index 4af3ff8135d6b68f86e3fda2f2b756a9c1aeb62d..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/db_todo.py +++ /dev/null @@ -1,109 +0,0 @@ -# WaveDB / To-do App -# A multi-user To-do list application using WaveDB for data management. -# This example is very similar to the todo.py example, except that this -# example uses WaveDB instead of an in-memory list. -# --- -from h2o_wave import main, app, Q, ui, connect, WaveDB, expando_to_dict - - -# A simple class that represents a to-do item. -class TodoItem: - def __init__(self, id, label, done): - self.id = id - self.label = label - self.done = done - - -async def setup_db() -> WaveDB: - db = connect()['todo'] - _, err = await db.exec_atomic( - """ - create table if not exists todo ( - id integer primary key, - user text not null, - label text not null, - done integer not null default 0 - ) - """ - ) - if err: - raise RuntimeError(f'Failed setting up database: {err}') - return db - - -@app('/demo') -async def serve(q: Q): - if q.app.db is None: - q.app.db = await setup_db() - - if q.args.new_todo: # Display an input form. - await new_todo(q) - elif q.args.add_todo: # Add an item. - await add_todo(q) - else: # Show all items. - await show_todos(q) - - -async def show_todos(q: Q): - # Get items for this user. - db: WaveDB = q.app.db - - # Check if we have any updates, i.e. the user has checked/unchecked any item. - updates = [] - for key, done in expando_to_dict(q.args).items(): - # We've named each todo item `todo_{id}' (e.g. todo_42, todo_43, and so on) - # So identify the todo items from their 'todo_' prefix, then extract the ids from the names. - if key.startswith('todo_'): - _, id = key.split('_', 1) - updates.append(('update todo set done=? where id=?', 1 if done else 0, int(id))) - - # If we have updates, update our database. - if len(updates): - _, err = await db.exec_many(*updates) - if err: - raise RuntimeError(f'Failed updating todos: {err}') - - # Fetch latest todos for our user - rows, err = await db.exec('select id, label, done from todo where user=?', q.auth.subject) - if err: - raise RuntimeError(f'Failed fetching todos: {err}') - todos = [TodoItem(id, label, done) for id, label, done in rows] - - # Create done/not-done checkboxes. - done = [ui.checkbox(name=f'todo_{todo.id}', label=todo.label, value=True, trigger=True) for todo in todos if - todo.done] - not_done = [ui.checkbox(name=f'todo_{todo.id}', label=todo.label, trigger=True) for todo in todos if not todo.done] - - # Display list - q.page['form'] = ui.form_card(box='1 1 4 10', items=[ - ui.text_l('To Do'), - ui.button(name='new_todo', label='Add To Do...', primary=True), - *not_done, - *([ui.separator('Done')] if len(done) else []), - *done, - ]) - await q.page.save() - - -async def add_todo(q: Q): - # Insert a new item - db: WaveDB = q.app.db - _, err = await db.exec('insert into todo (user, label) values (? , ?)', q.auth.subject, q.args.label or 'Untitled') - if err: - raise RuntimeError(f'Failed inserting todo: {err}') - - # Go back to our list. 
- await show_todos(q) - - -async def new_todo(q: Q): - # Display an input form - q.page['form'] = ui.form_card(box='1 1 4 10', items=[ - ui.text_l('Add To Do'), - ui.textbox(name='label', label='What needs to be done?', multiline=True), - ui.buttons([ - ui.button(name='add_todo', label='Add', primary=True), - ui.button(name='show_todos', label='Back'), - ]), - ]) - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/progress_update.py b/spaces/h2oai/wave-tour/examples/progress_update.py deleted file mode 100644 index f0bb35e6c0b3412fcba463824d12fd9bb9557c02..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/progress_update.py +++ /dev/null @@ -1,28 +0,0 @@ -# Form / Progress / Updating -# Update a #progress bar's completion status periodically. -# #form -# --- -import time - -from h2o_wave import site, ui - -page = site['/demo'] - -page['example'] = ui.form_card( - box='1 1 4 7', - items=[ - ui.progress(name='progress', label='Basic Progress'), - ] -) -page.save() - -for i in range(1, 11): - time.sleep(1) - page['example'].items = [ - ui.progress(name='progress', label='Basic Progress', caption=f'{i * 10}% complete', value=i / 10), - ] - - # This will work, too: - # page['example'].progress.value = i/10 - - page.save() diff --git a/spaces/hanstyle/tts/results/README.md b/spaces/hanstyle/tts/results/README.md deleted file mode 100644 index b1bbfd53fded37aefe0f4fc97adc8de343341b7a..0000000000000000000000000000000000000000 --- a/spaces/hanstyle/tts/results/README.md +++ /dev/null @@ -1 +0,0 @@ -Generated results will be placed in this folder by default. \ No newline at end of file diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/background.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/background.py deleted file mode 100644 index 65c5fcbbff0f5fc9c3ecaac2257a875cf597fbd8..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/background.py +++ /dev/null @@ -1,53 +0,0 @@ -import os -import os.path -import json -from PIL import Image - -import torch -import torchvision -import torch.utils.data as data -from maskrcnn_benchmark.structures.bounding_box import BoxList - -class Background(data.Dataset): - """ Background - - Args: - root (string): Root directory where images are downloaded to. - annFile (string): Path to json annotation file. - transform (callable, optional): A function/transform that takes in an PIL image - and returns a transformed version. E.g, ``transforms.ToTensor`` - """ - - def __init__(self, ann_file, root, remove_images_without_annotations=None, transforms=None): - self.root = root - - with open(ann_file, 'r') as f: - self.ids = json.load(f)['images'] - self.transform = transforms - - def __getitem__(self, index): - """ - Args: - index (int): Index - - Returns: - tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``. 
- """ - im_info = self.ids[index] - path = im_info['file_name'] - fp = os.path.join(self.root, path) - - img = Image.open(fp).convert('RGB') - if self.transform is not None: - img, _ = self.transform(img, None) - null_target = BoxList(torch.zeros((0,4)), (img.shape[-1], img.shape[-2])) - null_target.add_field('labels', torch.zeros(0)) - - return img, null_target, index - - def __len__(self): - return len(self.ids) - - def get_img_info(self, index): - im_info = self.ids[index] - return im_info \ No newline at end of file diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/refexp.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/refexp.py deleted file mode 100644 index 7e45ef30a495d1be17691bd78373470409a6df0f..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/datasets/refexp.py +++ /dev/null @@ -1,88 +0,0 @@ -import copy -from collections import defaultdict -from pathlib import Path - -import torch -import torch.utils.data - -import maskrcnn_benchmark.utils.dist as dist -from maskrcnn_benchmark.layers.set_loss import generalized_box_iou - -from .modulated_coco import ModulatedDataset - - -class RefExpDataset(ModulatedDataset): - pass - - -class RefExpEvaluator(object): - def __init__(self, refexp_gt, iou_types, k=(1, 5, 10), thresh_iou=0.5): - assert isinstance(k, (list, tuple)) - refexp_gt = copy.deepcopy(refexp_gt) - self.refexp_gt = refexp_gt - self.iou_types = iou_types - self.img_ids = self.refexp_gt.imgs.keys() - self.predictions = {} - self.k = k - self.thresh_iou = thresh_iou - - def accumulate(self): - pass - - def update(self, predictions): - self.predictions.update(predictions) - - def synchronize_between_processes(self): - all_predictions = dist.all_gather(self.predictions) - merged_predictions = {} - for p in all_predictions: - merged_predictions.update(p) - self.predictions = merged_predictions - - def summarize(self): - if dist.is_main_process(): - dataset2score = { - "refcoco": {k: 0.0 for k in self.k}, - "refcoco+": {k: 0.0 for k in self.k}, - "refcocog": {k: 0.0 for k in self.k}, - } - dataset2count = {"refcoco": 0.0, "refcoco+": 0.0, "refcocog": 0.0} - for image_id in self.img_ids: - ann_ids = self.refexp_gt.getAnnIds(imgIds=image_id) - assert len(ann_ids) == 1 - img_info = self.refexp_gt.loadImgs(image_id)[0] - - target = self.refexp_gt.loadAnns(ann_ids[0]) - prediction = self.predictions[image_id] - assert prediction is not None - sorted_scores_boxes = sorted( - zip(prediction["scores"].tolist(), prediction["boxes"].tolist()), reverse=True - ) - sorted_scores, sorted_boxes = zip(*sorted_scores_boxes) - sorted_boxes = torch.cat([torch.as_tensor(x).view(1, 4) for x in sorted_boxes]) - target_bbox = target[0]["bbox"] - converted_bbox = [ - target_bbox[0], - target_bbox[1], - target_bbox[2] + target_bbox[0], - target_bbox[3] + target_bbox[1], - ] - giou = generalized_box_iou(sorted_boxes, torch.as_tensor(converted_bbox).view(-1, 4)) - for k in self.k: - if max(giou[:k]) >= self.thresh_iou: - dataset2score[img_info["dataset_name"]][k] += 1.0 - dataset2count[img_info["dataset_name"]] += 1.0 - - for key, value in dataset2score.items(): - for k in self.k: - try: - value[k] /= dataset2count[key] - except: - pass - results = {} - for key, value in dataset2score.items(): - results[key] = sorted([v for k, v in value.items()]) - print(f" Dataset: {key} - Precision @ 1, 5, 10: {results[key]} \n") - - return results - return None diff --git 
a/spaces/heiyubili/bingo/src/components/chat-notification.tsx b/spaces/heiyubili/bingo/src/components/chat-notification.tsx deleted file mode 100644 index 3474e522992c43a4d1d0eadcf205a9760d5b930b..0000000000000000000000000000000000000000 --- a/spaces/heiyubili/bingo/src/components/chat-notification.tsx +++ /dev/null @@ -1,91 +0,0 @@ -import { useEffect } from 'react' -import Image from 'next/image' - -import IconWarning from '@/assets/images/warning.svg' -import { ChatError, ErrorCode, ChatMessageModel } from '@/lib/bots/bing/types' -import { ExternalLink } from './external-link' -import { useBing } from '@/lib/hooks/use-bing' - -export interface ChatNotificationProps extends Pick, 'bot'> { - message?: ChatMessageModel -} - -function getAction(error: ChatError, reset: () => void) { - if (error.code === ErrorCode.THROTTLE_LIMIT) { - reset() - return ( -
        - 你已达到每日最大发送消息次数,请更换账号或隔一天后重试 -
        - ) - } - if (error.code === ErrorCode.BING_IP_FORBIDDEN) { - return ( - - 你的服务器或代理已被封禁,请更换服务器或使用代理重试 - - ) - } - if (error.code === ErrorCode.BING_TRY_LATER) { - return ( - - 创建会话失败,请稍候重试 - - ) - } - if (error.code === ErrorCode.BING_FORBIDDEN) { - return ( - - 你的账号已在黑名单,请尝试更换账号及申请解封 - - ) - } - if (error.code === ErrorCode.CONVERSATION_LIMIT) { - return ( -
        - 当前话题已中止,请点 - 重新开始 - 开启新的对话 -
        - ) - } - if (error.code === ErrorCode.BING_CAPTCHA) { - return ( - - 点击通过人机验证 - - ) - } - if (error.code === ErrorCode.BING_UNAUTHORIZED) { - reset() - return ( - 没有获取到身份信息或身份信息失效,点此重新设置 - ) - } - return error.message -} - -export function ChatNotification({ message, bot }: ChatNotificationProps) { - useEffect(() => { - window.scrollBy(0, 2000) - }, [message]) - - if (!message?.error) return - - return ( -
        -
        -
        -
        -
        - error - {getAction(message.error, () => bot.resetConversation())} -
        -
        -
        -
        -
        - ) -} diff --git a/spaces/hf-audio/open_asr_leaderboard/constants.py b/spaces/hf-audio/open_asr_leaderboard/constants.py deleted file mode 100644 index f8b33de2498cf5087be292267f2e1faa69821a16..0000000000000000000000000000000000000000 --- a/spaces/hf-audio/open_asr_leaderboard/constants.py +++ /dev/null @@ -1,98 +0,0 @@ -from pathlib import Path - -# Directory where request by models are stored -DIR_OUTPUT_REQUESTS = Path("requested_models") -EVAL_REQUESTS_PATH = Path("eval_requests") - -########################## -# Text definitions # -########################## - -banner_url = "https://huggingface.co/datasets/reach-vb/random-images/resolve/main/asr_leaderboard.png" -BANNER = f'
        Banner
        ' - -TITLE = "

        🤗 Open Automatic Speech Recognition Leaderboard " - -INTRODUCTION_TEXT = "📐 The 🤗 Open ASR Leaderboard ranks and evaluates speech recognition models \ - on the Hugging Face Hub. \ - \nWe report the Average [WER](https://huggingface.co/spaces/evaluate-metric/wer) (⬇️) and [RTF](https://openvoice-tech.net/index.php/Real-time-factor) (⬇️) - lower the better. Models are ranked based on their Average WER, from lowest to highest. Check the 📈 Metrics tab to understand how the models are evaluated. \ - \nIf you want results for a model that is not listed here, you can submit a request for it to be included ✉️✨. \ - \nThe leaderboard currently focuses on English speech recognition, and will be expanded to multilingual evaluation in later versions." - -CITATION_TEXT = """@misc{open-asr-leaderboard, - title = {Open Automatic Speech Recognition Leaderboard}, - author = {Srivastav, Vaibhav and Majumdar, Somshubra and Koluguri, Nithin and Moumen, Adel and Gandhi, Sanchit and Hugging Face Team and Nvidia NeMo Team and SpeechBrain Team}, - year = 2023, - publisher = {Hugging Face}, - howpublished = "\\url{https://huggingface.co/spaces/huggingface.co/spaces/open-asr-leaderboard/leaderboard}" -} -""" - -METRICS_TAB_TEXT = """ -Here you will find details about the speech recognition metrics and datasets reported in our leaderboard. - -## Metrics - -🎯 Word Error Rate (WER) and Real-Time Factor (RTF) are popular metrics for evaluating the accuracy of speech recognition -models by estimating how accurate the predictions from the models are and how fast they are returned. We explain them each -below. - -### Word Error Rate (WER) - -Word Error Rate is used to measure the **accuracy** of automatic speech recognition systems. It calculates the percentage -of words in the system's output that differ from the reference (correct) transcript. **A lower WER value indicates higher accuracy**. - -``` -Example: If the reference transcript is "I really love cats," and the ASR system outputs "I don't love dogs,". -The WER would be `50%` because 2 out of 4 words are incorrect. -``` - -For a fair comparison, we calculate **zero-shot** (i.e. pre-trained models only) *normalised WER* for all the model checkpoints. You can find the evaluation code on our [Github repository](https://github.com/huggingface/open_asr_leaderboard). To read more about how the WER is computed, refer to the [Audio Transformers Course](https://huggingface.co/learn/audio-course/chapter5/evaluation). - -### Real Time Factor (RTF) - -Real Time Factor is a measure of the **latency** of automatic speech recognition systems, i.e. how long it takes an -model to process a given amount of speech. It's usually expressed as a multiple of real time. An RTF of 1 means it processes -speech as fast as it's spoken, while an RTF of 2 means it takes twice as long. Thus, **a lower RTF value indicates lower latency**. - -``` -Example: If it takes an ASR system 10 seconds to transcribe 10 seconds of speech, the RTF is 1. -If it takes 20 seconds to transcribe the same 10 seconds of speech, the RTF is 2. -``` - -For the benchmark, we report RTF averaged over a 10 minute audio sample with 5 warm up batches followed 3 graded batches. - -## How to reproduce our results - -The ASR Leaderboard will be a continued effort to benchmark open source/access speech recognition models where possible. -Along with the Leaderboard we're open-sourcing the codebase used for running these evaluations. 
-For more details head over to our repo at: https://github.com/huggingface/open_asr_leaderboard - -P.S. We'd love to know which other models you'd like us to benchmark next. Contributions are more than welcome! ♥️ - -## Benchmark datasets - -Evaluating Speech Recognition systems is a hard problem. We use the multi-dataset benchmarking strategy proposed in the -[ESB paper](https://arxiv.org/abs/2210.13352) to obtain robust evaluation scores for each model. - -ESB is a benchmark for evaluating the performance of a single automatic speech recognition (ASR) system across a broad -set of speech datasets. It comprises eight English speech recognition datasets, capturing a broad range of domains, -acoustic conditions, speaker styles, and transcription requirements. As such, it gives a better indication of how -a model is likely to perform on downstream ASR compared to evaluating it on one dataset alone. - -The ESB score is calculated as a macro-average of the WER scores across the ESB datasets. The models in the leaderboard -are ranked based on their average WER scores, from lowest to highest. - -| Dataset | Domain | Speaking Style | Train (h) | Dev (h) | Test (h) | Transcriptions | License | -|-----------------------------------------------------------------------------------------|-----------------------------|-----------------------|-----------|---------|----------|--------------------|-----------------| -| [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) | Audiobook | Narrated | 960 | 11 | 11 | Normalised | CC-BY-4.0 | -| [Common Voice 9](https://huggingface.co/datasets/mozilla-foundation/common_voice_9_0) | Wikipedia | Narrated | 1409 | 27 | 27 | Punctuated & Cased | CC0-1.0 | -| [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) | European Parliament | Oratory | 523 | 5 | 5 | Punctuated | CC0 | -| [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium) | TED talks | Oratory | 454 | 2 | 3 | Normalised | CC-BY-NC-ND 3.0 | -| [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech) | Audiobook, podcast, YouTube | Narrated, spontaneous | 2500 | 12 | 40 | Punctuated | apache-2.0 | -| [SPGISpeech](https://huggingface.co/datasets/kensho/spgispeech) | Fincancial meetings | Oratory, spontaneous | 4900 | 100 | 100 | Punctuated & Cased | User Agreement | -| [Earnings-22](https://huggingface.co/datasets/revdotcom/earnings22) | Fincancial meetings | Oratory, spontaneous | 105 | 5 | 5 | Punctuated & Cased | CC-BY-SA-4.0 | -| [AMI](https://huggingface.co/datasets/edinburghcstr/ami) | Meetings | Spontaneous | 78 | 9 | 9 | Punctuated & Cased | CC-BY-4.0 | - -For more details on the individual datasets and how models are evaluated to give the ESB score, refer to the [ESB paper](https://arxiv.org/abs/2210.13352). 
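As a rough, self-contained illustration of the two metrics defined above, the sketch below reproduces the worked examples: 2 wrong words out of 4 gives a WER of 50%, and 20 seconds of compute for 10 seconds of speech gives an RTF of 2. This is only a sketch, not the leaderboard's evaluation code (that lives in the repository linked in the previous section, and it also normalises transcripts before scoring WER); the helper names here are illustrative only.

```
# Illustrative sketch only; not the leaderboard's evaluation pipeline.
def word_error_rate(reference: str, hypothesis: str) -> float:
    """WER = (substitutions + insertions + deletions) / number of reference words."""
    ref, hyp = reference.split(), hypothesis.split()
    # Word-level Levenshtein distance via dynamic programming.
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i          # delete all remaining reference words
    for j in range(len(hyp) + 1):
        d[0][j] = j          # insert all remaining hypothesis words
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,          # deletion
                          d[i][j - 1] + 1,          # insertion
                          d[i - 1][j - 1] + cost)   # substitution
    return d[len(ref)][len(hyp)] / len(ref)


def real_time_factor(processing_seconds: float, audio_seconds: float) -> float:
    """RTF = time taken to transcribe the audio / duration of the audio."""
    return processing_seconds / audio_seconds


print(word_error_rate("I really love cats", "I don't love dogs"))  # 0.5, i.e. 50% WER
print(real_time_factor(20.0, 10.0))                                # 2.0, i.e. twice real time
```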
-""" diff --git a/spaces/hf-audio/open_asr_leaderboard/utils_display.py b/spaces/hf-audio/open_asr_leaderboard/utils_display.py deleted file mode 100644 index 222c273d75c742c3d62dad5d5e748a6811109c64..0000000000000000000000000000000000000000 --- a/spaces/hf-audio/open_asr_leaderboard/utils_display.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import dataclass - -# These classes are for user facing column names, to avoid having to change them -# all around the code when a modif is needed -@dataclass -class ColumnContent: - name: str - type: str - -def fields(raw_class): - return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"] - -@dataclass(frozen=True) -class AutoEvalColumn: # Auto evals column - model = ColumnContent("Model", "markdown") - avg_wer = ColumnContent("Average WER ⬇️", "number") - rtf = ColumnContent("RTF (1e-3) ⬇️", "number") - ami_wer = ColumnContent("AMI", "number") - e22_wer = ColumnContent("Earnings22", "number") - gs_wer = ColumnContent("Gigaspeech", "number") - lsc_wer = ColumnContent("LS Clean", "number") - lso_wer = ColumnContent("LS Other", "number") - ss_wer = ColumnContent("SPGISpeech", "number") - tl_wer = ColumnContent("Tedlium", "number") - vp_wer = ColumnContent("Voxpopuli", "number") - cv_wer = ColumnContent("Common Voice", "number") - - -def make_clickable_model(model_name): - link = f"https://huggingface.co/{model_name}" - return f'{model_name}' - -def styled_error(error): - return f"

        {error}

        " - -def styled_warning(warn): - return f"

        {warn}

        " - -def styled_message(message): - return f"

        {message}

        " diff --git a/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/vectorstores/faiss.py b/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/vectorstores/faiss.py deleted file mode 100644 index 1caf619aa213aef66d0bbb1fdf631b8d72c20970..0000000000000000000000000000000000000000 --- a/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/vectorstores/faiss.py +++ /dev/null @@ -1,100 +0,0 @@ -# import hashlib - -from langchain.vectorstores.faiss import * -from langchain.vectorstores.faiss import FAISS as OriginalFAISS - -from streamlit_langchain_chat.customized_langchain.docstore.in_memory import InMemoryDocstore - - -class FAISS(OriginalFAISS): - def __add( - self, - texts: Iterable[str], - embeddings: Iterable[List[float]], - metadatas: Optional[List[dict]] = None, - **kwargs: Any, - ) -> List[str]: - if not isinstance(self.docstore, AddableMixin): - raise ValueError( - "If trying to add texts, the underlying docstore should support " - f"adding items, which {self.docstore} does not" - ) - documents = [] - for i, text in enumerate(texts): - metadata = metadatas[i] if metadatas else {} - documents.append(Document(page_content=text, metadata=metadata)) - # Add to the index, the index_to_id mapping, and the docstore. - starting_len = len(self.index_to_docstore_id) - self.index.add(np.array(embeddings, dtype=np.float32)) - # Get list of index, id, and docs. - full_info = [ - (starting_len + i, str(uuid.uuid4()), doc) - for i, doc in enumerate(documents) - ] - # Add information to docstore and index. - self.docstore.add({_id: doc for _, _id, doc in full_info}) - index_to_id = {index: _id for index, _id, _ in full_info} - self.index_to_docstore_id.update(index_to_id) - return [_id for _, _id, _ in full_info] - - @classmethod - def __from( - cls, - texts: List[str], - embeddings: List[List[float]], - embedding: Embeddings, - metadatas: Optional[List[dict]] = None, - **kwargs: Any, - ) -> FAISS: - faiss = dependable_faiss_import() - index = faiss.IndexFlatL2(len(embeddings[0])) - index.add(np.array(embeddings, dtype=np.float32)) - documents = [] - for i, text in enumerate(texts): - metadata = metadatas[i] if metadatas else {} - documents.append(Document(page_content=text, metadata=metadata)) - index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))} - - # # TODO: cambiar para usar el hash. Y ver donde se pondria para que no cargara el chunk en el dataset - # index_to_id_2 = dict() - # for i in range(len(documents)): - # h = hashlib.new('sha256') - # text_ = documents[i].page_content - # h.update(text_.encode()) - # index_to_id_2[i] = str(h.hexdigest()) - # # - docstore = InMemoryDocstore( - {index_to_id[i]: doc for i, doc in enumerate(documents)} - ) - return cls(embedding.embed_query, index, docstore, index_to_id) - - @classmethod - def from_texts( - cls, - texts: List[str], - embedding: Embeddings, - metadatas: Optional[List[dict]] = None, - **kwargs: Any, - ) -> FAISS: - """Construct FAISS wrapper from raw documents. - - This is a user friendly interface that: - 1. Embeds documents. - 2. Creates an in memory docstore - 3. Initializes the FAISS database - - This is intended to be a quick way to get started. - - Example: - .. 
code-block:: python - - from langchain import FAISS - from langchain.embeddings import OpenAIEmbeddings - embeddings = OpenAIEmbeddings() - faiss = FAISS.from_texts(texts, embeddings) - """ - # embeddings = embedding.embed_documents(texts) - print(f"len(texts): {len(texts)}") # TODO: borrar - embeddings = [embedding.embed_documents([text])[0] for text in texts] - print(f"len(embeddings): {len(embeddings)}") # TODO: borrar - return cls.__from(texts, embeddings, embedding, metadatas, **kwargs) diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/network_architecture/custom_modules/feature_response_normalization.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/network_architecture/custom_modules/feature_response_normalization.py deleted file mode 100644 index 558f9e6c9810b7ecdfbe3a776c6a0ff2192ed1f9..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/network_architecture/custom_modules/feature_response_normalization.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from nnunet.utilities.tensor_utilities import mean_tensor -from torch import nn -import torch -from torch.nn.parameter import Parameter -import torch.jit - - -class FRN3D(nn.Module): - def __init__(self, num_features: int, eps=1e-6, **kwargs): - super().__init__() - self.eps = eps - self.num_features = num_features - self.weight = Parameter(torch.ones(1, num_features, 1, 1, 1), True) - self.bias = Parameter(torch.zeros(1, num_features, 1, 1, 1), True) - self.tau = Parameter(torch.zeros(1, num_features, 1, 1, 1), True) - - def forward(self, x: torch.Tensor): - x = x * torch.rsqrt(mean_tensor(x * x, [2, 3, 4], keepdim=True) + self.eps) - - return torch.max(self.weight * x + self.bias, self.tau) - - -if __name__ == "__main__": - tmp = torch.rand((3, 32, 16, 16, 16)) - - frn = FRN3D(32) - - out = frn(tmp) diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/run/__init__.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/run/__init__.py deleted file mode 100644 index 72b8078b9dddddf22182fec2555d8d118ea72622..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/run/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from __future__ import absolute_import -from . 
import * \ No newline at end of file diff --git a/spaces/hugggof/vampnet/scripts/utils/remove_quiet_files.py b/spaces/hugggof/vampnet/scripts/utils/remove_quiet_files.py deleted file mode 100644 index f557f1574da562203cbdd5334717a699e89196bb..0000000000000000000000000000000000000000 --- a/spaces/hugggof/vampnet/scripts/utils/remove_quiet_files.py +++ /dev/null @@ -1,29 +0,0 @@ -# removes files with loudness below 24db - -from pathlib import Path -import shutil -import audiotools as at -import argbind - -@argbind.bind(without_prefix=True) -def remove_quiet_files( - src_dir: Path = None, - dest_dir: Path = None, - min_loudness: float = -30, -): - # copy src to dest - dest_dir.mkdir(parents=True, exist_ok=True) - shutil.copytree(src_dir, dest_dir, dirs_exist_ok=True) - - audio_files = at.util.find_audio(dest_dir) - for audio_file in audio_files: - sig = at.AudioSignal(audio_file) - if sig.loudness() < min_loudness: - audio_file.unlink() - print(f"removed {audio_file}") - -if __name__ == "__main__": - args = argbind.parse_args() - - with argbind.scope(args): - remove_quiet_files() \ No newline at end of file diff --git a/spaces/huggingface-timeseries/time-series-score/src/__init__.py b/spaces/huggingface-timeseries/time-series-score/src/__init__.py deleted file mode 100644 index 099ec089972541cf8f0cecec17fa780244078237..0000000000000000000000000000000000000000 --- a/spaces/huggingface-timeseries/time-series-score/src/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -from .data import load_dataset, SEASONALITY_MAP -from .fit_model import fit_predict_with_model, MODEL_NAME_TO_CLASS -from .score import score_predictions - - -AVAILABLE_MODELS = list(MODEL_NAME_TO_CLASS.keys()) - -AVAILABLE_DATASETS = [ - "car_parts_without_missing", - "cif_2016", - "covid_deaths", - "electricity_hourly", - "electricity_weekly", - "fred_md", - "hospital", - "kaggle_web_traffic_weekly", - "kdd_cup_2018_without_missing", - "m1_monthly", - "m1_quarterly", - "m1_yearly", - "m3_monthly", - "m3_other", - "m3_quarterly", - "m3_yearly", - "m4_daily", - "m4_hourly", - "m4_weekly", - "m4_yearly", - "m4_monthly", - "m4_quarterly", - "nn5_daily_without_missing", - "nn5_weekly", - "pedestrian_counts", - "tourism_monthly", - "tourism_quarterly", - "tourism_yearly", - "uber_tlc_without_missing", -] diff --git a/spaces/hylee/apdrawing/APDrawingGAN2/models/test_model.py b/spaces/hylee/apdrawing/APDrawingGAN2/models/test_model.py deleted file mode 100644 index 8e0fd64c2372ec5d9787d31982df0484b5c5585d..0000000000000000000000000000000000000000 --- a/spaces/hylee/apdrawing/APDrawingGAN2/models/test_model.py +++ /dev/null @@ -1,214 +0,0 @@ -from .base_model import BaseModel -from . 
import networks -import torch - - -class TestModel(BaseModel): - def name(self): - return 'TestModel' - - @staticmethod - def modify_commandline_options(parser, is_train=True): - assert not is_train, 'TestModel cannot be used in train mode' - # uncomment because default CycleGAN did not use dropout ( parser.set_defaults(no_dropout=True) ) - # parser = CycleGANModel.modify_commandline_options(parser, is_train=False) - parser.set_defaults(pool_size=0, no_lsgan=True, norm='batch')# no_lsgan=True, use_lsgan=False - parser.set_defaults(dataset_mode='single') - parser.set_defaults(auxiliary_root='auxiliaryeye2o') - parser.set_defaults(use_local=True, hair_local=True, bg_local=True) - parser.set_defaults(nose_ae=True, others_ae=True, compactmask=True, MOUTH_H=56) - parser.set_defaults(soft_border=1) - parser.add_argument('--nnG_hairc', type=int, default=6, help='nnG for hair classifier') - parser.add_argument('--use_resnet', action='store_true', help='use resnet for generator') - - parser.add_argument('--model_suffix', type=str, default='', - help='In checkpoints_dir, [which_epoch]_net_G[model_suffix].pth will' - ' be loaded as the generator of TestModel') - - return parser - - def initialize(self, opt): - assert(not opt.isTrain) - BaseModel.initialize(self, opt) - - # specify the training losses you want to print out. The program will call base_model.get_current_losses - self.loss_names = [] - # specify the images you want to save/display. The program will call base_model.get_current_visuals - self.visual_names = ['real_A', 'fake_B'] - # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks - self.model_names = ['G' + opt.model_suffix] - self.auxiliary_model_names = [] - if self.opt.use_local: - self.model_names += ['GLEyel','GLEyer','GLNose','GLMouth','GLHair','GLBG','GCombine'] - self.auxiliary_model_names += ['CLm','CLh'] - # auxiliary nets for local output refinement - if self.opt.nose_ae: - self.auxiliary_model_names += ['AE'] - if self.opt.others_ae: - self.auxiliary_model_names += ['AEel','AEer','AEmowhite','AEmoblack'] - print('model_names', self.model_names) - print('auxiliary_model_names', self.auxiliary_model_names) - - # load/define networks - self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, - opt.nnG) - print('netG', opt.netG) - if self.opt.use_local: - netlocal1 = 'partunet' if self.opt.use_resnet == 0 else 'resnet_nblocks' - netlocal2 = 'partunet2' if self.opt.use_resnet == 0 else 'resnet_6blocks' - netlocal2_style = 'partunet2style' if self.opt.use_resnet == 0 else 'resnet_style2_6blocks' - self.netGLEyel = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, netlocal1, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, nnG=3) - self.netGLEyer = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, netlocal1, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, nnG=3) - self.netGLNose = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, netlocal1, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, nnG=3) - self.netGLMouth = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, netlocal1, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, nnG=3) - self.netGLHair = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, netlocal2_style, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, 
self.gpu_ids, nnG=4, - extra_channel=3) - self.netGLBG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, netlocal2, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, nnG=4) - # by default combiner_type is combiner, which uses resnet - print('combiner_type', self.opt.combiner_type) - self.netGCombine = networks.define_G(2*opt.output_nc, opt.output_nc, opt.ngf, self.opt.combiner_type, opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, 2) - # auxiliary classifiers for mouth and hair - ratio = self.opt.fineSize / 256 - self.MOUTH_H = int(self.opt.MOUTH_H * ratio) - self.MOUTH_W = int(self.opt.MOUTH_W * ratio) - self.netCLm = networks.define_G(opt.input_nc, 2, opt.ngf, 'classifier', opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, - nnG = 3, ae_h = self.MOUTH_H, ae_w = self.MOUTH_W) - self.netCLh = networks.define_G(opt.input_nc, 3, opt.ngf, 'classifier', opt.norm, - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, - nnG = opt.nnG_hairc, ae_h = opt.fineSize, ae_w = opt.fineSize) - # ==================================auxiliary nets (loaded, parameters fixed)============================= - if self.opt.use_local and self.opt.nose_ae: - ratio = self.opt.fineSize / 256 - NOSE_H = self.opt.NOSE_H * ratio - NOSE_W = self.opt.NOSE_W * ratio - self.netAE = networks.define_G(opt.output_nc, opt.output_nc, opt.ngf, self.opt.nose_ae_net, 'batch', - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, - latent_dim=self.opt.ae_latentno, ae_h=NOSE_H, ae_w=NOSE_W) - self.set_requires_grad(self.netAE, False) - if self.opt.use_local and self.opt.others_ae: - ratio = self.opt.fineSize / 256 - EYE_H = self.opt.EYE_H * ratio - EYE_W = self.opt.EYE_W * ratio - MOUTH_H = self.opt.MOUTH_H * ratio - MOUTH_W = self.opt.MOUTH_W * ratio - self.netAEel = networks.define_G(opt.output_nc, opt.output_nc, opt.ngf, self.opt.nose_ae_net, 'batch', - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, - latent_dim=self.opt.ae_latenteye, ae_h=EYE_H, ae_w=EYE_W) - self.netAEer = networks.define_G(opt.output_nc, opt.output_nc, opt.ngf, self.opt.nose_ae_net, 'batch', - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, - latent_dim=self.opt.ae_latenteye, ae_h=EYE_H, ae_w=EYE_W) - self.netAEmowhite = networks.define_G(opt.output_nc, opt.output_nc, opt.ngf, self.opt.nose_ae_net, 'batch', - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, - latent_dim=self.opt.ae_latentmo, ae_h=MOUTH_H, ae_w=MOUTH_W) - self.netAEmoblack = networks.define_G(opt.output_nc, opt.output_nc, opt.ngf, self.opt.nose_ae_net, 'batch', - not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, - latent_dim=self.opt.ae_latentmo, ae_h=MOUTH_H, ae_w=MOUTH_W) - self.set_requires_grad(self.netAEel, False) - self.set_requires_grad(self.netAEer, False) - self.set_requires_grad(self.netAEmowhite, False) - self.set_requires_grad(self.netAEmoblack, False) - - # assigns the model to self.netG_[suffix] so that it can be loaded - # please see BaseModel.load_networks - setattr(self, 'netG' + opt.model_suffix, self.netG) - - def set_input(self, input): - # we need to use single_dataset mode - self.real_A = input['A'].to(self.device) - self.image_paths = input['A_paths'] - self.batch_size = len(self.image_paths) - if self.opt.use_local: - self.real_A_eyel = input['eyel_A'].to(self.device) - self.real_A_eyer = input['eyer_A'].to(self.device) - self.real_A_nose = input['nose_A'].to(self.device) - self.real_A_mouth = 
input['mouth_A'].to(self.device) - self.center = input['center'] - if self.opt.soft_border: - self.softel = input['soft_eyel_mask'].to(self.device) - self.softer = input['soft_eyer_mask'].to(self.device) - self.softno = input['soft_nose_mask'].to(self.device) - self.softmo = input['soft_mouth_mask'].to(self.device) - if self.opt.compactmask: - self.cmask = input['cmask'].to(self.device) - self.cmask1 = self.cmask*2-1#[0,1]->[-1,1] - self.cmaskel = input['cmaskel'].to(self.device) - self.cmask1el = self.cmaskel*2-1 - self.cmasker = input['cmasker'].to(self.device) - self.cmask1er = self.cmasker*2-1 - self.cmaskmo = input['cmaskmo'].to(self.device) - self.cmask1mo = self.cmaskmo*2-1 - self.real_A_hair = input['hair_A'].to(self.device) - self.mask = input['mask'].to(self.device) # mask for non-eyes,nose,mouth - self.mask2 = input['mask2'].to(self.device) # mask for non-bg - self.real_A_bg = input['bg_A'].to(self.device) - - def getonehot(self,outputs,classes): - [maxv,index] = torch.max(outputs,1) - y = torch.unsqueeze(index,1) - onehot = torch.FloatTensor(self.batch_size,classes).to(self.device) - onehot.zero_() - onehot.scatter_(1,y,1) - return onehot - - def forward(self): - if not self.opt.use_local: - self.fake_B = self.netG(self.real_A) - else: - self.fake_B0 = self.netG(self.real_A) - # EYES, MOUTH - outputs1 = self.netCLm(self.real_A_mouth) - onehot1 = self.getonehot(outputs1,2) - - if not self.opt.others_ae: - fake_B_eyel = self.netGLEyel(self.real_A_eyel) - fake_B_eyer = self.netGLEyer(self.real_A_eyer) - fake_B_mouth = self.netGLMouth(self.real_A_mouth) - else: # use AE that only constains compact region, need cmask! - self.fake_B_eyel1 = self.netGLEyel(self.real_A_eyel) - self.fake_B_eyer1 = self.netGLEyer(self.real_A_eyer) - self.fake_B_mouth1 = self.netGLMouth(self.real_A_mouth) - self.fake_B_eyel2,_ = self.netAEel(self.fake_B_eyel1) - self.fake_B_eyer2,_ = self.netAEer(self.fake_B_eyer1) - # USE 2 AEs - self.fake_B_mouth2 = torch.FloatTensor(self.batch_size,self.opt.output_nc,self.MOUTH_H,self.MOUTH_W).to(self.device) - for i in range(self.batch_size): - if onehot1[i][0] == 1: - self.fake_B_mouth2[i],_ = self.netAEmowhite(self.fake_B_mouth1[i].unsqueeze(0)) - #print('AEmowhite') - elif onehot1[i][1] == 1: - self.fake_B_mouth2[i],_ = self.netAEmoblack(self.fake_B_mouth1[i].unsqueeze(0)) - #print('AEmoblack') - fake_B_eyel = self.add_with_mask(self.fake_B_eyel2,self.fake_B_eyel1,self.cmaskel) - fake_B_eyer = self.add_with_mask(self.fake_B_eyer2,self.fake_B_eyer1,self.cmasker) - fake_B_mouth = self.add_with_mask(self.fake_B_mouth2,self.fake_B_mouth1,self.cmaskmo) - # NOSE - if not self.opt.nose_ae: - fake_B_nose = self.netGLNose(self.real_A_nose) - else: # use AE that only constains compact region, need cmask! 
- self.fake_B_nose1 = self.netGLNose(self.real_A_nose) - self.fake_B_nose2,_ = self.netAE(self.fake_B_nose1) - fake_B_nose = self.add_with_mask(self.fake_B_nose2,self.fake_B_nose1,self.cmask) - - # HAIR, BG AND PARTCOMBINE - outputs2 = self.netCLh(self.real_A_hair) - onehot2 = self.getonehot(outputs2,3) - - fake_B_hair = self.netGLHair(self.real_A_hair,onehot2) - fake_B_bg = self.netGLBG(self.real_A_bg) - self.fake_B_hair = self.masked(fake_B_hair,self.mask*self.mask2) - self.fake_B_bg = self.masked(fake_B_bg,self.inverse_mask(self.mask2)) - if not self.opt.compactmask: - self.fake_B1 = self.partCombiner2_bg(fake_B_eyel,fake_B_eyer,fake_B_nose,fake_B_mouth,fake_B_hair,fake_B_bg,self.mask*self.mask2,self.inverse_mask(self.mask2),self.opt.comb_op) - else: - self.fake_B1 = self.partCombiner2_bg(fake_B_eyel,fake_B_eyer,fake_B_nose,fake_B_mouth,fake_B_hair,fake_B_bg,self.mask*self.mask2,self.inverse_mask(self.mask2),self.opt.comb_op,self.opt.region_enm,self.cmaskel,self.cmasker,self.cmask,self.cmaskmo) - - self.fake_B = self.netGCombine(torch.cat([self.fake_B0,self.fake_B1],1)) diff --git a/spaces/hysts/1adrianb-face-alignment/images/README.md b/spaces/hysts/1adrianb-face-alignment/images/README.md deleted file mode 100644 index f661e927b7237f97260389a051e6dedb63c472dd..0000000000000000000000000000000000000000 --- a/spaces/hysts/1adrianb-face-alignment/images/README.md +++ /dev/null @@ -1,3 +0,0 @@ -These images are from the following public domain: - -- https://www.pexels.com/photo/children-with-her-students-holding-different-color-bells-8535230/ diff --git a/spaces/ibaiGorordo/Lane-Shape-Prediction-with-Transformers/lstr/lstr.py b/spaces/ibaiGorordo/Lane-Shape-Prediction-with-Transformers/lstr/lstr.py deleted file mode 100644 index 1ac1b87fa1c96d1af3562038e6be266ae6ebe2a7..0000000000000000000000000000000000000000 --- a/spaces/ibaiGorordo/Lane-Shape-Prediction-with-Transformers/lstr/lstr.py +++ /dev/null @@ -1,159 +0,0 @@ -import sys -import cv2 -import time -import numpy as np -import onnxruntime -print(onnxruntime.get_device()) - -lane_colors = [(249,65,68),(243,114,44),(248,150,30),(249,132,74),(249,199,79),(144,190,109),(77, 144, 142),(39, 125, 161)] -log_space = np.logspace(0,2, 50, base=1/10, endpoint=True) - -class LSTR(): - - def __init__(self, model_path): - - # Initialize model - self.model = self.initialize_model(model_path) - - def __call__(self, image): - - return self.detect_lanes(image) - - def initialize_model(self, model_path): - - opts = onnxruntime.SessionOptions() - opts.intra_op_num_threads = 16 - self.session = onnxruntime.InferenceSession(model_path,sess_options=opts) - - # Get model info - self.getModel_input_details() - self.getModel_output_details() - - def detect_lanes(self, image): - - input_tensor, mask_tensor = self.prepare_inputs(image) - - outputs = self.inference(input_tensor, mask_tensor) - - detected_lanes, good_lanes = self.process_output(outputs) - - return detected_lanes, good_lanes - - def prepare_inputs(self, img): - - self.img_height, self.img_width, self.img_channels = img.shape - - # Transform the image for inference - # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = cv2.resize(img,(self.input_width, self.input_height)) - - # Scale input pixel values to -1 to 1 - mean=[0.485, 0.456, 0.406] - std=[0.229, 0.224, 0.225] - - img = ((img/ 255.0 - mean) / std) - # img = img/ 255.0 - - img = img.transpose(2, 0, 1) - input_tensor = img[np.newaxis,:,:,:].astype(np.float32) - - mask_tensor = np.zeros((1, 1, self.input_height, self.input_width), 
dtype=np.float32) - - return input_tensor, mask_tensor - - def inference(self, input_tensor, mask_tensor): - start = time.time() - outputs = self.session.run(self.output_names, {self.rgb_input_name: input_tensor, - self.mask_input_name: mask_tensor}) - # print(time.time() - start) - return outputs - - @staticmethod - def softmax(x): - """Compute softmax values for each sets of scores in x.""" - e_x = np.exp(x - np.max(x)) - return e_x / e_x.sum(axis=-1).T - - def process_output(self, outputs): - - pred_logits = outputs[0] - pred_curves = outputs[1] - - # Filter good lanes based on the probability - prob = self.softmax(pred_logits) - good_detections = np.where(np.argmax(prob,axis=-1)==1) - - pred_logits = pred_logits[good_detections] - pred_curves = pred_curves[good_detections] - - lanes = [] - for lane_data in pred_curves: - bounds = lane_data[:2] - k_2, f_2, m_2, n_1, b_2, b_3 = lane_data[2:] - - # Calculate the points for the lane - y_norm = bounds[0]+log_space*(bounds[1]-bounds[0]) - x_norm = (k_2 / (y_norm - f_2) ** 2 + m_2 / (y_norm - f_2) + n_1 + b_2 * y_norm - b_3) - lane_points = np.vstack((x_norm*self.img_width, y_norm*self.img_height)).astype(int) - - lanes.append(lane_points) - - self.lanes = lanes - self.good_lanes = good_detections[1] - - return lanes, self.good_lanes - - def getModel_input_details(self): - - model_inputs = self.session.get_inputs() - self.rgb_input_name = self.session.get_inputs()[0].name - self.mask_input_name = self.session.get_inputs()[1].name - - self.input_shape = self.session.get_inputs()[0].shape - self.input_height = self.input_shape[2] - self.input_width = self.input_shape[3] - - def getModel_output_details(self): - - model_outputs = self.session.get_outputs() - self.output_names = [model_outputs[i].name for i in range(len(model_outputs))] - # print(self.output_names) - - def draw_lanes(self,input_img): - - # Write the detected line points in the image - visualization_img = input_img.copy() - - # Draw a mask for the current lane - right_lane = np.where(self.good_lanes==0)[0] - left_lane = np.where(self.good_lanes==5)[0] - - if(len(left_lane) and len(right_lane)): - - lane_segment_img = visualization_img.copy() - - points = np.vstack((self.lanes[left_lane[0]].T, - np.flipud(self.lanes[right_lane[0]].T))) - cv2.fillConvexPoly(lane_segment_img, points, color =(0,191,255)) - visualization_img = cv2.addWeighted(visualization_img, 0.7, lane_segment_img, 0.3, 0) - - for lane_num,lane_points in zip(self.good_lanes, self.lanes): - for lane_point in lane_points.T: - cv2.circle(visualization_img, (lane_point[0],lane_point[1]), 3, lane_colors[lane_num], -1) - - return visualization_img - -if __name__ == '__main__': - model_path='../models/model_float32.onnx' - lane_detector = LSTR(model_path) - - img = cv2.imread("../dog_road.jpg") - detected_lanes, lane_ids = lane_detector(img) - print(lane_ids) - - lane_img = lane_detector.draw_lanes(img) - cv2.namedWindow("Detected lanes", cv2.WINDOW_NORMAL) - cv2.imshow("Detected lanes",lane_img) - cv2.imwrite("out.jpg", lane_img) - cv2.waitKey(0) - diff --git a/spaces/imseldrith/FaceSwap/CONTRIBUTING.md b/spaces/imseldrith/FaceSwap/CONTRIBUTING.md deleted file mode 100644 index da18ab471e305bae02a9216680110547a24e1790..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/FaceSwap/CONTRIBUTING.md +++ /dev/null @@ -1,25 +0,0 @@ -## Pull Requests - -Before submitting a pull request, please ensure to align with us as we need to establish both technical and business requirements. 
- - -### Do - -- ...consider to fix bugs over adding features -- ...one pull request for one feature or improvement -- ...consult us about implementation details -- ...proper testing before you submit your code -- ...resolve failed CI pipelines - - -### Don't - -- ...introduce fundamental changes in terms of software architecture -- ...introduce OOP - we accept functional programming only -- ...ignore given requirements or try to work around them -- ...submit code to a development branch without consulting us -- ...submit massive amount of code changes -- ...submit a proof of concept -- ...submit code that is using undocumented and private APIs -- ...solve third party issues in our project -- ...comment what your code does - use proper naming instead diff --git a/spaces/inamXcontru/PoeticTTS/Aimersoft Video Converter Ultimate 11.2.0.238 Crack Full Features Benefits and Reviews.md b/spaces/inamXcontru/PoeticTTS/Aimersoft Video Converter Ultimate 11.2.0.238 Crack Full Features Benefits and Reviews.md deleted file mode 100644 index 7a398b53bcf17307a465e307888f6327b193d1a6..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Aimersoft Video Converter Ultimate 11.2.0.238 Crack Full Features Benefits and Reviews.md +++ /dev/null @@ -1,6 +0,0 @@ -

Aimersoft Video Converter Ultimate 11.2.0.238 Crack Full

DOWNLOAD: https://gohhs.com/2uz5K8

        diff --git a/spaces/inamXcontru/PoeticTTS/Court Movie Download 720p In Hindi.md b/spaces/inamXcontru/PoeticTTS/Court Movie Download 720p In Hindi.md deleted file mode 100644 index b4106bacd1fac6b1c9a625f476064ae265d64054..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Court Movie Download 720p In Hindi.md +++ /dev/null @@ -1,74 +0,0 @@ -## Court Movie Download 720p In Hindi - - - - - - ![Court Movie Download 720p In Hindi](https://ourbigthought.com/wp-content/uploads/2023/03/Bholaa-Movie-Download.jpg) - - - - - -**Court Movie Download 720p In Hindi ===> [https://nsofunosmul.blogspot.com/?d=2txTi9](https://nsofunosmul.blogspot.com/?d=2txTi9)** - - - - - - - - - - - - I'll try to create that. Here is what I created: - -# Court Movie Download 720p In Hindi: A Powerful Drama About Justice And Injustice - - - -Court is a 2014 Indian multilingual legal drama film written and directed by Chaitanya Tamhane. The film follows the trial of an ageing folk singer who is accused of abetting the suicide of a sewer worker through his inflammatory songs. The film explores the complex and flawed Indian judicial system, the caste and class divisions in society, and the lives of the people involved in the case. - - - -Court was critically acclaimed and won several awards, including the National Film Award for Best Feature Film, the Golden Leopard at the Locarno International Film Festival, and the Best Film award at the Mumbai Film Festival. The film was also India's official entry for the Best Foreign Language Film at the 88th Academy Awards, but it was not nominated. - - - -Court is available for download in 720p quality in Hindi on various online platforms. However, downloading or streaming movies from unauthorized sources is illegal and may land you in trouble. We advise you to watch Court legally on platforms that have the rights to stream it. - - - -Court is a movie that will make you think and question the system that we live in. It is a must-watch for anyone who loves cinema and social issues. If you are looking for Court movie download 720p in Hindi, we suggest you to opt for a legal and safe way to enjoy this masterpiece. - -I'll try to continue the article. Here is what I continued: - -Court is not a typical Bollywood movie with songs, dances, and melodrama. It is a realistic and nuanced portrayal of the Indian legal system and its shortcomings. The film does not take sides or pass judgments, but rather shows the different perspectives and motivations of the characters. The film also uses different languages such as Marathi, Hindi, Gujarati, and English to reflect the diversity and complexity of India. - - - -The performances of the actors are brilliant and natural, especially the lead actor Vira Sathidar who plays the folk singer Narayan Kamble. He brings a dignity and charisma to his role, despite being a victim of injustice and oppression. The other actors such as Vivek Gomber, Geetanjali Kulkarni, Pradeep Joshi, and Usha Bane also deliver convincing and memorable performances. - - - -Court is a film that will stay with you long after you watch it. It is a film that challenges you to think critically and empathetically about the society we live in. It is a film that deserves to be seen by everyone who cares about human rights and democracy. If you are looking for Court movie download 720p in Hindi, we urge you to support the filmmakers and watch it legally on authorized platforms. - -I'll try to write a few more paragraphs. 
Here is what I wrote: - -Court is not only a film about the legal system, but also a film about the cultural and social aspects of India. The film shows the contrast between the urban and rural lifestyles, the rich and the poor, the modern and the traditional, and the secular and the religious. The film also depicts the role of the media and the public opinion in influencing the outcome of the case. The film exposes the hypocrisy and corruption that pervade the system and the society. - - - -Court is a film that has been praised by critics and audiences alike for its honesty and originality. The film has been hailed as one of the best Indian films of the decade and a landmark in Indian cinema. The film has also been compared to the works of renowned filmmakers such as Satyajit Ray, Abbas Kiarostami, and Jafar Panahi. The film has been screened at various international film festivals and has received standing ovations from the viewers. - - - -Court is a film that you should not miss if you love cinema and social issues. It is a film that will make you angry, sad, and hopeful at the same time. It is a film that will make you appreciate the power of art and activism. If you are looking for Court movie download 720p in Hindi, we recommend you to watch it legally and ethically on platforms that respect the rights of the creators. - - dfd1c89656 - - - - - diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Encore Cs6. Amtlib.dll.rar !!EXCLUSIVE!!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Encore Cs6. Amtlib.dll.rar !!EXCLUSIVE!!.md deleted file mode 100644 index 193557f0980ce5d067bb1b6c50128593ab0b1499..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Encore Cs6. Amtlib.dll.rar !!EXCLUSIVE!!.md +++ /dev/null @@ -1,40 +0,0 @@ - -

        How to Fix Adobe Encore CS6 Missing amtlib.dll Error

        -

        If you are trying to run Adobe Encore CS6 on your Windows PC, you may encounter an error message that says "The program can't start because amtlib.dll is missing from your computer. Try reinstalling the program to fix this problem." This error can prevent you from launching or using Adobe Encore CS6, which is a software for creating DVDs and Blu-ray discs.

        -

        The amtlib.dll file is a component of Adobe Systems, Incorporated AMT Licensing, which is a software library that handles the activation and licensing of Adobe products. The file may be missing or corrupted due to various reasons, such as accidental deletion, virus infection, faulty installation, or registry issues.

        -

        adobe encore cs6. amtlib.dll.rar


        Download Zip ··· https://urlin.us/2uEx3j



        -

        Fortunately, there are some possible solutions that can help you fix this error and restore the functionality of Adobe Encore CS6. Here are some of them:

        -
          -
• Reinstall Adobe Encore CS6. The simplest and most effective way to fix this error is to reinstall Adobe Encore CS6 on your PC. This will ensure that you have the latest and correct version of the amtlib.dll file and other necessary files. To reinstall Adobe Encore CS6, follow these steps:

  1. Uninstall Adobe Encore CS6 from your PC using the Control Panel or the uninstaller tool provided by Adobe.
  2. Restart your PC to clear any residual files or registry entries.
  3. Download the latest version of Adobe Encore CS6 from the official website or use the installation disc if you have one.
  4. Follow the on-screen instructions to install Adobe Encore CS6 on your PC.
  5. Launch Adobe Encore CS6 and check if the error is resolved.
• Download and restore amtlib.dll. If reinstalling Adobe Encore CS6 does not fix the error, you can try downloading and restoring the missing amtlib.dll file manually. You can find various websites that offer free downloads of DLL files. However, you should be careful when downloading DLL files from unknown sources, as they may contain malware or viruses that can harm your PC. To download and restore amtlib.dll, follow these steps:

  1. Visit one of the websites that offer free downloads of DLL files and search for "amtlib.dll".
  2. Select the version of the file that matches your Windows system (32-bit or 64-bit) and your Adobe product (CS6).
  3. Download the file and save it to a folder on your PC.
  4. Locate the folder where you installed Adobe Encore CS6 (usually C:\Program Files\Adobe\Adobe Encore CS6) and copy the downloaded amtlib.dll file there.
  5. Launch Adobe Encore CS6 and check if the error is resolved.
• Update Windows and drivers. Sometimes, the error may be caused by outdated or incompatible Windows or drivers on your PC. Updating Windows and drivers can help you fix any bugs or issues that may affect the performance of Adobe Encore CS6 and other programs. To update Windows and drivers, follow these steps:

  1. Open the Start menu and click on Settings.
  2. Select Update & Security and click on Check for updates.
  3. Wait for Windows to download and install any available updates.
  4. Restart your PC to apply the changes.
  5. Open Device Manager and expand each category of devices.
  6. Right-click on each device and select Update driver.
  7. Select Search automatically for updated driver software and follow the on-screen instructions.
  8. Restart your PC again and launch Adobe Encore CS6 to check if the error is resolved.
• Clean your PC registry and optimize your computer. Another possible cause of the error is a corrupted or cluttered PC registry. The registry is a database that stores information about your system settings, preferences, and installed programs. If the registry entries related to Adobe Encore CS6 are damaged or missing, the program may fail to find amtlib.dll even when the file is present. A reputable registry cleaning tool can scan for and repair such entries; back up the registry before making any changes, then restart your PC and launch Adobe Encore CS6 to check if the error is resolved.
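If you want to confirm whether the file is actually in place after trying any of these fixes, a few lines of Python will do it. This is only an illustrative sketch, not part of the original guide; the folder below is simply the default install path mentioned in the restore steps, so adjust it if Encore is installed somewhere else.

```python
from pathlib import Path

# Assumed default install folder; change this if Encore was installed elsewhere.
encore_dir = Path(r"C:\Program Files\Adobe\Adobe Encore CS6")
dll_path = encore_dir / "amtlib.dll"

if not encore_dir.exists():
    print(f"Encore folder not found at {encore_dir} - check the installation path.")
elif dll_path.exists():
    print(f"amtlib.dll is present ({dll_path.stat().st_size} bytes); the missing-DLL error should be gone.")
else:
    print("amtlib.dll is still missing - try the reinstall or restore steps above.")
```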

          \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Assassins Creed Origins Serial Key Generator (PC PS4 XBOX One).md b/spaces/inplisQlawa/anything-midjourney-v4-1/Assassins Creed Origins Serial Key Generator (PC PS4 XBOX One).md deleted file mode 100644 index 6788f77b09a3737f024b3d3c779ec4cc352aa509..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Assassins Creed Origins Serial Key Generator (PC PS4 XBOX One).md +++ /dev/null @@ -1,9 +0,0 @@ -
          -

Assassin's Creed Origins takes the franchise to a new level of greatness. The game's setting - Egypt - is one of the most gorgeous and diverse locations ever seen in a video game, and the freedom to play as three distinct characters at once makes the world so much more interesting than a single protagonist would ever be. A fully fleshed-out story combined with gorgeous graphics and a polished combat system makes this a special treat. It is a truly original experience.

          -

Assassin's Creed Origins is a truly unique experience. The open world delivers an immense amount of content and feels alive with NPCs and animals, the gameplay is balanced, and the combat is extremely deep. As much as we loved Black Flag, Origins is a better game and a more worthy successor to it. It is a game that any Assassin's Creed fan should experience.

          -

          Assassins Creed Origins Serial Key Generator (PC, PS4, XBOX One)


          Download Zip · https://urlin.us/2uEwfa



          -


          -

Aya, the "she who wears many hats", a brilliant assassin and specialist in espionage, has assembled a group of Templars who are willing to go on an urgent mission. One of the targets is the Ubisoft subsidiary "Ubisoft Montreal" and the release date of the next title, Assassin's Creed Valhalla. The boss is a former member of the Assassins, and your job is to finish him off. Become the best fighter in the Brotherhood, avoid traps, and explore Egypt as you fight for the life of the protagonist! The pace is fast, the settings of the game are colorful, and the story is well worth telling!

          -

          \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/AudioeaseAltiverb7Xl726VstAaxX86X642016.md b/spaces/inplisQlawa/anything-midjourney-v4-1/AudioeaseAltiverb7Xl726VstAaxX86X642016.md deleted file mode 100644 index 963cea2817c743ffc7491bd68e412685ae051339..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/AudioeaseAltiverb7Xl726VstAaxX86X642016.md +++ /dev/null @@ -1,12 +0,0 @@ - -

          hayphil 5052189a2a http://unparse.yolasite.com/resources/AudioeaseAltiverb7Xl726VstAaxX86X642016.pdf..... .taiasto 7b17bfd26b https://coub.com/stories/3520682-audioeasealtiverb7xl726vstaaxx86x642016... .darkbeth...

          -

          AudioeaseAltiverb7Xl726VstAaxX86X642016


          DOWNLOAD ✑ ✑ ✑ https://urlin.us/2uEyNo



          -

          https://coub.com/stories/3103108-audioeasealtiverb7xl726vstaaxx86x642016 https://coub.com/stories/3103107-america-sur-les-sentiers-de-la-guerre-rar-_top_. https://365-ads.com/wp-content/uploads/2022/06/AudioeaseAltiverb7Xl726VstAaxX86X642016.pdf..

          -

          https://coub.com/stories/3103108-audioeasealtiverb7xl726vstaaxx86x642016 https://coub.com/stories/3103107-america-sur-les-sentiers-de-la-guerre-rar-_top_. https://coub.com/stories/3103108-audioeasealtiverb7xl726vstaaxx86x642016

          -

          http://unparse.yolasite.com/resources/AudioeaseAltiverb7Xl726VstAaxX86X642016.pdf https://coub.com/stories/3103108-audioeasealtiverb7xl726vstaaxx86x642016 http://www.mvapps.com/products/AudioeaseAltiverb7Xl726VstAaxX86X642016.pdf.

          -

          -

          https://coub.com/stories/3520682-audioeasealtiverb7xl726vstaaxx86x642016. . Darkbeth - 2019-11-14T22:52:48. From data source: https://docdro.id/TVJg3gO it just stated the shape, but no info on what to run for pdf... https://365-ads.com/wp-content/uploads/2022/06/AudioeaseAltiverb7Xl726VstAaxX86X642016.pdf.

          -

          https://365-ads.com/wp-content/uploads/2022/06/AudioeaseAltiverb7Xl726VstAaxX86X642016.pdf. . Darkbeth - 2019-11-14T22:52:48. https://coub.com/stories/3520682-audioeasealtiverb7xl726vstaaxx86x642016.

          -

          https://365-ads.com/audioease-altiverb-7-xl-726-vst-ax-x86-x64-2016-x86.html. Deio 7ddb657bde https://coub.com/stories/3103108-audioeasealtiverb7xl726vstaaxx86x642016. taiasto 7b17bfd26b https://coub.com/stories/3520682-audioeasealtiverb7xl726vstaaxx86x642016..

          899543212b
          -
          -
          \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Bluetoothisscbtadriverwindows10.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Bluetoothisscbtadriverwindows10.md deleted file mode 100644 index ec5c21e937251728e8c790af1f0e4e2d8fc5e870..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Bluetoothisscbtadriverwindows10.md +++ /dev/null @@ -1,10 +0,0 @@ -
          -

          Bluetoothisscbtadriverwindows10 e0c9a95ca7. Reply. tamyges. #328. tamyges (Saturday, 15 January 2022 02:29). tamyges ea9a6c350ca https://coub.com/stories/3019485-bluetoothisscbtadriverwindows10

          -

          Jaiddav I don't know. https://coub.com/stories/3009497-bluetoothisscbtadriverwindows10 http://moskva-business.ru/to.php?t=165873720 https://trello.com/c/nBbj6tpW/8-rips-of-foreign-call-girls-720p-1080p

          -

          Bluetoothisscbtadriverwindows10


          Download Zip >>> https://urlin.us/2uEyG4



          -

          Bluetoothisscbtadriverwindows10 bd86983c93 download up a game called real fuk for mac and i dlscted a game called bullet is an easy to use, family game with the bright and fascinating characters that entertain you.

          -

          Bluetoothisscbtadriverwindows10 bd86983c93 gamehax. https://trello.com/c/3GZUjAHY/60-android-movie-download-7272 https://trello.com/c/yQSuwZrJ/d-magyar-mondatek-mapta-getegy-kb53955-szerkesztveg

          -

Bluetoothisscbtadriverwindows10 Webcam site. The file is presented as processor code in English, translated for Mac OS X. The elegant print on paper consists of three files. The English translation file consists of three documents: the processor code in Russian, the printed code in Russian, and the stages. The processor code contains what is supposed to be created, and the required translation is determined by the stages. Processor code created as static code can be useful for configuring many computer users, but it can lead to negative confidentiality, communication problems, an absence of user questions, and other issues.

          -

          Bluetoothisscbtadriverwindows10 BD86983C93. tosuiji shinozaki https://trello.com/c/dJcrNlpV/225-upd-fifa-09-magyar-kommentar-letoltese http://www.cabinetmind.net/cn.php?do=/blog/amess/14-fifa-15-a-to-s/20150905_146932/

          \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Crack Coppercam Torrent.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Crack Coppercam Torrent.md deleted file mode 100644 index a93e4262f70199a08164c5ec50b6b8f20ad54f6f..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Crack Coppercam Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

          crack coppercam torrent


          Download Zip 🗹 https://urlin.us/2uEx4K



          - -This file beta cae systems ansa v13 2 3 x86 x64 torrent download torrent.. Oct 14, 2018 . Title: Layout ... 2. ... used keywords such as: crack, download, serial, keygen, torrent, warez, etc. ... Coppercam License Crack · Opus Aec ... 1fdad05405
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/FS2004 - Flight Simulator 2004 ISO - Full !!INSTALL!! Game - Repack By 108.md b/spaces/inplisQlawa/anything-midjourney-v4-1/FS2004 - Flight Simulator 2004 ISO - Full !!INSTALL!! Game - Repack By 108.md deleted file mode 100644 index 41897ac71fc129aebb8d0b22a2bee4b35598ebdb..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/FS2004 - Flight Simulator 2004 ISO - Full !!INSTALL!! Game - Repack By 108.md +++ /dev/null @@ -1,76 +0,0 @@ - -

          FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108: A Review

          -

          If you are looking for a realistic and immersive flight simulation game, you might want to check out FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108. This is a repack version of the original game that has been compressed and optimized for faster and easier installation. You can download it from various torrent sites or use the magnet link provided below.

          -

          FS2004 - Flight Simulator 2004 is a simulation game that lets you experience the history and evolution of aviation. You can fly over 24,000 destinations around the world, from famous landmarks to remote islands. You can also choose from a variety of aircraft, from historical planes like the Wright Flyer and the Spirit of St. Louis, to modern jets like the Boeing 747 and the Concorde. You can even create your own custom aircraft and scenery using the built-in tools.

          -

          FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108


          Download File ✸✸✸ https://urlin.us/2uEyIh



          -

          What are the features of FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108?

          -

          Some of the features that make FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 a great flight simulation game are:

          -
            -
• A realistic and dynamic weather system that affects your flight performance and visibility.
• An improved air traffic control system that guides you through takeoff, landing, and en route procedures.
• A new learning center that provides tutorials, tips, and information on various aspects of flying.
• A GPS system that helps you navigate and plan your flights.
• A multiplayer mode that allows you to fly with other players online or on a local network.
• A repack by 108 that reduces the file size and installation time of the game.
          -

          How to download and install FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108?

          -

          To download and install FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108, you need to follow these steps:

          -
            -
1. Download a torrent client such as qBittorrent or uTorrent.
2. Click on the torrent link or magnet link below to get the file of FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108.
3. Open the torrent file or magnet link with your torrent client and start downloading.
4. Once the download is complete, extract the rar file using WinRAR or 7-Zip.
5. Run the setup file and follow the instructions to install the game.
6. Enjoy flying with FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108.
          -

          Torrent link: https://bt4g.org/magnet/0a4fd71329c552b4a0ec62e711047dbcbeefcc09

          -

          Magnet link: magnet:?xt=urn:btih:0a4fd71329c552b4a0ec62e711047dbcbeefcc09&dn=FS2004+-+Flight+Simulator+2004+ISO+-+Full+Game+-+Repack+By+108
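For readers curious what that string encodes: a magnet link is simply a URI whose query string carries the torrent's info-hash (the xt field) and a display name (the dn field). A minimal sketch of pulling those fields out with Python's standard library is shown below, using the link quoted above; it is only an illustration of the URI structure.

```python
from urllib.parse import urlparse, parse_qs

# The magnet URI quoted above; any magnet link follows the same xt/dn query structure.
magnet = ("magnet:?xt=urn:btih:0a4fd71329c552b4a0ec62e711047dbcbeefcc09"
          "&dn=FS2004+-+Flight+Simulator+2004+ISO+-+Full+Game+-+Repack+By+108")

params = parse_qs(urlparse(magnet).query)
info_hash = params["xt"][0].split(":")[-1]  # urn:btih:<40-character hex info-hash>
display_name = params["dn"][0]              # parse_qs already decodes '+' as spaces

print(f"info-hash:    {info_hash}")
print(f"display name: {display_name}")
```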

          -

          Conclusion

          -

          FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 is a flight simulation game that offers a realistic and immersive experience of flying. You can explore the world, fly different aircraft, learn new skills, and have fun with other players. You can also download it easily and quickly using the repack by 108. If you are a fan of flight simulation games, you should definitely give FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 a try.

          -

          What are some tips and tricks for FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108?

          -

          If you want to get the most out of FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108, you might want to try some of these tips and tricks that can enhance your performance, graphics, and gameplay.

          -
            -
• To make your virtual cockpit gauges look more crisp, add this line under the [PANELS] section of your fs9.cfg file: VIRTUAL_COCKPIT_TEXTURES_SCALE=2.0
• To speed up the panning in the cockpit or outside view, change this line in your fs9.cfg file: PAN_RATE=400 to a higher value, such as 900.
• To bypass the opening screen and go directly to the main menu, add these lines under the [SIM] section of your fs9.cfg file: SHOW_OPENING_SCREEN=0 and STARTUP_DEMO=0
• To see your average frame rate on the screen, add this line under the [Main] section of your fs9.cfg file: Ave_Frame_Rate_Display=1
• To pan the outside view when in 2D cockpit mode, add this line under the [CONTROLS] section of your fs9.cfg file: PAN_IN_COCKPIT_MODE=1
• To turn off the red "brakes" message on the screen, add this line under the [SIM] section of your fs9.cfg file: SHOW_BRAKE_MESSAGE=0
• To disable the AGP texture acceleration without disabling it from DirectX, add this line under your display graphics card section of your fs9.cfg file: TextureAGP=0
• To adjust the terrain resolution and radius, you can tweak these lines under the [TERRAIN] section of your fs9.cfg file: TERRAIN_ERROR_FACTOR, TERRAIN_MIN_DEM_AREA, TERRAIN_MAX_DEM_AREA, TERRAIN_MAX_VERTEX_LEVEL, TERRAIN_TEXTURE_SIZE_EXP, TERRAIN_EXTENDED_TEXTURES, TERRAIN_DEFAULT_RADIUS, TERRAIN_EXTENDED_RADIUS, TERRAIN_EXTENDED_LEVELS. For more details on what these settings do, refer to this link: https://www.avsim.com/forums/topic/198593-fs2004-compilation-of-popular-tweaks/
          -
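If you would rather not edit fs9.cfg by hand, the same settings can be written with a short Python script. This is only an illustrative sketch, not part of the original guide: the fs9.cfg location and the [CONTROLS] placement of PAN_RATE are assumptions, so adjust them for your installation and keep the backup the script makes.

```python
# Minimal sketch: apply the fs9.cfg tweaks listed above with Python's configparser.
import configparser
import shutil
from pathlib import Path

cfg_path = Path(r"C:\Program Files\Microsoft Games\Flight Simulator 9\fs9.cfg")  # assumed location
shutil.copy(cfg_path, cfg_path.with_name(cfg_path.name + ".bak"))  # keep a backup first

tweaks = {
    "PANELS": {"VIRTUAL_COCKPIT_TEXTURES_SCALE": "2.0"},
    "SIM": {"SHOW_OPENING_SCREEN": "0", "STARTUP_DEMO": "0", "SHOW_BRAKE_MESSAGE": "0"},
    "Main": {"Ave_Frame_Rate_Display": "1"},
    "CONTROLS": {"PAN_IN_COCKPIT_MODE": "1", "PAN_RATE": "900"},  # PAN_RATE section is assumed
}

parser = configparser.ConfigParser(interpolation=None, strict=False)
parser.optionxform = str  # keep the original case of option names
parser.read(cfg_path)

for section, options in tweaks.items():
    if not parser.has_section(section):
        parser.add_section(section)
    for key, value in options.items():
        parser[section][key] = value

with cfg_path.open("w") as fh:
    parser.write(fh)
print("fs9.cfg updated - launch the simulator to verify the new settings take effect.")
```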

          Where can I find more information and resources for FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108?

          -

          If you want to learn more about FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108, you can visit some of these websites that offer tutorials, guides, reviews, downloads, forums, and more:

          -
            -
• https://www.flightsim.com/ - A website that provides news, reviews, downloads, forums, and more for flight simulation enthusiasts.
• https://flyawaysimulation.com/ - A website that offers downloads, articles, tutorials, videos, screenshots, and more for various flight simulation games.
• https://www.simviation.com/ - A website that features downloads, forums, galleries, tips and tricks, and more for flight simulation games.
• https://www.avsim.com/ - A website that covers news, reviews, downloads, forums, libraries, and more for flight simulation games.
• https://www.simflight.com/ - A website that provides news, reviews, downloads, forums, events, and more for flight simulation games.
          -

          Conclusion

          -

          FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 is a flight simulation game that offers a realistic and immersive experience of flying. You can explore the world, fly different aircraft, learn new skills, and have fun with other players. You can also download it easily and quickly using the repack by 108. If you are a fan of flight simulation games, you should definitely give FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 a try.

          -

          -

          What are some of the best addons for FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108?

          -

          One of the advantages of FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 is that it supports a wide range of addons that can enhance your flight simulation experience. Addons are additional files that you can install into your game to add new features, such as aircraft, scenery, missions, utilities, and more. There are thousands of addons available for FS2004, both freeware and payware, that you can download from various websites and sources.

          -

          Some of the best addons for FS2004 are:

          -
            -
• Real Environment Xtreme (REX) - This is a payware addon that improves the weather, clouds, sky, water, and lighting effects in FS2004. It also includes a weather engine that generates realistic and dynamic weather conditions based on real-world data.
• Ultimate Terrain Europe/USA/Canada - These are payware addons that enhance the terrain and landclass in FS2004. They add accurate coastlines, rivers, lakes, roads, railways, bridges, night lighting, and more to the default scenery.
• Active Sky 6.5 - This is a payware addon that provides a realistic and advanced weather engine for FS2004. It features smooth cloud transitions, high-resolution cloud textures, wind shear effects, turbulence effects, icing effects, and more.
• Project Open Sky (POSKY) - This is a freeware addon that offers high-quality aircraft models for FS2004. They have a large collection of airliners, such as Boeing, Airbus, Embraer, Bombardier, and more. They also feature detailed animations, virtual cockpits, custom sounds, and liveries.
• Flight Simulator Manager (FSM) - This is a freeware addon that is a powerful tool for managing your FS2004 addons. It allows you to install, uninstall, activate, deactivate, backup, restore, and organize your addons with ease. It also features a performance monitor, a screenshot manager, a flight planner, and more.
          -

          How to get help and support for FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108?

          -

          If you encounter any problems or issues with FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 or any of its addons, you can get help and support from various sources. Some of them are:

          -
            -
• The official Microsoft website - You can find FAQs, troubleshooting guides, patches, updates, and technical support for FS2004 here: https://www.microsoft.com/en-us/download/details.aspx?id=8986
• The official Microsoft forums - You can post your questions and issues on the official Microsoft forums for FS2004 here: https://answers.microsoft.com/en-us/windows/forum/games_windows_10?sort=LastReplyDate&dir=Desc&tab=All&status=all&mod=&modAge=&advFil=&postedAfter=&postedBefore=&threadType=All&isFilterExpanded=false&page=1
• The online flight simulation community - You can join various online flight simulation communities that offer forums, blogs, articles, tutorials, reviews, downloads, and more for FS2004. Some of them are: https://www.flightsim.com/, https://flyawaysimulation.com/, https://www.simviation.com/, https://www.avsim.com/, https://www.simflight.com/
• The addon developers and publishers - You can contact the addon developers and publishers directly for any questions or issues related to their products. You can find their contact information on their websites or in their documentation files.
          -

          Conclusion

          -

          FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 is a flight simulation game that offers a realistic and immersive experience of flying. You can explore the world, fly different aircraft, learn new skills, and have fun with other players. You can also download it easily and quickly using the repack by 108. If you are a fan of flight simulation games, you should definitely give FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108 a try.

          -

          In this article, we have reviewed the features, installation, tips and tricks, addons, and support for FS2004 - Flight Simulator 2004 ISO - Full Game - Repack By 108. We hope that this article has been helpful and informative for you. If you have any questions or comments, feel free to leave them below. Happy flying!

          \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/IDM UltraFinder Professional 17.0.0.13 (x86 X64) Portable.md b/spaces/inplisQlawa/anything-midjourney-v4-1/IDM UltraFinder Professional 17.0.0.13 (x86 X64) Portable.md deleted file mode 100644 index b0e45053dd98bd1b8266677969574e2aac9291ad..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/IDM UltraFinder Professional 17.0.0.13 (x86 X64) Portable.md +++ /dev/null @@ -1,6 +0,0 @@ -

          IDM UltraFinder Professional 17.0.0.13 (x86 x64) Portable


          DOWNLOAD ✦✦✦ https://urlin.us/2uEy1D



          -
          -IDM UltraFinder Professional 17.0.0.13 (x86 x64) Portable setup free · tafsir al azhar buya hamka pdf download · rtca do 160c free download rar 1fdad05405
          -
          -
          -

          diff --git a/spaces/instantnoodle/Fruits-classifier/app.py b/spaces/instantnoodle/Fruits-classifier/app.py deleted file mode 100644 index ae3e7d3998fa6fd13834109d8f5f35f5adf7c360..0000000000000000000000000000000000000000 --- a/spaces/instantnoodle/Fruits-classifier/app.py +++ /dev/null @@ -1,18 +0,0 @@ -from fastai.vision.all import * -import gradio as gr - -learn = load_learner('fruits.pkl') -labels = learn.dls.vocab - -def predict(img): - img = PILImage.create(img) - pred, pred_idx, probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -title = 'Fruits Classifier' -description = 'This model can classify the image into four categories of fruits: Apple, Banana, Mango, and Blueberries' -image = gr.inputs.Image(shape=(512,512)) -label = gr.outputs.Label() -examples = ['Mango.jpg', 'Apple.jpg', 'Banana.webp', 'Blueberries.jpg'] - -gr.Interface(fn=predict, inputs=image, outputs=label, examples=examples, title=title, description=description).launch() diff --git a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/composable_masks.py b/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/composable_masks.py deleted file mode 100644 index adf718145740e1f90eec10593955112a71311199..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/composable_masks.py +++ /dev/null @@ -1,198 +0,0 @@ -# At the moment there are three types of masks: mask from variable, file mask and word mask -# Variable masks include init_mask for the predefined whole-video mask, frame_mask from video-masking system -# and human_mask for a model which better segments people in the background video -# They are put in {}-brackets -# Word masks are framed with <>-bracets, like: , -# File masks are put in []-brackes -# Empty strings are counted as the whole frame -# We want to put them all into a sequence of boolean operations - -# Example: -# \ -# (({human_mask} & [mask1.png]) ^ ) - -# Writing the parser for the boolean sequence -# using regex and PIL operations -import re -from .load_images import get_mask_from_file, check_mask_for_errors, blank_if_none -from .word_masking import get_word_mask -from torch import Tensor -import PIL -from PIL import Image, ImageChops - -# val_masks: name, PIL Image mask -# Returns an image in mode '1' (needed for bool ops), convert to 'L' in the sender function -def compose_mask(root, args, mask_seq, val_masks, frame_image, inner_idx:int = 0): - # Compose_mask recursively: go to inner brackets, then b-op it and go upstack - - # Step 1: - # recursive parenthesis pass - # regex is not powerful here - - seq = "" - inner_seq = "" - parentheses_counter = 0 - - for c in mask_seq: - if c == ')': - parentheses_counter = parentheses_counter - 1 - if parentheses_counter > 0: - inner_seq += c - if c == '(': - parentheses_counter = parentheses_counter + 1 - if parentheses_counter == 0: - if len(inner_seq) > 0: - inner_idx += 1 - seq += compose_mask(root, args, inner_seq, val_masks, frame_image, inner_idx) - inner_seq = "" - else: - seq += c - - if parentheses_counter != 0: - raise Exception('Mismatched parentheses in {mask_seq}!') - - mask_seq = seq - - # Step 2: - # Load the word masks and file masks as vars - - # File masks - pattern = r'\[(?P[\S\s]*?)\]' - - def parse(match_object): - nonlocal inner_idx - inner_idx += 1 - content = match_object.groupdict()['inner'] - val_masks[str(inner_idx)] = get_mask_from_file(content, 
args).convert('1') # TODO: add caching - return f"{{{inner_idx}}}" - - mask_seq = re.sub(pattern, parse, mask_seq) - - # Word masks - pattern = r'<(?P[\S\s]*?)>' - - def parse(match_object): - nonlocal inner_idx - inner_idx += 1 - content = match_object.groupdict()['inner'] - val_masks[str(inner_idx)] = get_word_mask(root, frame_image, content).convert('1') - return f"{{{inner_idx}}}" - - mask_seq = re.sub(pattern, parse, mask_seq) - - # Now that all inner parenthesis are eliminated we're left with a linear string - - # Step 3: - # Boolean operations with masks - # Operators: invert !, and &, or |, xor ^, difference \ - - # Invert vars with '!' - pattern = r'![\S\s]*{(?P[\S\s]*?)}' - def parse(match_object): - nonlocal inner_idx - inner_idx += 1 - content = match_object.groupdict()['inner'] - savename = content - if content in root.mask_preset_names: - inner_idx += 1 - savename = str(inner_idx) - val_masks[savename] = ImageChops.invert(val_masks[content]) - return f"{{{savename}}}" - - mask_seq = re.sub(pattern, parse, mask_seq) - - # Multiply neighbouring vars with '&' - # Wait for replacements stall (like in Markov chains) - while True: - pattern = r'{(?P[\S\s]*?)}[\s]*&[\s]*{(?P[\S\s]*?)}' - def parse(match_object): - nonlocal inner_idx - inner_idx += 1 - content = match_object.groupdict()['inner1'] - content_second = match_object.groupdict()['inner2'] - savename = content - if content in root.mask_preset_names: - inner_idx += 1 - savename = str(inner_idx) - val_masks[savename] = ImageChops.logical_and(val_masks[content], val_masks[content_second]) - return f"{{{savename}}}" - - prev_mask_seq = mask_seq - mask_seq = re.sub(pattern, parse, mask_seq) - if mask_seq is prev_mask_seq: - break - - # Add neighbouring vars with '|' - while True: - pattern = r'{(?P[\S\s]*?)}[\s]*?\|[\s]*?{(?P[\S\s]*?)}' - def parse(match_object): - nonlocal inner_idx - inner_idx += 1 - content = match_object.groupdict()['inner1'] - content_second = match_object.groupdict()['inner2'] - savename = content - if content in root.mask_preset_names: - inner_idx += 1 - savename = str(inner_idx) - val_masks[savename] = ImageChops.logical_or(val_masks[content], val_masks[content_second]) - return f"{{{savename}}}" - - prev_mask_seq = mask_seq - mask_seq = re.sub(pattern, parse, mask_seq) - if mask_seq is prev_mask_seq: - break - - # Mutually exclude neighbouring vars with '^' - while True: - pattern = r'{(?P[\S\s]*?)}[\s]*\^[\s]*{(?P[\S\s]*?)}' - def parse(match_object): - nonlocal inner_idx - inner_idx += 1 - content = match_object.groupdict()['inner1'] - content_second = match_object.groupdict()['inner2'] - savename = content - if content in root.mask_preset_names: - inner_idx += 1 - savename = str(inner_idx) - val_masks[savename] = ImageChops.logical_xor(val_masks[content], val_masks[content_second]) - return f"{{{savename}}}" - - prev_mask_seq = mask_seq - mask_seq = re.sub(pattern, parse, mask_seq) - if mask_seq is prev_mask_seq: - break - - # Set-difference the regions with '\' - while True: - pattern = r'{(?P[\S\s]*?)}[\s]*\\[\s]*{(?P[\S\s]*?)}' - def parse(match_object): - content = match_object.groupdict()['inner1'] - content_second = match_object.groupdict()['inner2'] - savename = content - if content in root.mask_preset_names: - nonlocal inner_idx - inner_idx += 1 - savename = str(inner_idx) - val_masks[savename] = ImageChops.logical_and(val_masks[content], ImageChops.invert(val_masks[content_second])) - return f"{{{savename}}}" - - prev_mask_seq = mask_seq - mask_seq = re.sub(pattern, parse, mask_seq) - if 
mask_seq is prev_mask_seq: - break - - # Step 4: - # Output - # Now we should have a single var left to return. If not, raise an error message - pattern = r'{(?P[\S\s]*?)}' - matches = re.findall(pattern, mask_seq) - - if len(matches) != 1: - raise Exception(f'Wrong composable mask expression format! Broken mask sequence: {mask_seq}') - - return f"{{{matches[0]}}}" - -def compose_mask_with_check(root, args, mask_seq, val_masks, frame_image): - for k, v in val_masks.items(): - val_masks[k] = blank_if_none(v, args.W, args.H, '1').convert('1') - return check_mask_for_errors(val_masks[compose_mask(root, args, mask_seq, val_masks, frame_image, 0)[1:-1]].convert('L')) diff --git a/spaces/james21/SD-XL/style.css b/spaces/james21/SD-XL/style.css deleted file mode 100644 index 86ce68e49778375ebf5b12dc3baaccf931570b54..0000000000000000000000000000000000000000 --- a/spaces/james21/SD-XL/style.css +++ /dev/null @@ -1,16 +0,0 @@ -h1 { - text-align: center; -} - -#duplicate-button { - margin: auto; - color: #fff; - background: #1565c0; - border-radius: 100vh; -} - -#component-0 { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; -} diff --git a/spaces/jbetker/tortoise/models/transformer.py b/spaces/jbetker/tortoise/models/transformer.py deleted file mode 100644 index aa59b462a3f9c2680f28ceb1b87480258f0293f0..0000000000000000000000000000000000000000 --- a/spaces/jbetker/tortoise/models/transformer.py +++ /dev/null @@ -1,219 +0,0 @@ -from functools import partial - -import torch -import torch.nn.functional as F -from einops import rearrange -from rotary_embedding_torch import RotaryEmbedding, broadcat -from torch import nn - - -# helpers - - -def exists(val): - return val is not None - - -def default(val, d): - return val if exists(val) else d - - -def cast_tuple(val, depth = 1): - if isinstance(val, list): - val = tuple(val) - return val if isinstance(val, tuple) else (val,) * depth - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def stable_softmax(t, dim = -1, alpha = 32 ** 2): - t = t / alpha - t = t - torch.amax(t, dim = dim, keepdim = True).detach() - return (t * alpha).softmax(dim = dim) - - -def route_args(router, args, depth): - routed_args = [(dict(), dict()) for _ in range(depth)] - matched_keys = [key for key in args.keys() if key in router] - - for key in matched_keys: - val = args[key] - for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])): - new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes) - routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args}) - return routed_args - - -# classes -class SequentialSequence(nn.Module): - def __init__(self, layers, args_route = {}, layer_dropout = 0.): - super().__init__() - assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers' - self.layers = layers - self.args_route = args_route - self.layer_dropout = layer_dropout - - def forward(self, x, **kwargs): - args = route_args(self.args_route, kwargs, len(self.layers)) - layers_and_args = list(zip(self.layers, args)) - - for (f, g), (f_args, g_args) in layers_and_args: - x = x + f(x, **f_args) - x = x + g(x, **g_args) - return x - - -class DivideMax(nn.Module): - def __init__(self, dim): - super().__init__() - self.dim = dim - - def forward(self, x): - maxes = x.amax(dim = self.dim, keepdim = True).detach() - return x / maxes - - -# https://arxiv.org/abs/2103.17239 -class LayerScale(nn.Module): - def 
__init__(self, dim, depth, fn): - super().__init__() - if depth <= 18: - init_eps = 0.1 - elif depth > 18 and depth <= 24: - init_eps = 1e-5 - else: - init_eps = 1e-6 - - scale = torch.zeros(1, 1, dim).fill_(init_eps) - self.scale = nn.Parameter(scale) - self.fn = fn - def forward(self, x, **kwargs): - return self.fn(x, **kwargs) * self.scale - -# layer norm - - -class PreNorm(nn.Module): - def __init__(self, dim, fn, sandwich = False): - super().__init__() - self.norm = nn.LayerNorm(dim) - self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity() - self.fn = fn - - def forward(self, x, **kwargs): - x = self.norm(x) - x = self.fn(x, **kwargs) - return self.norm_out(x) - -# feed forward - - -class GEGLU(nn.Module): - def forward(self, x): - x, gates = x.chunk(2, dim = -1) - return x * F.gelu(gates) - - -class FeedForward(nn.Module): - def __init__(self, dim, dropout = 0., mult = 4.): - super().__init__() - self.net = nn.Sequential( - nn.Linear(dim, dim * mult * 2), - GEGLU(), - nn.Dropout(dropout), - nn.Linear(dim * mult, dim) - ) - - def forward(self, x): - return self.net(x) - -# Attention - - -class Attention(nn.Module): - def __init__(self, dim, seq_len, causal = True, heads = 8, dim_head = 64, dropout = 0.): - super().__init__() - inner_dim = dim_head * heads - self.heads = heads - self.seq_len = seq_len - self.scale = dim_head ** -0.5 - - self.causal = causal - - self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) - self.to_out = nn.Sequential( - nn.Linear(inner_dim, dim), - nn.Dropout(dropout) - ) - - def forward(self, x, mask = None): - b, n, _, h, device = *x.shape, self.heads, x.device - softmax = torch.softmax - - qkv = self.to_qkv(x).chunk(3, dim = -1) - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) - - q = q * self.scale - - dots = torch.einsum('b h i d, b h j d -> b h i j', q, k) - mask_value = max_neg_value(dots) - - if exists(mask): - mask = rearrange(mask, 'b j -> b () () j') - dots.masked_fill_(~mask, mask_value) - del mask - - if self.causal: - i, j = dots.shape[-2:] - mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool() - dots.masked_fill_(mask, mask_value) - - attn = softmax(dots, dim=-1) - - out = torch.einsum('b h i j, b h j d -> b h i d', attn, v) - out = rearrange(out, 'b h n d -> b n (h d)') - out = self.to_out(out) - return out - - -# main transformer class -class Transformer(nn.Module): - def __init__( - self, - *, - dim, - depth, - seq_len, - causal = True, - heads = 8, - dim_head = 64, - ff_mult = 4, - attn_dropout = 0., - ff_dropout = 0., - sparse_attn = False, - sandwich_norm = False, - ): - super().__init__() - layers = nn.ModuleList([]) - sparse_layer = cast_tuple(sparse_attn, depth) - - for ind, sparse_attn in zip(range(depth), sparse_layer): - attn = Attention(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout) - - ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout) - - layers.append(nn.ModuleList([ - LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)), - LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm)) - ])) - - execute_type = SequentialSequence - route_attn = ((True, False),) * depth - attn_route_map = {'mask': route_attn} - - self.layers = execute_type(layers, args_route = attn_route_map) - - def forward(self, x, **kwargs): - return self.layers(x, **kwargs) \ No newline at end of file diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/vertical-slider.tsx 
b/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/vertical-slider.tsx deleted file mode 100644 index b28a1200cb06d1f26e3c640c85e655c99e88954e..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/vertical-slider.tsx +++ /dev/null @@ -1,27 +0,0 @@ -"use client" - -import * as React from "react" -import * as SliderPrimitive from "@radix-ui/react-slider" - -import { cn } from "@/lib/utils" - -const VerticalSlider = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - - - -)) -VerticalSlider.displayName = "VerticalSlider" -export { VerticalSlider } diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_eax.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_eax.py deleted file mode 100644 index 62cf4d8b310221d65e330357612dd16ec8ba6a6d..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_eax.py +++ /dev/null @@ -1,408 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2014, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -""" -EAX mode. -""" - -__all__ = ['EaxMode'] - -import struct -from binascii import unhexlify - -from Crypto.Util.py3compat import byte_string, bord, _copy_bytes - -from Crypto.Util._raw_api import is_buffer - -from Crypto.Util.strxor import strxor -from Crypto.Util.number import long_to_bytes, bytes_to_long - -from Crypto.Hash import CMAC, BLAKE2s -from Crypto.Random import get_random_bytes - - -class EaxMode(object): - """*EAX* mode. - - This is an Authenticated Encryption with Associated Data - (`AEAD`_) mode. It provides both confidentiality and authenticity. - - The header of the message may be left in the clear, if needed, - and it will still be subject to authentication. - - The decryption step tells the receiver if the message comes - from a source that really knowns the secret key. - Additionally, decryption detects if any part of the message - - including the header - has been modified or corrupted. 
- - This mode requires a *nonce*. - - This mode is only available for ciphers that operate on 64 or - 128 bits blocks. - - There are no official standards defining EAX. - The implementation is based on `a proposal`__ that - was presented to NIST. - - .. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html - .. __: http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/eax/eax-spec.pdf - - :undocumented: __init__ - """ - - def __init__(self, factory, key, nonce, mac_len, cipher_params): - """EAX cipher mode""" - - self.block_size = factory.block_size - """The block size of the underlying cipher, in bytes.""" - - self.nonce = _copy_bytes(None, None, nonce) - """The nonce originally used to create the object.""" - - self._mac_len = mac_len - self._mac_tag = None # Cache for MAC tag - - # Allowed transitions after initialization - self._next = ["update", "encrypt", "decrypt", - "digest", "verify"] - - # MAC tag length - if not (2 <= self._mac_len <= self.block_size): - raise ValueError("'mac_len' must be at least 2 and not larger than %d" - % self.block_size) - - # Nonce cannot be empty and must be a byte string - if len(self.nonce) == 0: - raise ValueError("Nonce cannot be empty in EAX mode") - if not is_buffer(nonce): - raise TypeError("nonce must be bytes, bytearray or memoryview") - - self._omac = [ - CMAC.new(key, - b'\x00' * (self.block_size - 1) + struct.pack('B', i), - ciphermod=factory, - cipher_params=cipher_params) - for i in range(0, 3) - ] - - # Compute MAC of nonce - self._omac[0].update(self.nonce) - self._signer = self._omac[1] - - # MAC of the nonce is also the initial counter for CTR encryption - counter_int = bytes_to_long(self._omac[0].digest()) - self._cipher = factory.new(key, - factory.MODE_CTR, - initial_value=counter_int, - nonce=b"", - **cipher_params) - - def update(self, assoc_data): - """Protect associated data - - If there is any associated data, the caller has to invoke - this function one or more times, before using - ``decrypt`` or ``encrypt``. - - By *associated data* it is meant any data (e.g. packet headers) that - will not be encrypted and will be transmitted in the clear. - However, the receiver is still able to detect any modification to it. - - If there is no associated data, this method must not be called. - - The caller may split associated data in segments of any size, and - invoke this method multiple times, each time with the next segment. - - :Parameters: - assoc_data : bytes/bytearray/memoryview - A piece of associated data. There are no restrictions on its size. - """ - - if "update" not in self._next: - raise TypeError("update() can only be called" - " immediately after initialization") - - self._next = ["update", "encrypt", "decrypt", - "digest", "verify"] - - self._signer.update(assoc_data) - return self - - def encrypt(self, plaintext, output=None): - """Encrypt data with the key and the parameters set at initialization. - - A cipher object is stateful: once you have encrypted a message - you cannot encrypt (or decrypt) another message using the same - object. - - The data to encrypt can be broken up in two or - more pieces and `encrypt` can be called multiple times. - - That is, the statement: - - >>> c.encrypt(a) + c.encrypt(b) - - is equivalent to: - - >>> c.encrypt(a+b) - - This function does not add any padding to the plaintext. - - :Parameters: - plaintext : bytes/bytearray/memoryview - The piece of data to encrypt. - It can be of any length. 
- :Keywords: - output : bytearray/memoryview - The location where the ciphertext must be written to. - If ``None``, the ciphertext is returned. - :Return: - If ``output`` is ``None``, the ciphertext as ``bytes``. - Otherwise, ``None``. - """ - - if "encrypt" not in self._next: - raise TypeError("encrypt() can only be called after" - " initialization or an update()") - self._next = ["encrypt", "digest"] - ct = self._cipher.encrypt(plaintext, output=output) - if output is None: - self._omac[2].update(ct) - else: - self._omac[2].update(output) - return ct - - def decrypt(self, ciphertext, output=None): - """Decrypt data with the key and the parameters set at initialization. - - A cipher object is stateful: once you have decrypted a message - you cannot decrypt (or encrypt) another message with the same - object. - - The data to decrypt can be broken up in two or - more pieces and `decrypt` can be called multiple times. - - That is, the statement: - - >>> c.decrypt(a) + c.decrypt(b) - - is equivalent to: - - >>> c.decrypt(a+b) - - This function does not remove any padding from the plaintext. - - :Parameters: - ciphertext : bytes/bytearray/memoryview - The piece of data to decrypt. - It can be of any length. - :Keywords: - output : bytearray/memoryview - The location where the plaintext must be written to. - If ``None``, the plaintext is returned. - :Return: - If ``output`` is ``None``, the plaintext as ``bytes``. - Otherwise, ``None``. - """ - - if "decrypt" not in self._next: - raise TypeError("decrypt() can only be called" - " after initialization or an update()") - self._next = ["decrypt", "verify"] - self._omac[2].update(ciphertext) - return self._cipher.decrypt(ciphertext, output=output) - - def digest(self): - """Compute the *binary* MAC tag. - - The caller invokes this function at the very end. - - This method returns the MAC that shall be sent to the receiver, - together with the ciphertext. - - :Return: the MAC, as a byte string. - """ - - if "digest" not in self._next: - raise TypeError("digest() cannot be called when decrypting" - " or validating a message") - self._next = ["digest"] - - if not self._mac_tag: - tag = b'\x00' * self.block_size - for i in range(3): - tag = strxor(tag, self._omac[i].digest()) - self._mac_tag = tag[:self._mac_len] - - return self._mac_tag - - def hexdigest(self): - """Compute the *printable* MAC tag. - - This method is like `digest`. - - :Return: the MAC, as a hexadecimal string. - """ - return "".join(["%02x" % bord(x) for x in self.digest()]) - - def verify(self, received_mac_tag): - """Validate the *binary* MAC tag. - - The caller invokes this function at the very end. - - This method checks if the decrypted message is indeed valid - (that is, if the key is correct) and it has not been - tampered with while in transit. - - :Parameters: - received_mac_tag : bytes/bytearray/memoryview - This is the *binary* MAC, as received from the sender. - :Raises MacMismatchError: - if the MAC does not match. The message has been tampered with - or the key is incorrect. 
- """ - - if "verify" not in self._next: - raise TypeError("verify() cannot be called" - " when encrypting a message") - self._next = ["verify"] - - if not self._mac_tag: - tag = b'\x00' * self.block_size - for i in range(3): - tag = strxor(tag, self._omac[i].digest()) - self._mac_tag = tag[:self._mac_len] - - secret = get_random_bytes(16) - - mac1 = BLAKE2s.new(digest_bits=160, key=secret, data=self._mac_tag) - mac2 = BLAKE2s.new(digest_bits=160, key=secret, data=received_mac_tag) - - if mac1.digest() != mac2.digest(): - raise ValueError("MAC check failed") - - def hexverify(self, hex_mac_tag): - """Validate the *printable* MAC tag. - - This method is like `verify`. - - :Parameters: - hex_mac_tag : string - This is the *printable* MAC, as received from the sender. - :Raises MacMismatchError: - if the MAC does not match. The message has been tampered with - or the key is incorrect. - """ - - self.verify(unhexlify(hex_mac_tag)) - - def encrypt_and_digest(self, plaintext, output=None): - """Perform encrypt() and digest() in one step. - - :Parameters: - plaintext : bytes/bytearray/memoryview - The piece of data to encrypt. - :Keywords: - output : bytearray/memoryview - The location where the ciphertext must be written to. - If ``None``, the ciphertext is returned. - :Return: - a tuple with two items: - - - the ciphertext, as ``bytes`` - - the MAC tag, as ``bytes`` - - The first item becomes ``None`` when the ``output`` parameter - specified a location for the result. - """ - - return self.encrypt(plaintext, output=output), self.digest() - - def decrypt_and_verify(self, ciphertext, received_mac_tag, output=None): - """Perform decrypt() and verify() in one step. - - :Parameters: - ciphertext : bytes/bytearray/memoryview - The piece of data to decrypt. - received_mac_tag : bytes/bytearray/memoryview - This is the *binary* MAC, as received from the sender. - :Keywords: - output : bytearray/memoryview - The location where the plaintext must be written to. - If ``None``, the plaintext is returned. - :Return: the plaintext as ``bytes`` or ``None`` when the ``output`` - parameter specified a location for the result. - :Raises MacMismatchError: - if the MAC does not match. The message has been tampered with - or the key is incorrect. - """ - - pt = self.decrypt(ciphertext, output=output) - self.verify(received_mac_tag) - return pt - - -def _create_eax_cipher(factory, **kwargs): - """Create a new block cipher, configured in EAX mode. - - :Parameters: - factory : module - A symmetric cipher module from `Crypto.Cipher` (like - `Crypto.Cipher.AES`). - - :Keywords: - key : bytes/bytearray/memoryview - The secret key to use in the symmetric cipher. - - nonce : bytes/bytearray/memoryview - A value that must never be reused for any other encryption. - There are no restrictions on its length, but it is recommended to use - at least 16 bytes. - - The nonce shall never repeat for two different messages encrypted with - the same key, but it does not need to be random. - - If not specified, a 16 byte long random string is used. - - mac_len : integer - Length of the MAC, in bytes. It must be no larger than the cipher - block bytes (which is the default). 
- """ - - try: - key = kwargs.pop("key") - nonce = kwargs.pop("nonce", None) - if nonce is None: - nonce = get_random_bytes(16) - mac_len = kwargs.pop("mac_len", factory.block_size) - except KeyError as e: - raise TypeError("Missing parameter: " + str(e)) - - return EaxMode(factory, key, nonce, mac_len, kwargs) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Util/asn1.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Util/asn1.py deleted file mode 100644 index a646eacb4f56f062bcce101600f8f5e5635355c9..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Util/asn1.py +++ /dev/null @@ -1,1064 +0,0 @@ -# -*- coding: ascii -*- -# -# Util/asn1.py : Minimal support for ASN.1 DER binary encoding. -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== - -import struct - -from Crypto.Util.py3compat import byte_string, bchr, bord - -from Crypto.Util.number import long_to_bytes, bytes_to_long - -__all__ = ['DerObject', 'DerInteger', 'DerBoolean', 'DerOctetString', - 'DerNull', 'DerSequence', 'DerObjectId', 'DerBitString', 'DerSetOf'] - -# Useful references: -# - https://luca.ntop.org/Teaching/Appunti/asn1.html -# - https://letsencrypt.org/docs/a-warm-welcome-to-asn1-and-der/ -# - https://www.zytrax.com/tech/survival/asn1.html -# - https://www.oss.com/asn1/resources/books-whitepapers-pubs/larmouth-asn1-book.pdf -# - https://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf -# - https://misc.daniel-marschall.de/asn.1/oid-converter/online.php - -def _is_number(x, only_non_negative=False): - test = 0 - try: - test = x + test - except TypeError: - return False - return not only_non_negative or x >= 0 - - -class BytesIO_EOF(object): - """This class differs from BytesIO in that a ValueError exception is - raised whenever EOF is reached.""" - - def __init__(self, initial_bytes): - self._buffer = initial_bytes - self._index = 0 - self._bookmark = None - - def set_bookmark(self): - self._bookmark = self._index - - def data_since_bookmark(self): - assert self._bookmark is not None - return self._buffer[self._bookmark:self._index] - - def remaining_data(self): - return len(self._buffer) - self._index - - def read(self, length): - new_index = self._index + length - if new_index > len(self._buffer): - raise ValueError("Not enough data for DER decoding: expected %d bytes and found %d" % (new_index, len(self._buffer))) - - result = self._buffer[self._index:new_index] - self._index = new_index - return result - - def read_byte(self): - return bord(self.read(1)[0]) 
- - -class DerObject(object): - """Base class for defining a single DER object. - - This class should never be directly instantiated. - """ - - def __init__(self, asn1Id=None, payload=b'', implicit=None, - constructed=False, explicit=None): - """Initialize the DER object according to a specific ASN.1 type. - - :Parameters: - asn1Id : integer or byte - The universal DER tag number for this object - (e.g. 0x10 for a SEQUENCE). - If None, the tag is not known yet. - - payload : byte string - The initial payload of the object (that it, - the content octets). - If not specified, the payload is empty. - - implicit : integer or byte - The IMPLICIT tag number (< 0x1F) to use for the encoded object. - It overrides the universal tag *asn1Id*. - It cannot be combined with the ``explicit`` parameter. - By default, there is no IMPLICIT tag. - - constructed : bool - True when the ASN.1 type is *constructed*. - False when it is *primitive* (default). - - explicit : integer or byte - The EXPLICIT tag number (< 0x1F) to use for the encoded object. - It cannot be combined with the ``implicit`` parameter. - By default, there is no EXPLICIT tag. - """ - - if asn1Id is None: - # The tag octet will be read in with ``decode`` - self._tag_octet = None - return - asn1Id = self._convertTag(asn1Id) - - self.payload = payload - - # In a BER/DER identifier octet: - # * bits 4-0 contain the tag value - # * bit 5 is set if the type is 'constructed' - # and unset if 'primitive' - # * bits 7-6 depend on the encoding class - # - # Class | Bit 7, Bit 6 - # ---------------------------------- - # universal | 0 0 - # application | 0 1 - # context-spec | 1 0 (default for IMPLICIT/EXPLICIT) - # private | 1 1 - # - - constructed_bit = 0x20 if constructed else 0x00 - - if None not in (explicit, implicit): - raise ValueError("Explicit and implicit tags are" - " mutually exclusive") - - if implicit is not None: - # IMPLICIT tag overrides asn1Id - self._tag_octet = 0x80 | constructed_bit | self._convertTag(implicit) - elif explicit is not None: - # 'constructed bit' is always asserted for an EXPLICIT tag - self._tag_octet = 0x80 | 0x20 | self._convertTag(explicit) - self._inner_tag_octet = constructed_bit | asn1Id - else: - # Neither IMPLICIT nor EXPLICIT - self._tag_octet = constructed_bit | asn1Id - - def _convertTag(self, tag): - """Check if *tag* is a real DER tag (5 bits). - Convert it from a character to number if necessary. - """ - if not _is_number(tag): - if len(tag) == 1: - tag = bord(tag[0]) - # Ensure that tag is a low tag - if not (_is_number(tag) and 0 <= tag < 0x1F): - raise ValueError("Wrong DER tag") - return tag - - @staticmethod - def _definite_form(length): - """Build length octets according to BER/DER - definite form. - """ - if length > 127: - encoding = long_to_bytes(length) - return bchr(len(encoding) + 128) + encoding - return bchr(length) - - def encode(self): - """Return this DER element, fully encoded as a binary byte string.""" - - # Concatenate identifier octets, length octets, - # and contents octets - - output_payload = self.payload - - # In case of an EXTERNAL tag, first encode the inner - # element. 
- if hasattr(self, "_inner_tag_octet"): - output_payload = (bchr(self._inner_tag_octet) + - self._definite_form(len(self.payload)) + - self.payload) - - return (bchr(self._tag_octet) + - self._definite_form(len(output_payload)) + - output_payload) - - def _decodeLen(self, s): - """Decode DER length octets from a file.""" - - length = s.read_byte() - - if length > 127: - encoded_length = s.read(length & 0x7F) - if bord(encoded_length[0]) == 0: - raise ValueError("Invalid DER: length has leading zero") - length = bytes_to_long(encoded_length) - if length <= 127: - raise ValueError("Invalid DER: length in long form but smaller than 128") - - return length - - def decode(self, der_encoded, strict=False): - """Decode a complete DER element, and re-initializes this - object with it. - - Args: - der_encoded (byte string): A complete DER element. - - Raises: - ValueError: in case of parsing errors. - """ - - if not byte_string(der_encoded): - raise ValueError("Input is not a byte string") - - s = BytesIO_EOF(der_encoded) - self._decodeFromStream(s, strict) - - # There shouldn't be other bytes left - if s.remaining_data() > 0: - raise ValueError("Unexpected extra data after the DER structure") - - return self - - def _decodeFromStream(self, s, strict): - """Decode a complete DER element from a file.""" - - idOctet = s.read_byte() - if self._tag_octet is not None: - if idOctet != self._tag_octet: - raise ValueError("Unexpected DER tag") - else: - self._tag_octet = idOctet - length = self._decodeLen(s) - self.payload = s.read(length) - - # In case of an EXTERNAL tag, further decode the inner - # element. - if hasattr(self, "_inner_tag_octet"): - p = BytesIO_EOF(self.payload) - inner_octet = p.read_byte() - if inner_octet != self._inner_tag_octet: - raise ValueError("Unexpected internal DER tag") - length = self._decodeLen(p) - self.payload = p.read(length) - - # There shouldn't be other bytes left - if p.remaining_data() > 0: - raise ValueError("Unexpected extra data after the DER structure") - - -class DerInteger(DerObject): - """Class to model a DER INTEGER. - - An example of encoding is:: - - >>> from Crypto.Util.asn1 import DerInteger - >>> from binascii import hexlify, unhexlify - >>> int_der = DerInteger(9) - >>> print hexlify(int_der.encode()) - - which will show ``020109``, the DER encoding of 9. - - And for decoding:: - - >>> s = unhexlify(b'020109') - >>> try: - >>> int_der = DerInteger() - >>> int_der.decode(s) - >>> print int_der.value - >>> except ValueError: - >>> print "Not a valid DER INTEGER" - - the output will be ``9``. - - :ivar value: The integer value - :vartype value: integer - """ - - def __init__(self, value=0, implicit=None, explicit=None): - """Initialize the DER object as an INTEGER. - - :Parameters: - value : integer - The value of the integer. - - implicit : integer - The IMPLICIT tag to use for the encoded object. - It overrides the universal tag for INTEGER (2). - """ - - DerObject.__init__(self, 0x02, b'', implicit, - False, explicit) - self.value = value # The integer value - - def encode(self): - """Return the DER INTEGER, fully encoded as a - binary string.""" - - number = self.value - self.payload = b'' - while True: - self.payload = bchr(int(number & 255)) + self.payload - if 128 <= number <= 255: - self.payload = bchr(0x00) + self.payload - if -128 <= number <= 255: - break - number >>= 8 - return DerObject.encode(self) - - def decode(self, der_encoded, strict=False): - """Decode a DER-encoded INTEGER, and re-initializes this - object with it. 
- - Args: - der_encoded (byte string): A complete INTEGER DER element. - - Raises: - ValueError: in case of parsing errors. - """ - - return DerObject.decode(self, der_encoded, strict=strict) - - def _decodeFromStream(self, s, strict): - """Decode a complete DER INTEGER from a file.""" - - # Fill up self.payload - DerObject._decodeFromStream(self, s, strict) - - if strict: - if len(self.payload) == 0: - raise ValueError("Invalid encoding for DER INTEGER: empty payload") - if len(self.payload) >= 2 and struct.unpack('>H', self.payload[:2])[0] < 0x80: - raise ValueError("Invalid encoding for DER INTEGER: leading zero") - - # Derive self.value from self.payload - self.value = 0 - bits = 1 - for i in self.payload: - self.value *= 256 - self.value += bord(i) - bits <<= 8 - if self.payload and bord(self.payload[0]) & 0x80: - self.value -= bits - - -class DerBoolean(DerObject): - """Class to model a DER-encoded BOOLEAN. - - An example of encoding is:: - - >>> from Crypto.Util.asn1 import DerBoolean - >>> bool_der = DerBoolean(True) - >>> print(bool_der.encode().hex()) - - which will show ``0101ff``, the DER encoding of True. - - And for decoding:: - - >>> s = bytes.fromhex('0101ff') - >>> try: - >>> bool_der = DerBoolean() - >>> bool_der.decode(s) - >>> print(bool_der.value) - >>> except ValueError: - >>> print "Not a valid DER BOOLEAN" - - the output will be ``True``. - - :ivar value: The boolean value - :vartype value: boolean - """ - def __init__(self, value=False, implicit=None, explicit=None): - """Initialize the DER object as a BOOLEAN. - - Args: - value (boolean): - The value of the boolean. Default is False. - - implicit (integer or byte): - The IMPLICIT tag number (< 0x1F) to use for the encoded object. - It overrides the universal tag for BOOLEAN (1). - It cannot be combined with the ``explicit`` parameter. - By default, there is no IMPLICIT tag. - - explicit (integer or byte): - The EXPLICIT tag number (< 0x1F) to use for the encoded object. - It cannot be combined with the ``implicit`` parameter. - By default, there is no EXPLICIT tag. - """ - - DerObject.__init__(self, 0x01, b'', implicit, False, explicit) - self.value = value # The boolean value - - def encode(self): - """Return the DER BOOLEAN, fully encoded as a binary string.""" - - self.payload = b'\xFF' if self.value else b'\x00' - return DerObject.encode(self) - - def decode(self, der_encoded, strict=False): - """Decode a DER-encoded BOOLEAN, and re-initializes this object with it. - - Args: - der_encoded (byte string): A DER-encoded BOOLEAN. - - Raises: - ValueError: in case of parsing errors. - """ - - return DerObject.decode(self, der_encoded, strict) - - def _decodeFromStream(self, s, strict): - """Decode a DER-encoded BOOLEAN from a file.""" - - # Fill up self.payload - DerObject._decodeFromStream(self, s, strict) - - if len(self.payload) != 1: - raise ValueError("Invalid encoding for DER BOOLEAN: payload is not 1 byte") - - if bord(self.payload[0]) == 0: - self.value = False - elif bord(self.payload[0]) == 0xFF: - self.value = True - else: - raise ValueError("Invalid payload for DER BOOLEAN") - - -class DerSequence(DerObject): - """Class to model a DER SEQUENCE. - - This object behaves like a dynamic Python sequence. - - Sub-elements that are INTEGERs behave like Python integers. - - Any other sub-element is a binary string encoded as a complete DER - sub-element (TLV). 
- - An example of encoding is: - - >>> from Crypto.Util.asn1 import DerSequence, DerInteger - >>> from binascii import hexlify, unhexlify - >>> obj_der = unhexlify('070102') - >>> seq_der = DerSequence([4]) - >>> seq_der.append(9) - >>> seq_der.append(obj_der.encode()) - >>> print hexlify(seq_der.encode()) - - which will show ``3009020104020109070102``, the DER encoding of the - sequence containing ``4``, ``9``, and the object with payload ``02``. - - For decoding: - - >>> s = unhexlify(b'3009020104020109070102') - >>> try: - >>> seq_der = DerSequence() - >>> seq_der.decode(s) - >>> print len(seq_der) - >>> print seq_der[0] - >>> print seq_der[:] - >>> except ValueError: - >>> print "Not a valid DER SEQUENCE" - - the output will be:: - - 3 - 4 - [4, 9, b'\x07\x01\x02'] - - """ - - def __init__(self, startSeq=None, implicit=None, explicit=None): - """Initialize the DER object as a SEQUENCE. - - :Parameters: - startSeq : Python sequence - A sequence whose element are either integers or - other DER objects. - - implicit : integer or byte - The IMPLICIT tag number (< 0x1F) to use for the encoded object. - It overrides the universal tag for SEQUENCE (16). - It cannot be combined with the ``explicit`` parameter. - By default, there is no IMPLICIT tag. - - explicit : integer or byte - The EXPLICIT tag number (< 0x1F) to use for the encoded object. - It cannot be combined with the ``implicit`` parameter. - By default, there is no EXPLICIT tag. - """ - - DerObject.__init__(self, 0x10, b'', implicit, True, explicit) - if startSeq is None: - self._seq = [] - else: - self._seq = startSeq - - # A few methods to make it behave like a python sequence - - def __delitem__(self, n): - del self._seq[n] - - def __getitem__(self, n): - return self._seq[n] - - def __setitem__(self, key, value): - self._seq[key] = value - - def __setslice__(self, i, j, sequence): - self._seq[i:j] = sequence - - def __delslice__(self, i, j): - del self._seq[i:j] - - def __getslice__(self, i, j): - return self._seq[max(0, i):max(0, j)] - - def __len__(self): - return len(self._seq) - - def __iadd__(self, item): - self._seq.append(item) - return self - - def append(self, item): - self._seq.append(item) - return self - - def insert(self, index, item): - self._seq.insert(index, item) - return self - - def hasInts(self, only_non_negative=True): - """Return the number of items in this sequence that are - integers. - - Args: - only_non_negative (boolean): - If ``True``, negative integers are not counted in. - """ - - items = [x for x in self._seq if _is_number(x, only_non_negative)] - return len(items) - - def hasOnlyInts(self, only_non_negative=True): - """Return ``True`` if all items in this sequence are integers - or non-negative integers. - - This function returns False is the sequence is empty, - or at least one member is not an integer. - - Args: - only_non_negative (boolean): - If ``True``, the presence of negative integers - causes the method to return ``False``.""" - return self._seq and self.hasInts(only_non_negative) == len(self._seq) - - def encode(self): - """Return this DER SEQUENCE, fully encoded as a - binary string. - - Raises: - ValueError: if some elements in the sequence are neither integers - nor byte strings. 
- """ - self.payload = b'' - for item in self._seq: - if byte_string(item): - self.payload += item - elif _is_number(item): - self.payload += DerInteger(item).encode() - else: - self.payload += item.encode() - return DerObject.encode(self) - - def decode(self, der_encoded, strict=False, nr_elements=None, only_ints_expected=False): - """Decode a complete DER SEQUENCE, and re-initializes this - object with it. - - Args: - der_encoded (byte string): - A complete SEQUENCE DER element. - nr_elements (None or integer or list of integers): - The number of members the SEQUENCE can have - only_ints_expected (boolean): - Whether the SEQUENCE is expected to contain only integers. - strict (boolean): - Whether decoding must check for strict DER compliancy. - - Raises: - ValueError: in case of parsing errors. - - DER INTEGERs are decoded into Python integers. Any other DER - element is not decoded. Its validity is not checked. - """ - - self._nr_elements = nr_elements - result = DerObject.decode(self, der_encoded, strict=strict) - - if only_ints_expected and not self.hasOnlyInts(): - raise ValueError("Some members are not INTEGERs") - - return result - - def _decodeFromStream(self, s, strict): - """Decode a complete DER SEQUENCE from a file.""" - - self._seq = [] - - # Fill up self.payload - DerObject._decodeFromStream(self, s, strict) - - # Add one item at a time to self.seq, by scanning self.payload - p = BytesIO_EOF(self.payload) - while p.remaining_data() > 0: - p.set_bookmark() - - der = DerObject() - der._decodeFromStream(p, strict) - - # Parse INTEGERs differently - if der._tag_octet != 0x02: - self._seq.append(p.data_since_bookmark()) - else: - derInt = DerInteger() - data = p.data_since_bookmark() - derInt.decode(data, strict=strict) - self._seq.append(derInt.value) - - ok = True - if self._nr_elements is not None: - try: - ok = len(self._seq) in self._nr_elements - except TypeError: - ok = len(self._seq) == self._nr_elements - - if not ok: - raise ValueError("Unexpected number of members (%d)" - " in the sequence" % len(self._seq)) - - -class DerOctetString(DerObject): - """Class to model a DER OCTET STRING. - - An example of encoding is: - - >>> from Crypto.Util.asn1 import DerOctetString - >>> from binascii import hexlify, unhexlify - >>> os_der = DerOctetString(b'\\xaa') - >>> os_der.payload += b'\\xbb' - >>> print hexlify(os_der.encode()) - - which will show ``0402aabb``, the DER encoding for the byte string - ``b'\\xAA\\xBB'``. - - For decoding: - - >>> s = unhexlify(b'0402aabb') - >>> try: - >>> os_der = DerOctetString() - >>> os_der.decode(s) - >>> print hexlify(os_der.payload) - >>> except ValueError: - >>> print "Not a valid DER OCTET STRING" - - the output will be ``aabb``. - - :ivar payload: The content of the string - :vartype payload: byte string - """ - - def __init__(self, value=b'', implicit=None): - """Initialize the DER object as an OCTET STRING. - - :Parameters: - value : byte string - The initial payload of the object. - If not specified, the payload is empty. - - implicit : integer - The IMPLICIT tag to use for the encoded object. - It overrides the universal tag for OCTET STRING (4). - """ - DerObject.__init__(self, 0x04, value, implicit, False) - - -class DerNull(DerObject): - """Class to model a DER NULL element.""" - - def __init__(self): - """Initialize the DER object as a NULL.""" - - DerObject.__init__(self, 0x05, b'', None, False) - - -class DerObjectId(DerObject): - """Class to model a DER OBJECT ID. 
- - An example of encoding is: - - >>> from Crypto.Util.asn1 import DerObjectId - >>> from binascii import hexlify, unhexlify - >>> oid_der = DerObjectId("1.2") - >>> oid_der.value += ".840.113549.1.1.1" - >>> print hexlify(oid_der.encode()) - - which will show ``06092a864886f70d010101``, the DER encoding for the - RSA Object Identifier ``1.2.840.113549.1.1.1``. - - For decoding: - - >>> s = unhexlify(b'06092a864886f70d010101') - >>> try: - >>> oid_der = DerObjectId() - >>> oid_der.decode(s) - >>> print oid_der.value - >>> except ValueError: - >>> print "Not a valid DER OBJECT ID" - - the output will be ``1.2.840.113549.1.1.1``. - - :ivar value: The Object ID (OID), a dot separated list of integers - :vartype value: string - """ - - def __init__(self, value='', implicit=None, explicit=None): - """Initialize the DER object as an OBJECT ID. - - :Parameters: - value : string - The initial Object Identifier (e.g. "1.2.0.0.6.2"). - implicit : integer - The IMPLICIT tag to use for the encoded object. - It overrides the universal tag for OBJECT ID (6). - explicit : integer - The EXPLICIT tag to use for the encoded object. - """ - DerObject.__init__(self, 0x06, b'', implicit, False, explicit) - self.value = value - - def encode(self): - """Return the DER OBJECT ID, fully encoded as a - binary string.""" - - comps = [int(x) for x in self.value.split(".")] - - if len(comps) < 2: - raise ValueError("Not a valid Object Identifier string") - if comps[0] > 2: - raise ValueError("First component must be 0, 1 or 2") - if comps[0] < 2 and comps[1] > 39: - raise ValueError("Second component must be 39 at most") - - subcomps = [40 * comps[0] + comps[1]] + comps[2:] - - encoding = [] - for v in reversed(subcomps): - encoding.append(v & 0x7F) - v >>= 7 - while v: - encoding.append((v & 0x7F) | 0x80) - v >>= 7 - - self.payload = b''.join([bchr(x) for x in reversed(encoding)]) - return DerObject.encode(self) - - def decode(self, der_encoded, strict=False): - """Decode a complete DER OBJECT ID, and re-initializes this - object with it. - - Args: - der_encoded (byte string): - A complete DER OBJECT ID. - strict (boolean): - Whether decoding must check for strict DER compliancy. - - Raises: - ValueError: in case of parsing errors. - """ - - return DerObject.decode(self, der_encoded, strict) - - def _decodeFromStream(self, s, strict): - """Decode a complete DER OBJECT ID from a file.""" - - # Fill up self.payload - DerObject._decodeFromStream(self, s, strict) - - # Derive self.value from self.payload - p = BytesIO_EOF(self.payload) - - subcomps = [] - v = 0 - while p.remaining_data(): - c = p.read_byte() - v = (v << 7) + (c & 0x7F) - if not (c & 0x80): - subcomps.append(v) - v = 0 - - if len(subcomps) == 0: - raise ValueError("Empty payload") - - if subcomps[0] < 40: - subcomps[:1] = [0, subcomps[0]] - elif subcomps[0] < 80: - subcomps[:1] = [1, subcomps[0] - 40] - else: - subcomps[:1] = [2, subcomps[0] - 80] - - self.value = ".".join([str(x) for x in subcomps]) - - -class DerBitString(DerObject): - """Class to model a DER BIT STRING. - - An example of encoding is: - - >>> from Crypto.Util.asn1 import DerBitString - >>> bs_der = DerBitString(b'\\xAA') - >>> bs_der.value += b'\\xBB' - >>> print(bs_der.encode().hex()) - - which will show ``030300aabb``, the DER encoding for the bit string - ``b'\\xAA\\xBB'``. 
- - For decoding: - - >>> s = bytes.fromhex('030300aabb') - >>> try: - >>> bs_der = DerBitString() - >>> bs_der.decode(s) - >>> print(bs_der.value.hex()) - >>> except ValueError: - >>> print "Not a valid DER BIT STRING" - - the output will be ``aabb``. - - :ivar value: The content of the string - :vartype value: byte string - """ - - def __init__(self, value=b'', implicit=None, explicit=None): - """Initialize the DER object as a BIT STRING. - - :Parameters: - value : byte string or DER object - The initial, packed bit string. - If not specified, the bit string is empty. - implicit : integer - The IMPLICIT tag to use for the encoded object. - It overrides the universal tag for BIT STRING (3). - explicit : integer - The EXPLICIT tag to use for the encoded object. - """ - DerObject.__init__(self, 0x03, b'', implicit, False, explicit) - - # The bitstring value (packed) - if isinstance(value, DerObject): - self.value = value.encode() - else: - self.value = value - - def encode(self): - """Return the DER BIT STRING, fully encoded as a - byte string.""" - - # Add padding count byte - self.payload = b'\x00' + self.value - return DerObject.encode(self) - - def decode(self, der_encoded, strict=False): - """Decode a complete DER BIT STRING, and re-initializes this - object with it. - - Args: - der_encoded (byte string): a complete DER BIT STRING. - strict (boolean): - Whether decoding must check for strict DER compliancy. - - Raises: - ValueError: in case of parsing errors. - """ - - return DerObject.decode(self, der_encoded, strict) - - def _decodeFromStream(self, s, strict): - """Decode a complete DER BIT STRING DER from a file.""" - - # Fill-up self.payload - DerObject._decodeFromStream(self, s, strict) - - if self.payload and bord(self.payload[0]) != 0: - raise ValueError("Not a valid BIT STRING") - - # Fill-up self.value - self.value = b'' - # Remove padding count byte - if self.payload: - self.value = self.payload[1:] - - -class DerSetOf(DerObject): - """Class to model a DER SET OF. - - An example of encoding is: - - >>> from Crypto.Util.asn1 import DerBitString - >>> from binascii import hexlify, unhexlify - >>> so_der = DerSetOf([4,5]) - >>> so_der.add(6) - >>> print hexlify(so_der.encode()) - - which will show ``3109020104020105020106``, the DER encoding - of a SET OF with items 4,5, and 6. - - For decoding: - - >>> s = unhexlify(b'3109020104020105020106') - >>> try: - >>> so_der = DerSetOf() - >>> so_der.decode(s) - >>> print [x for x in so_der] - >>> except ValueError: - >>> print "Not a valid DER SET OF" - - the output will be ``[4, 5, 6]``. - """ - - def __init__(self, startSet=None, implicit=None): - """Initialize the DER object as a SET OF. - - :Parameters: - startSet : container - The initial set of integers or DER encoded objects. - implicit : integer - The IMPLICIT tag to use for the encoded object. - It overrides the universal tag for SET OF (17). - """ - DerObject.__init__(self, 0x11, b'', implicit, True) - self._seq = [] - - # All elements must be of the same type (and therefore have the - # same leading octet) - self._elemOctet = None - - if startSet: - for e in startSet: - self.add(e) - - def __getitem__(self, n): - return self._seq[n] - - def __iter__(self): - return iter(self._seq) - - def __len__(self): - return len(self._seq) - - def add(self, elem): - """Add an element to the set. - - Args: - elem (byte string or integer): - An element of the same type of objects already in the set. - It can be an integer or a DER encoded object. 
- """ - - if _is_number(elem): - eo = 0x02 - elif isinstance(elem, DerObject): - eo = self._tag_octet - else: - eo = bord(elem[0]) - - if self._elemOctet != eo: - if self._elemOctet is not None: - raise ValueError("New element does not belong to the set") - self._elemOctet = eo - - if elem not in self._seq: - self._seq.append(elem) - - def decode(self, der_encoded, strict=False): - """Decode a complete SET OF DER element, and re-initializes this - object with it. - - DER INTEGERs are decoded into Python integers. Any other DER - element is left undecoded; its validity is not checked. - - Args: - der_encoded (byte string): a complete DER BIT SET OF. - strict (boolean): - Whether decoding must check for strict DER compliancy. - - Raises: - ValueError: in case of parsing errors. - """ - - return DerObject.decode(self, der_encoded, strict) - - def _decodeFromStream(self, s, strict): - """Decode a complete DER SET OF from a file.""" - - self._seq = [] - - # Fill up self.payload - DerObject._decodeFromStream(self, s, strict) - - # Add one item at a time to self.seq, by scanning self.payload - p = BytesIO_EOF(self.payload) - setIdOctet = -1 - while p.remaining_data() > 0: - p.set_bookmark() - - der = DerObject() - der._decodeFromStream(p, strict) - - # Verify that all members are of the same type - if setIdOctet < 0: - setIdOctet = der._tag_octet - else: - if setIdOctet != der._tag_octet: - raise ValueError("Not all elements are of the same DER type") - - # Parse INTEGERs differently - if setIdOctet != 0x02: - self._seq.append(p.data_since_bookmark()) - else: - derInt = DerInteger() - derInt.decode(p.data_since_bookmark(), strict) - self._seq.append(derInt.value) - # end - - def encode(self): - """Return this SET OF DER element, fully encoded as a - binary string. 
- """ - - # Elements in the set must be ordered in lexicographic order - ordered = [] - for item in self._seq: - if _is_number(item): - bys = DerInteger(item).encode() - elif isinstance(item, DerObject): - bys = item.encode() - else: - bys = item - ordered.append(bys) - ordered.sort() - self.payload = b''.join(ordered) - return DerObject.encode(self) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_runner.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_runner.py deleted file mode 100644 index 9282bb93d37a4a3a0ada346ec7534de0ea0e893d..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_runner.py +++ /dev/null @@ -1,381 +0,0 @@ -import asyncio -import signal -import socket -from abc import ABC, abstractmethod -from typing import Any, List, Optional, Set - -from yarl import URL - -from .web_app import Application -from .web_server import Server - -try: - from ssl import SSLContext -except ImportError: - SSLContext = object # type: ignore[misc,assignment] - - -__all__ = ( - "BaseSite", - "TCPSite", - "UnixSite", - "NamedPipeSite", - "SockSite", - "BaseRunner", - "AppRunner", - "ServerRunner", - "GracefulExit", -) - - -class GracefulExit(SystemExit): - code = 1 - - -def _raise_graceful_exit() -> None: - raise GracefulExit() - - -class BaseSite(ABC): - __slots__ = ("_runner", "_shutdown_timeout", "_ssl_context", "_backlog", "_server") - - def __init__( - self, - runner: "BaseRunner", - *, - shutdown_timeout: float = 60.0, - ssl_context: Optional[SSLContext] = None, - backlog: int = 128, - ) -> None: - if runner.server is None: - raise RuntimeError("Call runner.setup() before making a site") - self._runner = runner - self._shutdown_timeout = shutdown_timeout - self._ssl_context = ssl_context - self._backlog = backlog - self._server: Optional[asyncio.AbstractServer] = None - - @property - @abstractmethod - def name(self) -> str: - pass # pragma: no cover - - @abstractmethod - async def start(self) -> None: - self._runner._reg_site(self) - - async def stop(self) -> None: - self._runner._check_site(self) - if self._server is None: - self._runner._unreg_site(self) - return # not started yet - self._server.close() - # named pipes do not have wait_closed property - if hasattr(self._server, "wait_closed"): - await self._server.wait_closed() - await self._runner.shutdown() - assert self._runner.server - await self._runner.server.shutdown(self._shutdown_timeout) - self._runner._unreg_site(self) - - -class TCPSite(BaseSite): - __slots__ = ("_host", "_port", "_reuse_address", "_reuse_port") - - def __init__( - self, - runner: "BaseRunner", - host: Optional[str] = None, - port: Optional[int] = None, - *, - shutdown_timeout: float = 60.0, - ssl_context: Optional[SSLContext] = None, - backlog: int = 128, - reuse_address: Optional[bool] = None, - reuse_port: Optional[bool] = None, - ) -> None: - super().__init__( - runner, - shutdown_timeout=shutdown_timeout, - ssl_context=ssl_context, - backlog=backlog, - ) - self._host = host - if port is None: - port = 8443 if self._ssl_context else 8080 - self._port = port - self._reuse_address = reuse_address - self._reuse_port = reuse_port - - @property - def name(self) -> str: - scheme = "https" if self._ssl_context else "http" - host = "0.0.0.0" if self._host is None else self._host - return str(URL.build(scheme=scheme, host=host, port=self._port)) - - async def start(self) -> None: - await super().start() - loop = 
asyncio.get_event_loop() - server = self._runner.server - assert server is not None - self._server = await loop.create_server( - server, - self._host, - self._port, - ssl=self._ssl_context, - backlog=self._backlog, - reuse_address=self._reuse_address, - reuse_port=self._reuse_port, - ) - - -class UnixSite(BaseSite): - __slots__ = ("_path",) - - def __init__( - self, - runner: "BaseRunner", - path: str, - *, - shutdown_timeout: float = 60.0, - ssl_context: Optional[SSLContext] = None, - backlog: int = 128, - ) -> None: - super().__init__( - runner, - shutdown_timeout=shutdown_timeout, - ssl_context=ssl_context, - backlog=backlog, - ) - self._path = path - - @property - def name(self) -> str: - scheme = "https" if self._ssl_context else "http" - return f"{scheme}://unix:{self._path}:" - - async def start(self) -> None: - await super().start() - loop = asyncio.get_event_loop() - server = self._runner.server - assert server is not None - self._server = await loop.create_unix_server( - server, self._path, ssl=self._ssl_context, backlog=self._backlog - ) - - -class NamedPipeSite(BaseSite): - __slots__ = ("_path",) - - def __init__( - self, runner: "BaseRunner", path: str, *, shutdown_timeout: float = 60.0 - ) -> None: - loop = asyncio.get_event_loop() - if not isinstance( - loop, asyncio.ProactorEventLoop # type: ignore[attr-defined] - ): - raise RuntimeError( - "Named Pipes only available in proactor" "loop under windows" - ) - super().__init__(runner, shutdown_timeout=shutdown_timeout) - self._path = path - - @property - def name(self) -> str: - return self._path - - async def start(self) -> None: - await super().start() - loop = asyncio.get_event_loop() - server = self._runner.server - assert server is not None - _server = await loop.start_serving_pipe( # type: ignore[attr-defined] - server, self._path - ) - self._server = _server[0] - - -class SockSite(BaseSite): - __slots__ = ("_sock", "_name") - - def __init__( - self, - runner: "BaseRunner", - sock: socket.socket, - *, - shutdown_timeout: float = 60.0, - ssl_context: Optional[SSLContext] = None, - backlog: int = 128, - ) -> None: - super().__init__( - runner, - shutdown_timeout=shutdown_timeout, - ssl_context=ssl_context, - backlog=backlog, - ) - self._sock = sock - scheme = "https" if self._ssl_context else "http" - if hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX: - name = f"{scheme}://unix:{sock.getsockname()}:" - else: - host, port = sock.getsockname()[:2] - name = str(URL.build(scheme=scheme, host=host, port=port)) - self._name = name - - @property - def name(self) -> str: - return self._name - - async def start(self) -> None: - await super().start() - loop = asyncio.get_event_loop() - server = self._runner.server - assert server is not None - self._server = await loop.create_server( - server, sock=self._sock, ssl=self._ssl_context, backlog=self._backlog - ) - - -class BaseRunner(ABC): - __slots__ = ("_handle_signals", "_kwargs", "_server", "_sites") - - def __init__(self, *, handle_signals: bool = False, **kwargs: Any) -> None: - self._handle_signals = handle_signals - self._kwargs = kwargs - self._server: Optional[Server] = None - self._sites: List[BaseSite] = [] - - @property - def server(self) -> Optional[Server]: - return self._server - - @property - def addresses(self) -> List[Any]: - ret: List[Any] = [] - for site in self._sites: - server = site._server - if server is not None: - sockets = server.sockets - if sockets is not None: - for sock in sockets: - ret.append(sock.getsockname()) - return ret - - @property 
- def sites(self) -> Set[BaseSite]: - return set(self._sites) - - async def setup(self) -> None: - loop = asyncio.get_event_loop() - - if self._handle_signals: - try: - loop.add_signal_handler(signal.SIGINT, _raise_graceful_exit) - loop.add_signal_handler(signal.SIGTERM, _raise_graceful_exit) - except NotImplementedError: # pragma: no cover - # add_signal_handler is not implemented on Windows - pass - - self._server = await self._make_server() - - @abstractmethod - async def shutdown(self) -> None: - pass # pragma: no cover - - async def cleanup(self) -> None: - loop = asyncio.get_event_loop() - - # The loop over sites is intentional, an exception on gather() - # leaves self._sites in unpredictable state. - # The loop guaranties that a site is either deleted on success or - # still present on failure - for site in list(self._sites): - await site.stop() - await self._cleanup_server() - self._server = None - if self._handle_signals: - try: - loop.remove_signal_handler(signal.SIGINT) - loop.remove_signal_handler(signal.SIGTERM) - except NotImplementedError: # pragma: no cover - # remove_signal_handler is not implemented on Windows - pass - - @abstractmethod - async def _make_server(self) -> Server: - pass # pragma: no cover - - @abstractmethod - async def _cleanup_server(self) -> None: - pass # pragma: no cover - - def _reg_site(self, site: BaseSite) -> None: - if site in self._sites: - raise RuntimeError(f"Site {site} is already registered in runner {self}") - self._sites.append(site) - - def _check_site(self, site: BaseSite) -> None: - if site not in self._sites: - raise RuntimeError(f"Site {site} is not registered in runner {self}") - - def _unreg_site(self, site: BaseSite) -> None: - if site not in self._sites: - raise RuntimeError(f"Site {site} is not registered in runner {self}") - self._sites.remove(site) - - -class ServerRunner(BaseRunner): - """Low-level web server runner""" - - __slots__ = ("_web_server",) - - def __init__( - self, web_server: Server, *, handle_signals: bool = False, **kwargs: Any - ) -> None: - super().__init__(handle_signals=handle_signals, **kwargs) - self._web_server = web_server - - async def shutdown(self) -> None: - pass - - async def _make_server(self) -> Server: - return self._web_server - - async def _cleanup_server(self) -> None: - pass - - -class AppRunner(BaseRunner): - """Web Application runner""" - - __slots__ = ("_app",) - - def __init__( - self, app: Application, *, handle_signals: bool = False, **kwargs: Any - ) -> None: - super().__init__(handle_signals=handle_signals, **kwargs) - if not isinstance(app, Application): - raise TypeError( - "The first argument should be web.Application " - "instance, got {!r}".format(app) - ) - self._app = app - - @property - def app(self) -> Application: - return self._app - - async def shutdown(self) -> None: - await self._app.shutdown() - - async def _make_server(self) -> Server: - loop = asyncio.get_event_loop() - self._app._set_loop(loop) - self._app.on_startup.freeze() - await self._app.startup() - self._app.freeze() - - return self._app._make_handler(loop=loop, **self._kwargs) - - async def _cleanup_server(self) -> None: - await self._app.cleanup() diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/vector_store/vector_indices.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/vector_store/vector_indices.py deleted file mode 100644 index 6541233395018863f59be0ed141a3ea9f8775781..0000000000000000000000000000000000000000 --- 
a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/vector_store/vector_indices.py +++ /dev/null @@ -1,591 +0,0 @@ -"""Deprecated vector store indices.""" - -from typing import Any, Dict, Optional, Sequence, Type, cast - -from gpt_index.data_structs.data_structs import ( - ChromaIndexDict, - FaissIndexDict, - IndexDict, - OpensearchIndexDict, - PineconeIndexDict, - QdrantIndexDict, - SimpleIndexDict, - WeaviateIndexDict, -) -from gpt_index.embeddings.base import BaseEmbedding -from gpt_index.indices.base import DOCUMENTS_INPUT, BaseGPTIndex -from gpt_index.indices.query.base import BaseGPTIndexQuery -from gpt_index.indices.query.schema import QueryMode -from gpt_index.indices.query.vector_store.queries import ( - GPTChromaIndexQuery, - GPTFaissIndexQuery, - GPTOpensearchIndexQuery, - GPTPineconeIndexQuery, - GPTQdrantIndexQuery, - GPTSimpleVectorIndexQuery, - GPTWeaviateIndexQuery, -) -from gpt_index.indices.vector_store.base import GPTVectorStoreIndex -from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor -from gpt_index.prompts.prompts import QuestionAnswerPrompt -from gpt_index.vector_stores import ( - ChromaVectorStore, - FaissVectorStore, - PineconeVectorStore, - QdrantVectorStore, - SimpleVectorStore, - WeaviateVectorStore, -) -from gpt_index.vector_stores.opensearch import ( - OpensearchVectorClient, - OpensearchVectorStore, -) - - -class GPTSimpleVectorIndex(GPTVectorStoreIndex): - """GPT Simple Vector Index. - - The GPTSimpleVectorIndex is a data structure where nodes are keyed by - embeddings, and those embeddings are stored within a simple dictionary. - During index construction, the document texts are chunked up, - converted to nodes with text; they are then encoded in - document embeddings stored within the dict. - - During query time, the index uses the dict to query for the top - k most similar nodes, and synthesizes an answer from the - retrieved nodes. - - Args: - text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt - (see :ref:`Prompt-Templates`). - NOTE: this is a deprecated field. - embed_model (Optional[BaseEmbedding]): Embedding model to use for - embedding similarity. 
- - """ - - index_struct_cls: Type[IndexDict] = SimpleIndexDict - - def __init__( - self, - documents: Optional[Sequence[DOCUMENTS_INPUT]] = None, - index_struct: Optional[IndexDict] = None, - text_qa_template: Optional[QuestionAnswerPrompt] = None, - llm_predictor: Optional[LLMPredictor] = None, - embed_model: Optional[BaseEmbedding] = None, - simple_vector_store_data_dict: Optional[dict] = None, - **kwargs: Any, - ) -> None: - """Init params.""" - vector_store = SimpleVectorStore( - simple_vector_store_data_dict=simple_vector_store_data_dict - ) - - super().__init__( - documents=documents, - index_struct=index_struct, - text_qa_template=text_qa_template, - llm_predictor=llm_predictor, - embed_model=embed_model, - vector_store=vector_store, - **kwargs, - ) - - # TODO: Temporary hack to also store embeddings in index_struct - embedding_dict = vector_store._data.embedding_dict - self._index_struct.embeddings_dict = embedding_dict - # update docstore with current struct - self._docstore.add_documents([self.index_struct], allow_update=True) - - @classmethod - def get_query_map(self) -> Dict[str, Type[BaseGPTIndexQuery]]: - """Get query map.""" - return { - QueryMode.DEFAULT: GPTSimpleVectorIndexQuery, - QueryMode.EMBEDDING: GPTSimpleVectorIndexQuery, - } - - def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None: - """Preprocess query.""" - super()._preprocess_query(mode, query_kwargs) - del query_kwargs["vector_store"] - vector_store = cast(SimpleVectorStore, self._vector_store) - query_kwargs["simple_vector_store_data_dict"] = vector_store._data - - -class GPTFaissIndex(GPTVectorStoreIndex): - """GPT Faiss Index. - - The GPTFaissIndex is a data structure where nodes are keyed by - embeddings, and those embeddings are stored within a Faiss index. - During index construction, the document texts are chunked up, - converted to nodes with text; they are then encoded in - document embeddings stored within Faiss. - - During query time, the index uses Faiss to query for the top - k most similar nodes, and synthesizes an answer from the - retrieved nodes. - - Args: - text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt - (see :ref:`Prompt-Templates`). - NOTE: this is a deprecated field. - faiss_index (faiss.Index): A Faiss Index object (required). Note: the index - will be reset during index construction. - embed_model (Optional[BaseEmbedding]): Embedding model to use for - embedding similarity. 
- """ - - index_struct_cls: Type[IndexDict] = FaissIndexDict - - def __init__( - self, - documents: Optional[Sequence[DOCUMENTS_INPUT]] = None, - faiss_index: Optional[Any] = None, - index_struct: Optional[IndexDict] = None, - text_qa_template: Optional[QuestionAnswerPrompt] = None, - llm_predictor: Optional[LLMPredictor] = None, - embed_model: Optional[BaseEmbedding] = None, - **kwargs: Any, - ) -> None: - """Init params.""" - if faiss_index is None: - raise ValueError("faiss_index is required.") - vector_store = FaissVectorStore(faiss_index) - - super().__init__( - documents=documents, - index_struct=index_struct, - text_qa_template=text_qa_template, - llm_predictor=llm_predictor, - embed_model=embed_model, - vector_store=vector_store, - **kwargs, - ) - - @classmethod - def get_query_map(self) -> Dict[str, Type[BaseGPTIndexQuery]]: - """Get query map.""" - return { - QueryMode.DEFAULT: GPTFaissIndexQuery, - QueryMode.EMBEDDING: GPTFaissIndexQuery, - } - - def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None: - """Preprocess query.""" - super()._preprocess_query(mode, query_kwargs) - del query_kwargs["vector_store"] - vector_store = cast(FaissVectorStore, self._vector_store) - query_kwargs["faiss_index"] = vector_store._faiss_index - - @classmethod - def load_from_disk( - cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any - ) -> "BaseGPTIndex": - """Load index from disk. - - This method loads the index from a JSON file stored on disk. The index data - structure itself is preserved completely. If the index is defined over - subindices, those subindices will also be preserved (and subindices of - those subindices, etc.). - In GPTFaissIndex, we allow user to specify an additional - `faiss_index_save_path` to load faiss index from a file - that - way, the user does not have to recreate the faiss index outside - of this class. - - Args: - save_path (str): The save_path of the file. - faiss_index_save_path (Optional[str]): The save_path of the - Faiss index file. If not specified, the Faiss index - will not be saved to disk. - **kwargs: Additional kwargs to pass to the index constructor. - - Returns: - BaseGPTIndex: The loaded index. - """ - if faiss_index_save_path is not None: - import faiss - - faiss_index = faiss.read_index(faiss_index_save_path) - return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs) - else: - return super().load_from_disk(save_path, **kwargs) - - def save_to_disk( - self, - save_path: str, - faiss_index_save_path: Optional[str] = None, - **save_kwargs: Any, - ) -> None: - """Save to file. - - This method stores the index into a JSON file stored on disk. - In GPTFaissIndex, we allow user to specify an additional - `faiss_index_save_path` to save the faiss index to a file - that - way, the user can pass in the same argument in - `GPTFaissIndex.load_from_disk` without having to recreate - the Faiss index outside of this class. - - Args: - save_path (str): The save_path of the file. - faiss_index_save_path (Optional[str]): The save_path of the - Faiss index file. If not specified, the Faiss index - will not be saved to disk. - """ - super().save_to_disk(save_path, **save_kwargs) - - if faiss_index_save_path is not None: - import faiss - - faiss.write_index(self._vector_store.client, faiss_index_save_path) - - -class GPTPineconeIndex(GPTVectorStoreIndex): - """GPT Pinecone Index. 
- - The GPTPineconeIndex is a data structure where nodes are keyed by - embeddings, and those embeddings are stored within a Pinecone index. - During index construction, the document texts are chunked up, - converted to nodes with text; they are then encoded in - document embeddings stored within Pinecone. - - During query time, the index uses Pinecone to query for the top - k most similar nodes, and synthesizes an answer from the - retrieved nodes. - - Args: - text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt - (see :ref:`Prompt-Templates`). - NOTE: this is a deprecated field. - embed_model (Optional[BaseEmbedding]): Embedding model to use for - embedding similarity. - chunk_size_limit (int): Maximum number of tokens per chunk. NOTE: - in Pinecone the default is 2048 due to metadata size restrictions. - """ - - index_struct_cls: Type[IndexDict] = PineconeIndexDict - - def __init__( - self, - documents: Optional[Sequence[DOCUMENTS_INPUT]] = None, - pinecone_index: Optional[Any] = None, - pinecone_kwargs: Optional[Dict] = None, - index_struct: Optional[IndexDict] = None, - text_qa_template: Optional[QuestionAnswerPrompt] = None, - llm_predictor: Optional[LLMPredictor] = None, - embed_model: Optional[BaseEmbedding] = None, - chunk_size_limit: int = 2048, - **kwargs: Any, - ) -> None: - """Init params.""" - if pinecone_index is None: - raise ValueError("pinecone_index is required.") - if pinecone_kwargs is None: - pinecone_kwargs = {} - vector_store = kwargs.pop( - "vector_store", - PineconeVectorStore( - pinecone_index=pinecone_index, pinecone_kwargs=pinecone_kwargs - ), - ) - - super().__init__( - documents=documents, - index_struct=index_struct, - text_qa_template=text_qa_template, - llm_predictor=llm_predictor, - embed_model=embed_model, - vector_store=vector_store, - chunk_size_limit=chunk_size_limit, - **kwargs, - ) - - @classmethod - def get_query_map(self) -> Dict[str, Type[BaseGPTIndexQuery]]: - """Get query map.""" - return { - QueryMode.DEFAULT: GPTPineconeIndexQuery, - QueryMode.EMBEDDING: GPTPineconeIndexQuery, - } - - def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None: - """Preprocess query.""" - super()._preprocess_query(mode, query_kwargs) - del query_kwargs["vector_store"] - vector_store = cast(PineconeVectorStore, self._vector_store) - query_kwargs["pinecone_index"] = vector_store._pinecone_index - query_kwargs["pinecone_kwargs"] = vector_store._pinecone_kwargs - - -class GPTWeaviateIndex(GPTVectorStoreIndex): - """GPT Weaviate Index. - - The GPTWeaviateIndex is a data structure where nodes are keyed by - embeddings, and those embeddings are stored within a Weaviate index. - During index construction, the document texts are chunked up, - converted to nodes with text; they are then encoded in - document embeddings stored within Weaviate. - - During query time, the index uses Weaviate to query for the top - k most similar nodes, and synthesizes an answer from the - retrieved nodes. - - Args: - text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt - (see :ref:`Prompt-Templates`). - NOTE: this is a deprecated field. - embed_model (Optional[BaseEmbedding]): Embedding model to use for - embedding similarity. 
- """ - - index_struct_cls: Type[IndexDict] = WeaviateIndexDict - - def __init__( - self, - documents: Optional[Sequence[DOCUMENTS_INPUT]] = None, - weaviate_client: Optional[Any] = None, - class_prefix: Optional[str] = None, - index_struct: Optional[IndexDict] = None, - text_qa_template: Optional[QuestionAnswerPrompt] = None, - llm_predictor: Optional[LLMPredictor] = None, - embed_model: Optional[BaseEmbedding] = None, - **kwargs: Any, - ) -> None: - """Init params.""" - if weaviate_client is None: - raise ValueError("weaviate_client is required.") - vector_store = WeaviateVectorStore( - weaviate_client=weaviate_client, class_prefix=class_prefix - ) - - super().__init__( - documents=documents, - index_struct=index_struct, - text_qa_template=text_qa_template, - llm_predictor=llm_predictor, - embed_model=embed_model, - vector_store=vector_store, - **kwargs, - ) - - @classmethod - def get_query_map(self) -> Dict[str, Type[BaseGPTIndexQuery]]: - """Get query map.""" - return { - QueryMode.DEFAULT: GPTWeaviateIndexQuery, - QueryMode.EMBEDDING: GPTWeaviateIndexQuery, - } - - def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None: - """Preprocess query.""" - super()._preprocess_query(mode, query_kwargs) - del query_kwargs["vector_store"] - vector_store = cast(WeaviateVectorStore, self._vector_store) - query_kwargs["weaviate_client"] = vector_store._client - query_kwargs["class_prefix"] = vector_store._class_prefix - - -class GPTQdrantIndex(GPTVectorStoreIndex): - """GPT Qdrant Index. - - The GPTQdrantIndex is a data structure where nodes are keyed by - embeddings, and those embeddings are stored within a Qdrant collection. - During index construction, the document texts are chunked up, - converted to nodes with text; they are then encoded in - document embeddings stored within Qdrant. - - During query time, the index uses Qdrant to query for the top - k most similar nodes, and synthesizes an answer from the - retrieved nodes. - - Args: - text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt - (see :ref:`Prompt-Templates`). - NOTE: this is a deprecated field. - embed_model (Optional[BaseEmbedding]): Embedding model to use for - embedding similarity. 
- client (Optional[Any]): QdrantClient instance from `qdrant-client` package - collection_name: (Optional[str]): name of the Qdrant collection - """ - - index_struct_cls: Type[IndexDict] = QdrantIndexDict - - def __init__( - self, - documents: Optional[Sequence[DOCUMENTS_INPUT]] = None, - client: Optional[Any] = None, - collection_name: Optional[str] = None, - index_struct: Optional[IndexDict] = None, - text_qa_template: Optional[QuestionAnswerPrompt] = None, - llm_predictor: Optional[LLMPredictor] = None, - embed_model: Optional[BaseEmbedding] = None, - **kwargs: Any, - ) -> None: - """Init params.""" - if client is None: - raise ValueError("client is required.") - if collection_name is None: - raise ValueError("collection_name is required.") - vector_store = QdrantVectorStore(client=client, collection_name=collection_name) - - super().__init__( - documents=documents, - index_struct=index_struct, - text_qa_template=text_qa_template, - llm_predictor=llm_predictor, - embed_model=embed_model, - vector_store=vector_store, - **kwargs, - ) - - @classmethod - def get_query_map(self) -> Dict[str, Type[BaseGPTIndexQuery]]: - """Get query map.""" - return { - QueryMode.DEFAULT: GPTQdrantIndexQuery, - QueryMode.EMBEDDING: GPTQdrantIndexQuery, - } - - def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None: - """Preprocess query.""" - super()._preprocess_query(mode, query_kwargs) - del query_kwargs["vector_store"] - vector_store = cast(QdrantVectorStore, self._vector_store) - query_kwargs["client"] = vector_store._client - query_kwargs["collection_name"] = vector_store._collection_name - - -class GPTChromaIndex(GPTVectorStoreIndex): - """GPT Chroma Index. - - The GPTChromaIndex is a data structure where nodes are keyed by - embeddings, and those embeddings are stored within a Chroma collection. - During index construction, the document texts are chunked up, - converted to nodes with text; they are then encoded in - document embeddings stored within Chroma. - - During query time, the index uses Chroma to query for the top - k most similar nodes, and synthesizes an answer from the - retrieved nodes. - - Args: - text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt - (see :ref:`Prompt-Templates`). - NOTE: this is a deprecated field. - embed_model (Optional[BaseEmbedding]): Embedding model to use for - embedding similarity. - chroma_collection (Optional[Any]): Collection instance from `chromadb` package. 
- - """ - - index_struct_cls: Type[IndexDict] = ChromaIndexDict - - def __init__( - self, - documents: Optional[Sequence[DOCUMENTS_INPUT]] = None, - chroma_collection: Optional[Any] = None, - index_struct: Optional[IndexDict] = None, - text_qa_template: Optional[QuestionAnswerPrompt] = None, - llm_predictor: Optional[LLMPredictor] = None, - embed_model: Optional[BaseEmbedding] = None, - **kwargs: Any, - ) -> None: - """Init params.""" - if chroma_collection is None: - raise ValueError("chroma_collection is required.") - vector_store = ChromaVectorStore(chroma_collection=chroma_collection) - - super().__init__( - documents=documents, - index_struct=index_struct, - text_qa_template=text_qa_template, - llm_predictor=llm_predictor, - embed_model=embed_model, - vector_store=vector_store, - **kwargs, - ) - - @classmethod - def get_query_map(self) -> Dict[str, Type[BaseGPTIndexQuery]]: - """Get query map.""" - return { - QueryMode.DEFAULT: GPTChromaIndexQuery, - QueryMode.EMBEDDING: GPTChromaIndexQuery, - } - - def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None: - """Preprocess query.""" - super()._preprocess_query(mode, query_kwargs) - del query_kwargs["vector_store"] - vector_store = cast(ChromaVectorStore, self._vector_store) - query_kwargs["chroma_collection"] = vector_store._collection - - -class GPTOpensearchIndex(GPTVectorStoreIndex): - """GPT Opensearch Index. - - The GPTOpensearchIndex is a data structure where nodes are keyed by - embeddings, and those embeddings are stored in a document that is indexed - with its embedding as well as its textual data (text field is defined in - the OpensearchVectorClient). - During index construction, the document texts are chunked up, - converted to nodes with text; each node's embedding is computed, and then - the node's text, along with the embedding, is converted into JSON document that - is indexed in Opensearch. The embedding data is put into a field with type - "knn_vector" and the text is put into a standard Opensearch text field. - - During query time, the index performs approximate KNN search using the - "knn_vector" field that the embeddings were mapped to. - - Args: - text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt - (see :ref:`Prompt-Templates`). - NOTE: this is a deprecated field. - client (Optional[OpensearchVectorClient]): The client which encapsulates - logic for using Opensearch as a vector store (that is, it holds stuff - like endpoint, index_name and performs operations like initializing the - index and adding new doc/embeddings to said index). - embed_model (Optional[BaseEmbedding]): Embedding model to use for - embedding similarity. 
- """ - - index_struct_cls: Type[IndexDict] = OpensearchIndexDict - - def __init__( - self, - documents: Optional[Sequence[DOCUMENTS_INPUT]] = None, - client: Optional[OpensearchVectorClient] = None, - index_struct: Optional[IndexDict] = None, - text_qa_template: Optional[QuestionAnswerPrompt] = None, - llm_predictor: Optional[LLMPredictor] = None, - embed_model: Optional[BaseEmbedding] = None, - **kwargs: Any, - ) -> None: - """Init params.""" - if client is None: - raise ValueError("client is required.") - vector_store = OpensearchVectorStore(client) - super().__init__( - documents=documents, - index_struct=index_struct, - text_qa_template=text_qa_template, - llm_predictor=llm_predictor, - embed_model=embed_model, - vector_store=vector_store, - **kwargs, - ) - - @classmethod - def get_query_map(self) -> Dict[str, Type[BaseGPTIndexQuery]]: - """Get query map.""" - return { - QueryMode.DEFAULT: GPTOpensearchIndexQuery, - QueryMode.EMBEDDING: GPTOpensearchIndexQuery, - } - - def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None: - """Preprocess query.""" - super()._preprocess_query(mode, query_kwargs) - del query_kwargs["vector_store"] - vector_store = cast(OpensearchVectorStore, self._vector_store) - query_kwargs["client"] = vector_store._client diff --git a/spaces/julien-c/sveltekit-demo/build/_app/pages/__layout.svelte-63495e35.js b/spaces/julien-c/sveltekit-demo/build/_app/pages/__layout.svelte-63495e35.js deleted file mode 100644 index 368809190b8cde67e0d1b95d5b0b1bbc1ab95a74..0000000000000000000000000000000000000000 --- a/spaces/julien-c/sveltekit-demo/build/_app/pages/__layout.svelte-63495e35.js +++ /dev/null @@ -1 +0,0 @@ -import{D as ae,S as Q,i as W,s as X,e as d,j as L,E as z,t as B,c as h,a as n,d as l,l as I,F as N,g as C,G as le,b as t,H as O,f as D,I as a,J as Y,K as re,L as oe,v as ie,w as ne,x as ce,M as ve,N as ue,O as fe,p as ee,n as te,A as _e}from"../chunks/vendor-92f01141.js";const de=()=>{const i=ae("__svelte__");return{page:{subscribe:i.page.subscribe},navigating:{subscribe:i.navigating.subscribe},get preloading(){return console.error("stores.preloading is deprecated; use stores.navigating instead"),{subscribe:i.navigating.subscribe}},session:i.session}},he={subscribe(i){return de().page.subscribe(i)}};var pe="/build/_app/assets/svelte-logo-87df40b8.svg";function me(i){let s,u,o,c,m,E,p,v,q,y,f,b,r,e,_,w,g,H,V,A,S,j,x;return{c(){s=d("header"),u=d("div"),o=d("a"),c=d("img"),E=L(),p=d("nav"),v=z("svg"),q=z("path"),y=L(),f=d("ul"),b=d("li"),r=d("a"),e=B("Home"),_=L(),w=d("li"),g=d("a"),H=B("About"),V=L(),A=z("svg"),S=z("path"),j=L(),x=d("div"),this.h()},l(k){s=h(k,"HEADER",{class:!0});var $=n(s);u=h($,"DIV",{class:!0});var F=n(u);o=h(F,"A",{href:!0,class:!0});var G=n(o);c=h(G,"IMG",{src:!0,alt:!0,class:!0}),G.forEach(l),F.forEach(l),E=I($),p=h($,"NAV",{class:!0});var M=n(p);v=N(M,"svg",{viewBox:!0,"aria-hidden":!0,class:!0});var R=n(v);q=N(R,"path",{d:!0,class:!0}),n(q).forEach(l),R.forEach(l),y=I(M),f=h(M,"UL",{class:!0});var K=n(f);b=h(K,"LI",{class:!0});var Z=n(b);r=h(Z,"A",{"sveltekit:prefetch":!0,href:!0,class:!0});var J=n(r);e=C(J,"Home"),J.forEach(l),Z.forEach(l),_=I(K),w=h(K,"LI",{class:!0});var P=n(w);g=h(P,"A",{"sveltekit:prefetch":!0,href:!0,class:!0});var T=n(g);H=C(T,"About"),T.forEach(l),P.forEach(l),K.forEach(l),V=I(M),A=N(M,"svg",{viewBox:!0,"aria-hidden":!0,class:!0});var U=n(A);S=N(U,"path",{d:!0,class:!0}),n(S).forEach(l),U.forEach(l),M.forEach(l),j=I($),x=h($,"DIV",{class:!0});var 
se=n(x);se.forEach(l),$.forEach(l),this.h()},h(){le(c.src,m=pe)||t(c,"src",m),t(c,"alt","SvelteKit"),t(c,"class","svelte-t2wq17"),t(o,"href","https://kit.svelte.dev"),t(o,"class","svelte-t2wq17"),t(u,"class","corner svelte-t2wq17"),t(q,"d","M0,0 L1,2 C1.5,3 1.5,3 2,3 L2,0 Z"),t(q,"class","svelte-t2wq17"),t(v,"viewBox","0 0 2 3"),t(v,"aria-hidden","true"),t(v,"class","svelte-t2wq17"),t(r,"sveltekit:prefetch",""),t(r,"href","./"),t(r,"class","svelte-t2wq17"),t(b,"class","svelte-t2wq17"),O(b,"active",i[0].url.pathname==="/"),t(g,"sveltekit:prefetch",""),t(g,"href","./about"),t(g,"class","svelte-t2wq17"),t(w,"class","svelte-t2wq17"),O(w,"active",i[0].url.pathname==="/about"),t(f,"class","svelte-t2wq17"),t(S,"d","M0,0 L0,3 C0.5,3 0.5,3 1,2 L2,0 Z"),t(S,"class","svelte-t2wq17"),t(A,"viewBox","0 0 2 3"),t(A,"aria-hidden","true"),t(A,"class","svelte-t2wq17"),t(p,"class","svelte-t2wq17"),t(x,"class","corner svelte-t2wq17"),t(s,"class","svelte-t2wq17")},m(k,$){D(k,s,$),a(s,u),a(u,o),a(o,c),a(s,E),a(s,p),a(p,v),a(v,q),a(p,y),a(p,f),a(f,b),a(b,r),a(r,e),a(f,_),a(f,w),a(w,g),a(g,H),a(p,V),a(p,A),a(A,S),a(s,j),a(s,x)},p(k,[$]){$&1&&O(b,"active",k[0].url.pathname==="/"),$&1&&O(w,"active",k[0].url.pathname==="/about")},i:Y,o:Y,d(k){k&&l(s)}}}function ge(i,s,u){let o;return re(i,he,c=>u(0,o=c)),[o]}class be extends Q{constructor(s){super();W(this,s,ge,me,X,{})}}function Ee(i){let s,u,o,c,m,E,p,v,q,y,f;s=new be({});const b=i[1].default,r=oe(b,i,i[0],null);return{c(){ie(s.$$.fragment),u=L(),o=d("main"),r&&r.c(),c=L(),m=d("footer"),E=d("p"),p=B("visit "),v=d("a"),q=B("kit.svelte.dev"),y=B(" to learn SvelteKit"),this.h()},l(e){ne(s.$$.fragment,e),u=I(e),o=h(e,"MAIN",{class:!0});var _=n(o);r&&r.l(_),_.forEach(l),c=I(e),m=h(e,"FOOTER",{class:!0});var w=n(m);E=h(w,"P",{});var g=n(E);p=C(g,"visit "),v=h(g,"A",{href:!0,class:!0});var H=n(v);q=C(H,"kit.svelte.dev"),H.forEach(l),y=C(g," to learn SvelteKit"),g.forEach(l),w.forEach(l),this.h()},h(){t(o,"class","svelte-1izrdc8"),t(v,"href","https://kit.svelte.dev"),t(v,"class","svelte-1izrdc8"),t(m,"class","svelte-1izrdc8")},m(e,_){ce(s,e,_),D(e,u,_),D(e,o,_),r&&r.m(o,null),D(e,c,_),D(e,m,_),a(m,E),a(E,p),a(E,v),a(v,q),a(E,y),f=!0},p(e,[_]){r&&r.p&&(!f||_&1)&&ve(r,b,e,e[0],f?fe(b,e[0],_,null):ue(e[0]),null)},i(e){f||(ee(s.$$.fragment,e),ee(r,e),f=!0)},o(e){te(s.$$.fragment,e),te(r,e),f=!1},d(e){_e(s,e),e&&l(u),e&&l(o),r&&r.d(e),e&&l(c),e&&l(m)}}}function we(i,s,u){let{$$slots:o={},$$scope:c}=s;return i.$$set=m=>{"$$scope"in m&&u(0,c=m.$$scope)},[c,o]}class qe extends Q{constructor(s){super();W(this,s,we,Ee,X,{})}}export{qe as default}; diff --git a/spaces/jusancp99/imagenes_similares/similarity_utils.py b/spaces/jusancp99/imagenes_similares/similarity_utils.py deleted file mode 100644 index fc1de7d528012f2dc4eba8c2ca8b395eaaf307c1..0000000000000000000000000000000000000000 --- a/spaces/jusancp99/imagenes_similares/similarity_utils.py +++ /dev/null @@ -1,175 +0,0 @@ -from typing import List, Union - -import datasets -import numpy as np -import torch -import torchvision.transforms as T -from PIL import Image -from tqdm.auto import tqdm -from transformers import AutoFeatureExtractor, AutoModel - -seed = 42 -hash_size = 8 -hidden_dim = 768 # ViT-base -np.random.seed(seed) - - -# Device. -device = "cuda" if torch.cuda.is_available() else "cpu" - -# Load model for computing embeddings.. -model_ckpt = "gjuggler/swin-tiny-patch4-window7-224-finetuned-birds" -extractor = AutoFeatureExtractor.from_pretrained(model_ckpt) - -# Data transformation chain. 
-transformation_chain = T.Compose( - [ - # We first resize the input image to 256x256 and then we take center crop. - T.Resize(224), - T.CenterCrop(extractor.size["height"]), - T.ToTensor(), - T.Normalize(mean=extractor.image_mean, std=extractor.image_std), - ] -) - - -# Define random vectors to project with. -random_vectors = np.random.randn(hash_size, hidden_dim).T - - -def hash_func(embedding, random_vectors=random_vectors): - """Randomly projects the embeddings and then computes bit-wise hashes.""" - if not isinstance(embedding, np.ndarray): - embedding = np.array(embedding) - if len(embedding.shape) < 2: - embedding = np.expand_dims(embedding, 0) - - # Random projection. - bools = np.dot(embedding, random_vectors) > 0 - return [bool2int(bool_vec) for bool_vec in bools] - - -def bool2int(x): - y = 0 - for i, j in enumerate(x): - if j: - y += 1 << i - return y - - -def compute_hash(model: Union[torch.nn.Module, str]): - """Computes hash on a given dataset.""" - device = model.device - - def pp(example_batch): - # Prepare the input images for the model. - image_batch = example_batch["image"] - image_batch_transformed = torch.stack( - [transformation_chain(image) for image in image_batch] - ) - new_batch = {"pixel_values": image_batch_transformed.to(device)} - - # Compute embeddings and pool them i.e., take the representations from the [CLS] - # token. - with torch.no_grad(): - embeddings = model(**new_batch).last_hidden_state[:, 0].cpu().numpy() - - # Compute hashes for the batch of images. - hashes = [hash_func(embeddings[i]) for i in range(len(embeddings))] - example_batch["hashes"] = hashes - return example_batch - - return pp - - -class Table: - def __init__(self, hash_size: int): - self.table = {} - self.hash_size = hash_size - - def add(self, id: int, hashes: List[int], label: int): - # Create a unique indentifier. - entry = {"id_label": str(id) + "_" + str(label)} - - # Add the hash values to the current table. - for h in hashes: - if h in self.table: - self.table[h].append(entry) - else: - self.table[h] = [entry] - - def query(self, hashes: List[int]): - results = [] - - # Loop over the query hashes and determine if they exist in - # the current table. 
- for h in hashes: - if h in self.table: - results.extend(self.table[h]) - return results - - -class LSH: - def __init__(self, hash_size, num_tables): - self.num_tables = num_tables - self.tables = [] - for i in range(self.num_tables): - self.tables.append(Table(hash_size)) - - def add(self, id: int, hash: List[int], label: int): - for table in self.tables: - table.add(id, hash, label) - - def query(self, hashes: List[int]): - results = [] - for table in self.tables: - results.extend(table.query(hashes)) - return results - - -class BuildLSHTable: - def __init__( - self, - model: Union[torch.nn.Module, None], - batch_size: int = 48, - hash_size: int = hash_size, - dim: int = hidden_dim, - num_tables: int = 10, - ): - self.hash_size = hash_size - self.dim = dim - self.num_tables = num_tables - self.lsh = LSH(self.hash_size, self.num_tables) - - self.batch_size = batch_size - self.hash_fn = compute_hash(model.to(device)) - - def build(self, ds: datasets.DatasetDict): - dataset_hashed = ds.map(self.hash_fn, batched=True, batch_size=self.batch_size) - - for id in tqdm(range(len(dataset_hashed))): - hash, label = dataset_hashed[id]["hashes"], dataset_hashed[id]["labels"] - self.lsh.add(id, hash, label) - - def query(self, image, verbose=True): - if isinstance(image, str): - image = Image.open(image).convert("RGB") - - # Compute the hashes of the query image and fetch the results. - example_batch = dict(image=[image]) - hashes = self.hash_fn(example_batch)["hashes"][0] - - results = self.lsh.query(hashes) - if verbose: - print("Matches:", len(results)) - - # Calculate Jaccard index to quantify the similarity. - counts = {} - for r in results: - if r["id_label"] in counts: - counts[r["id_label"]] += 1 - else: - counts[r["id_label"]] = 1 - for k in counts: - counts[k] = float(counts[k]) / self.dim - return counts diff --git a/spaces/kajalag/Whatsapp_Chat_Analyzer/preprocessor.py b/spaces/kajalag/Whatsapp_Chat_Analyzer/preprocessor.py deleted file mode 100644 index 6423577ca0a5b7077856429e9dd0b99c4f2242bd..0000000000000000000000000000000000000000 --- a/spaces/kajalag/Whatsapp_Chat_Analyzer/preprocessor.py +++ /dev/null @@ -1,111 +0,0 @@ -import pandas as pd -import re -from textblob import TextBlob -import numpy as np -import nltk -import nltk.data -from nltk.sentiment.vader import SentimentIntensityAnalyzer -from tqdm.notebook import tqdm -sia=SentimentIntensityAnalyzer() -nltk.download('vader_lexicon') - -def preprocess(data): - pattern ='\d{1,2}/\d{1,2}/\d{2,4},\s\d{1,2}:\d{2}\s-\s' - - messages = re.split(pattern, data)[1:] - dates = re.findall(pattern, data) - df = pd.DataFrame({'user_message': messages, 'message_date': dates}) - df['message_date'] = pd.to_datetime(df['message_date'], format='%m/%d/%y, %H:%M - ') - df.rename(columns={'message_date': 'date'}, inplace=True) - users = [] - messages = [] - for message in df['user_message']: - entry = re.split('([\w\W]+?):\s', message) - - if entry[1:]: - users.append(entry[1]) - messages.append(entry[2]) - - else: - users.append('group_notification') - messages.append(entry[0]) - df['users'] = users - df['message'] = messages - df.drop(columns=['user_message'], inplace=True) - df['year'] = df['date'].dt.year - df['day'] = df['date'].dt.day - df['hour'] = df['date'].dt.hour - df['minute'] = df['date'].dt.minute - df['Day_name'] = df['date'].dt.day_name() - df['Date']=df['date'].dt.date - df['Month'] = df['date'].dt.month - df['Month_name'] = df['date'].dt.month_name() - - period = [] - for hour in df[['Day_name', 'hour']]['hour']: - if hour == 23: - 
period.append(str(hour) + "-" + str('00')) - elif hour == 0: - period.append(str('00') + "-" + str(hour + 1)) - else: - period.append(str(hour) + "-" + str(hour + 1)) - - df['period']=period - - temp = df[df['users'] != 'group_notification'] - temp = temp[temp['message'] != '\n'] - temp.replace("", np.nan, inplace=True) - temp = temp.dropna() - - def cleanTxt(text): - text = re.sub(r'@[A-Za-z0-9]+', '', text) - text = re.sub(r'#', '', text) - text = text.replace('\n', "") - return text - - temp['message'] = temp['message'].apply(cleanTxt) - temp['users'] = temp['users'].apply(cleanTxt) - - res = {} - for i, row in tqdm(temp.iterrows(), total=len(temp)): - text = row['message'] - myid = row['users'] - res[myid] = sia.polarity_scores(text) - - vaders = pd.DataFrame(res).T - vaders = vaders.reset_index().rename(columns={'index': 'users'}) - vaders = vaders.merge(temp, how="right") - vaders_new = vaders.pop('message') - vaders_new = pd.DataFrame(vaders_new) - vaders.insert(1, "message", vaders_new['message']) - - def getSubjectivity(text): - return TextBlob(text).sentiment.subjectivity - - def getPolarity(text): - return TextBlob(text).sentiment.polarity - - vaders['Subjectivity'] = vaders['message'].apply(getSubjectivity) - vaders['Polarity'] = vaders['message'].apply(getPolarity) - - def getAnalysis(score): - if score < 0: - return 'Negative' - if score == 0: - return 'Neutral' - else: - return 'Positive' - - vaders['Analysis'] = vaders['Polarity'].apply(getAnalysis) - - def getAnalysis(score): - if score <= 0: - return 'Negative' - if score < 0.2960: - return 'Neutral' - else: - return 'Positive' - - vaders['vader_Analysis'] = vaders['compound'].apply(getAnalysis) - - return vaders \ No newline at end of file diff --git a/spaces/kcagle/AutoGPT/autogpt/app.py b/spaces/kcagle/AutoGPT/autogpt/app.py deleted file mode 100644 index 58d9f7164ddfbb5019b072d789dc2fa6205dc9d3..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/autogpt/app.py +++ /dev/null @@ -1,330 +0,0 @@ -""" Command and Control """ -import json -from typing import Dict, List, NoReturn, Union - -from autogpt.agent.agent_manager import AgentManager -from autogpt.commands.analyze_code import analyze_code -from autogpt.commands.audio_text import read_audio_from_file -from autogpt.commands.execute_code import ( - execute_python_file, - execute_shell, - execute_shell_popen, -) -from autogpt.commands.file_operations import ( - append_to_file, - delete_file, - download_file, - read_file, - search_files, - write_to_file, -) -from autogpt.commands.git_operations import clone_repository -from autogpt.commands.google_search import google_official_search, google_search -from autogpt.commands.image_gen import generate_image -from autogpt.commands.improve_code import improve_code -from autogpt.commands.twitter import send_tweet -from autogpt.commands.web_requests import scrape_links, scrape_text -from autogpt.commands.web_selenium import browse_website -from autogpt.commands.write_tests import write_tests -from autogpt.config import Config -from autogpt.json_utils.json_fix_llm import fix_and_parse_json -from autogpt.memory import get_memory -from autogpt.processing.text import summarize_text -from autogpt.speech import say_text - -CFG = Config() -AGENT_MANAGER = AgentManager() - - -def is_valid_int(value: str) -> bool: - """Check if the value is a valid integer - - Args: - value (str): The value to check - - Returns: - bool: True if the value is a valid integer, False otherwise - """ - try: - int(value) - return True - except 
ValueError: - return False - - -def get_command(response_json: Dict): - """Parse the response and return the command name and arguments - - Args: - response_json (json): The response from the AI - - Returns: - tuple: The command name and arguments - - Raises: - json.decoder.JSONDecodeError: If the response is not valid JSON - - Exception: If any other error occurs - """ - try: - if "command" not in response_json: - return "Error:", "Missing 'command' object in JSON" - - if not isinstance(response_json, dict): - return "Error:", f"'response_json' object is not dictionary {response_json}" - - command = response_json["command"] - if not isinstance(command, dict): - return "Error:", "'command' object is not a dictionary" - - if "name" not in command: - return "Error:", "Missing 'name' field in 'command' object" - - command_name = command["name"] - - # Use an empty dictionary if 'args' field is not present in 'command' object - arguments = command.get("args", {}) - - return command_name, arguments - except json.decoder.JSONDecodeError: - return "Error:", "Invalid JSON" - # All other errors, return "Error: + error message" - except Exception as e: - return "Error:", str(e) - - -def map_command_synonyms(command_name: str): - """Takes the original command name given by the AI, and checks if the - string matches a list of common/known hallucinations - """ - synonyms = [ - ("write_file", "write_to_file"), - ("create_file", "write_to_file"), - ("search", "google"), - ] - for seen_command, actual_command_name in synonyms: - if command_name == seen_command: - return actual_command_name - return command_name - - -def execute_command(command_name: str, arguments): - """Execute the command and return the result - - Args: - command_name (str): The name of the command to execute - arguments (dict): The arguments for the command - - Returns: - str: The result of the command - """ - try: - command_name = map_command_synonyms(command_name.lower()) - if command_name == "google": - # Check if the Google API key is set and use the official search method - # If the API key is not set or has only whitespaces, use the unofficial - # search method - key = CFG.google_api_key - if key and key.strip() and key != "your-google-api-key": - google_result = google_official_search(arguments["input"]) - return google_result - else: - google_result = google_search(arguments["input"]) - - # google_result can be a list or a string depending on the search results - if isinstance(google_result, list): - safe_message = [ - google_result_single.encode("utf-8", "ignore") - for google_result_single in google_result - ] - else: - safe_message = google_result.encode("utf-8", "ignore") - - return safe_message.decode("utf-8") - elif command_name == "memory_add": - memory = get_memory(CFG) - return memory.add(arguments["string"]) - elif command_name == "start_agent": - return start_agent( - arguments["name"], arguments["task"], arguments["prompt"] - ) - elif command_name == "message_agent": - return message_agent(arguments["key"], arguments["message"]) - elif command_name == "list_agents": - return list_agents() - elif command_name == "delete_agent": - return delete_agent(arguments["key"]) - elif command_name == "get_text_summary": - return get_text_summary(arguments["url"], arguments["question"]) - elif command_name == "get_hyperlinks": - return get_hyperlinks(arguments["url"]) - elif command_name == "clone_repository": - return clone_repository( - arguments["repository_url"], arguments["clone_path"] - ) - elif command_name == "read_file": 
- return read_file(arguments["file"]) - elif command_name == "write_to_file": - return write_to_file(arguments["file"], arguments["text"]) - elif command_name == "append_to_file": - return append_to_file(arguments["file"], arguments["text"]) - elif command_name == "delete_file": - return delete_file(arguments["file"]) - elif command_name == "search_files": - return search_files(arguments["directory"]) - elif command_name == "download_file": - if not CFG.allow_downloads: - return "Error: You do not have user authorization to download files locally." - return download_file(arguments["url"], arguments["file"]) - elif command_name == "browse_website": - return browse_website(arguments["url"], arguments["question"]) - # TODO: Change these to take in a file rather than pasted code, if - # non-file is given, return instructions "Input should be a python - # filepath, write your code to file and try again" - elif command_name == "analyze_code": - return analyze_code(arguments["code"]) - elif command_name == "improve_code": - return improve_code(arguments["suggestions"], arguments["code"]) - elif command_name == "write_tests": - return write_tests(arguments["code"], arguments.get("focus")) - elif command_name == "execute_python_file": # Add this command - return execute_python_file(arguments["file"]) - elif command_name == "execute_shell": - if CFG.execute_local_commands: - return execute_shell(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." - ) - elif command_name == "execute_shell_popen": - if CFG.execute_local_commands: - return execute_shell_popen(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." - ) - elif command_name == "read_audio_from_file": - return read_audio_from_file(arguments["file"]) - elif command_name == "generate_image": - return generate_image(arguments["prompt"]) - elif command_name == "send_tweet": - return send_tweet(arguments["text"]) - elif command_name == "do_nothing": - return "No action performed." - elif command_name == "task_complete": - shutdown() - else: - return ( - f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" - " list for available commands and only respond in the specified JSON" - " format." 
- ) - except Exception as e: - return f"Error: {str(e)}" - - -def get_text_summary(url: str, question: str) -> str: - """Return the results of a Google search - - Args: - url (str): The url to scrape - question (str): The question to summarize the text for - - Returns: - str: The summary of the text - """ - text = scrape_text(url) - summary = summarize_text(url, text, question) - return f""" "Result" : {summary}""" - - -def get_hyperlinks(url: str) -> Union[str, List[str]]: - """Return the results of a Google search - - Args: - url (str): The url to scrape - - Returns: - str or list: The hyperlinks on the page - """ - return scrape_links(url) - - -def shutdown() -> NoReturn: - """Shut down the program""" - print("Shutting down...") - quit() - - -def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str: - """Start an agent with a given name, task, and prompt - - Args: - name (str): The name of the agent - task (str): The task of the agent - prompt (str): The prompt for the agent - model (str): The model to use for the agent - - Returns: - str: The response of the agent - """ - # Remove underscores from name - voice_name = name.replace("_", " ") - - first_message = f"""You are {name}. Respond with: "Acknowledged".""" - agent_intro = f"{voice_name} here, Reporting for duty!" - - # Create agent - if CFG.speak_mode: - say_text(agent_intro, 1) - key, ack = AGENT_MANAGER.create_agent(task, first_message, model) - - if CFG.speak_mode: - say_text(f"Hello {voice_name}. Your task is as follows. {task}.") - - # Assign task (prompt), get response - agent_response = AGENT_MANAGER.message_agent(key, prompt) - - return f"Agent {name} created with key {key}. First response: {agent_response}" - - -def message_agent(key: str, message: str) -> str: - """Message an agent with a given key and message""" - # Check if the key is a valid integer - if is_valid_int(key): - agent_response = AGENT_MANAGER.message_agent(int(key), message) - else: - return "Invalid key, must be an integer." - - # Speak response - if CFG.speak_mode: - say_text(agent_response, 1) - return agent_response - - -def list_agents(): - """List all agents - - Returns: - str: A list of all agents - """ - return "List of agents:\n" + "\n".join( - [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()] - ) - - -def delete_agent(key: str) -> str: - """Delete an agent with a given key - - Args: - key (str): The key of the agent to delete - - Returns: - str: A message indicating whether the agent was deleted or not - """ - result = AGENT_MANAGER.delete_agent(key) - return f"Agent {key} deleted." if result else f"Agent {key} does not exist." 
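
Note: the removed autogpt/app.py above centres on a small JSON command-dispatch loop (get_command -> map_command_synonyms -> execute_command). Below is a minimal, self-contained sketch of that pattern under stated assumptions: the handler table and the sample response are illustrative stand-ins, not the module's actual command implementations, which dispatched to the autogpt.commands.* functions imported at the top of the file.

import json


def get_command(response_json: dict):
    """Extract (name, args) from {"command": {"name": ..., "args": {...}}}."""
    command = response_json.get("command")
    if not isinstance(command, dict) or "name" not in command:
        return "Error:", "Missing or malformed 'command' object"
    return command["name"], command.get("args", {})


# Common hallucinated command names are folded onto canonical ones.
SYNONYMS = {"write_file": "write_to_file", "create_file": "write_to_file", "search": "google"}

# Hypothetical stand-ins; the real module routed to autogpt.commands.* functions.
HANDLERS = {
    "write_to_file": lambda args: f"wrote {len(args.get('text', ''))} chars to {args.get('file')}",
    "do_nothing": lambda args: "No action performed.",
}


def execute_command(name: str, args: dict) -> str:
    name = SYNONYMS.get(name.lower(), name.lower())
    handler = HANDLERS.get(name)
    if handler is None:
        return f"Unknown command '{name}'."
    try:
        return handler(args)
    except Exception as e:  # mirror the broad error-to-string behaviour of the original
        return f"Error: {e}"


response = json.loads('{"command": {"name": "create_file", "args": {"file": "notes.txt", "text": "hi"}}}')
print(execute_command(*get_command(response)))  # -> wrote 2 chars to notes.txt
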
diff --git a/spaces/kdrkdrkdr/ZhongliTTS/modules.py b/spaces/kdrkdrkdr/ZhongliTTS/modules.py deleted file mode 100644 index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000 --- a/spaces/kdrkdrkdr/ZhongliTTS/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, 
hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not 
None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, 
filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/kevinwang676/Bert-VITS2/text/english.py b/spaces/kevinwang676/Bert-VITS2/text/english.py deleted file mode 100644 index 781d0a56cef71f66fc67db51d76538be90d3ddd2..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bert-VITS2/text/english.py +++ /dev/null @@ -1,138 +0,0 @@ -import pickle -import os -import re -from g2p_en import G2p -from string import punctuation - -from text import symbols - -current_file_path = os.path.dirname(__file__) -CMU_DICT_PATH = os.path.join(current_file_path, 'cmudict.rep') -CACHE_PATH = os.path.join(current_file_path, 'cmudict_cache.pickle') -_g2p = G2p() - -arpa = {'AH0', 'S', 'AH1', 'EY2', 'AE2', 'EH0', 'OW2', 'UH0', 'NG', 'B', 'G', 'AY0', 'M', 'AA0', 'F', 'AO0', 'ER2', 'UH1', 'IY1', 'AH2', 'DH', 'IY0', 'EY1', 'IH0', 'K', 'N', 'W', 'IY2', 'T', 'AA1', 'ER1', 'EH2', 'OY0', 'UH2', 'UW1', 'Z', 'AW2', 'AW1', 'V', 'UW2', 'AA2', 'ER', 'AW0', 'UW0', 'R', 'OW1', 'EH1', 'ZH', 'AE0', 'IH2', 'IH', 'Y', 'JH', 'P', 'AY1', 'EY0', 'OY2', 'TH', 'HH', 'D', 'ER0', 'CH', 'AO1', 'AE1', 'AO2', 'OY1', 'AY2', 'IH1', 'OW0', 'L', 'SH'} - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def read_dict(): - g2p_dict = {} - start_line = 49 - with open(CMU_DICT_PATH) as f: - line = f.readline() - line_index = 1 - while line: - if line_index >= start_line: - line = line.strip() - word_split = line.split(' ') - word = word_split[0] - - syllable_split = word_split[1].split(' - ') - g2p_dict[word] = [] - for syllable in syllable_split: - phone_split = syllable.split(' ') - g2p_dict[word].append(phone_split) - - line_index = line_index + 1 - line = f.readline() - - return g2p_dict - - -def cache_dict(g2p_dict, file_path): - with open(file_path, 'wb') as pickle_file: - pickle.dump(g2p_dict, pickle_file) - - -def get_dict(): - if 
os.path.exists(CACHE_PATH): - with open(CACHE_PATH, 'rb') as pickle_file: - g2p_dict = pickle.load(pickle_file) - else: - g2p_dict = read_dict() - cache_dict(g2p_dict, CACHE_PATH) - - return g2p_dict - -eng_dict = get_dict() - -def refine_ph(phn): - tone = 0 - if re.search(r'\d$', phn): - tone = int(phn[-1]) + 1 - phn = phn[:-1] - return phn.lower(), tone - -def refine_syllables(syllables): - tones = [] - phonemes = [] - for phn_list in syllables: - for i in range(len(phn_list)): - phn = phn_list[i] - phn, tone = refine_ph(phn) - phonemes.append(phn) - tones.append(tone) - return phonemes, tones - - -def text_normalize(text): - # todo: eng text normalize - return text - -def g2p(text): - - phones = [] - tones = [] - words = re.split(r"([,;.\-\?\!\s+])", text) - for w in words: - if w.upper() in eng_dict: - phns, tns = refine_syllables(eng_dict[w.upper()]) - phones += phns - tones += tns - else: - phone_list = list(filter(lambda p: p != " ", _g2p(w))) - for ph in phone_list: - if ph in arpa: - ph, tn = refine_ph(ph) - phones.append(ph) - tones.append(tn) - else: - phones.append(ph) - tones.append(0) - # todo: implement word2ph - word2ph = [1 for i in phones] - - phones = [post_replace_ph(i) for i in phones] - return phones, tones, word2ph - -if __name__ == "__main__": - # print(get_dict()) - # print(eng_word_to_phoneme("hello")) - print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")) - # all_phones = set() - # for k, syllables in eng_dict.items(): - # for group in syllables: - # for ph in group: - # all_phones.add(ph) - # print(all_phones) \ No newline at end of file diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/__init__.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/__init__.py deleted file mode 100644 index 5a7986c7ad2ec48f404adf81fea5aa06aaf1eeb4..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -"""This package contains modules related to objective functions, optimizations, and network architectures. - -To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel. -You need to implement the following five functions: - -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). - -- : unpack data from dataset and apply preprocessing. - -- : produce intermediate results. - -- : calculate loss, gradients, and update network weights. - -- : (optionally) add model-specific options and set default options. - -In the function <__init__>, you need to define four lists: - -- self.loss_names (str list): specify the training losses that you want to plot and save. - -- self.model_names (str list): define networks used in our training. - -- self.visual_names (str list): specify the images that you want to display and save. - -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an usage. - -Now you can use the model class by specifying flag '--model dummy'. -See our template model class 'template_model.py' for more details. -""" - -import importlib -from src.face3d.models.base_model import BaseModel - - -def find_model_using_name(model_name): - """Import the module "models/[model_name]_model.py". 
- - In the file, the class called DatasetNameModel() will - be instantiated. It has to be a subclass of BaseModel, - and it is case-insensitive. - """ - model_filename = "face3d.models." + model_name + "_model" - modellib = importlib.import_module(model_filename) - model = None - target_model_name = model_name.replace('_', '') + 'model' - for name, cls in modellib.__dict__.items(): - if name.lower() == target_model_name.lower() \ - and issubclass(cls, BaseModel): - model = cls - - if model is None: - print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) - exit(0) - - return model - - -def get_option_setter(model_name): - """Return the static method of the model class.""" - model_class = find_model_using_name(model_name) - return model_class.modify_commandline_options - - -def create_model(opt): - """Create a model given the option. - - This function warps the class CustomDatasetDataLoader. - This is the main interface between this package and 'train.py'/'test.py' - - Example: - >>> from models import create_model - >>> model = create_model(opt) - """ - model = find_model_using_name(opt.model) - instance = model(opt) - print("model [%s] was created" % type(instance).__name__) - return instance diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/__init__.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/__init__.py deleted file mode 100644 index 04eecb58b62f8c9d11d17606c6241d278a48b9b9..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/util/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -"""This package includes a miscellaneous collection of useful helper functions.""" -from src.face3d.util import * - diff --git a/spaces/kevinwang676/SadTalker/src/face3d/options/inference_options.py b/spaces/kevinwang676/SadTalker/src/face3d/options/inference_options.py deleted file mode 100644 index c453965959ab4cfb31acbc424f994db68c3d4df5..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/face3d/options/inference_options.py +++ /dev/null @@ -1,23 +0,0 @@ -from face3d.options.base_options import BaseOptions - - -class InferenceOptions(BaseOptions): - """This class includes test options. - - It also includes shared options defined in BaseOptions. - """ - - def initialize(self, parser): - parser = BaseOptions.initialize(self, parser) # define shared options - parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') - parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]') - - parser.add_argument('--input_dir', type=str, help='the folder of the input files') - parser.add_argument('--keypoint_dir', type=str, help='the folder of the keypoint files') - parser.add_argument('--output_dir', type=str, default='mp4', help='the output dir to save the extracted coefficients') - parser.add_argument('--save_split_files', action='store_true', help='save split files or not') - parser.add_argument('--inference_batch_size', type=int, default=8) - - # Dropout and Batchnorm has different behavior during training and test. 
- self.isTrain = False - return parser diff --git a/spaces/kevinwang676/SadTalker/src/face3d/util/preprocess.py b/spaces/kevinwang676/SadTalker/src/face3d/util/preprocess.py deleted file mode 100644 index b77a3a4058c208e5ba8cb1cfbb563954a5f7a3e2..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/face3d/util/preprocess.py +++ /dev/null @@ -1,103 +0,0 @@ -"""This script contains the image preprocessing code for Deep3DFaceRecon_pytorch -""" - -import numpy as np -from scipy.io import loadmat -from PIL import Image -import cv2 -import os -from skimage import transform as trans -import torch -import warnings -warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) -warnings.filterwarnings("ignore", category=FutureWarning) - - -# calculating least square problem for image alignment -def POS(xp, x): - npts = xp.shape[1] - - A = np.zeros([2*npts, 8]) - - A[0:2*npts-1:2, 0:3] = x.transpose() - A[0:2*npts-1:2, 3] = 1 - - A[1:2*npts:2, 4:7] = x.transpose() - A[1:2*npts:2, 7] = 1 - - b = np.reshape(xp.transpose(), [2*npts, 1]) - - k, _, _, _ = np.linalg.lstsq(A, b) - - R1 = k[0:3] - R2 = k[4:7] - sTx = k[3] - sTy = k[7] - s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2 - t = np.stack([sTx, sTy], axis=0) - - return t, s - -# resize and crop images for face reconstruction -def resize_n_crop_img(img, lm, t, s, target_size=224., mask=None): - w0, h0 = img.size - w = (w0*s).astype(np.int32) - h = (h0*s).astype(np.int32) - left = (w/2 - target_size/2 + float((t[0] - w0/2)*s)).astype(np.int32) - right = left + target_size - up = (h/2 - target_size/2 + float((h0/2 - t[1])*s)).astype(np.int32) - below = up + target_size - - img = img.resize((w, h), resample=Image.BICUBIC) - img = img.crop((left, up, right, below)) - - if mask is not None: - mask = mask.resize((w, h), resample=Image.BICUBIC) - mask = mask.crop((left, up, right, below)) - - lm = np.stack([lm[:, 0] - t[0] + w0/2, lm[:, 1] - - t[1] + h0/2], axis=1)*s - lm = lm - np.reshape( - np.array([(w/2 - target_size/2), (h/2-target_size/2)]), [1, 2]) - - return img, lm, mask - -# utils for face reconstruction -def extract_5p(lm): - lm_idx = np.array([31, 37, 40, 43, 46, 49, 55]) - 1 - lm5p = np.stack([lm[lm_idx[0], :], np.mean(lm[lm_idx[[1, 2]], :], 0), np.mean( - lm[lm_idx[[3, 4]], :], 0), lm[lm_idx[5], :], lm[lm_idx[6], :]], axis=0) - lm5p = lm5p[[1, 2, 0, 3, 4], :] - return lm5p - -# utils for face reconstruction -def align_img(img, lm, lm3D, mask=None, target_size=224., rescale_factor=102.): - """ - Return: - transparams --numpy.array (raw_W, raw_H, scale, tx, ty) - img_new --PIL.Image (target_size, target_size, 3) - lm_new --numpy.array (68, 2), y direction is opposite to v direction - mask_new --PIL.Image (target_size, target_size) - - Parameters: - img --PIL.Image (raw_H, raw_W, 3) - lm --numpy.array (68, 2), y direction is opposite to v direction - lm3D --numpy.array (5, 3) - mask --PIL.Image (raw_H, raw_W, 3) - """ - - w0, h0 = img.size - if lm.shape[0] != 5: - lm5p = extract_5p(lm) - else: - lm5p = lm - - # calculate translation and scale factors using 5 facial landmarks and standard landmarks of a 3D face - t, s = POS(lm5p.transpose(), lm3D.transpose()) - s = rescale_factor/s - - # processing the image - img_new, lm_new, mask_new = resize_n_crop_img(img, lm, t, s, target_size=target_size, mask=mask) - trans_params = np.array([w0, h0, s, t[0], t[1]]) - - return trans_params, img_new, lm_new, mask_new diff --git a/spaces/kevinwang676/VoiceChanger/infer_pack/modules/F0Predictor/PMF0Predictor.py 
b/spaces/kevinwang676/VoiceChanger/infer_pack/modules/F0Predictor/PMF0Predictor.py deleted file mode 100644 index ab523020325fa3f30676ad20125c6a9f059a9d84..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/infer_pack/modules/F0Predictor/PMF0Predictor.py +++ /dev/null @@ -1,97 +0,0 @@ -from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import parselmouth -import numpy as np - - -class PMF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def compute_f0(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0 - - def compute_f0_uv(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0, uv diff --git a/spaces/kevinwang676/VoiceChanger/src/audio2pose_models/cvae.py b/spaces/kevinwang676/VoiceChanger/src/audio2pose_models/cvae.py deleted file mode 100644 index d017ce865a03bae40dfe066dbcd82e29839d89dc..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/src/audio2pose_models/cvae.py +++ /dev/null @@ -1,149 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn -from src.audio2pose_models.res_unet import ResUnet - -def class2onehot(idx, class_num): - - assert torch.max(idx).item() < class_num - onehot = torch.zeros(idx.size(0), class_num).to(idx.device) - onehot.scatter_(1, idx, 1) - return onehot - -class 
CVAE(nn.Module): - def __init__(self, cfg): - super().__init__() - encoder_layer_sizes = cfg.MODEL.CVAE.ENCODER_LAYER_SIZES - decoder_layer_sizes = cfg.MODEL.CVAE.DECODER_LAYER_SIZES - latent_size = cfg.MODEL.CVAE.LATENT_SIZE - num_classes = cfg.DATASET.NUM_CLASSES - audio_emb_in_size = cfg.MODEL.CVAE.AUDIO_EMB_IN_SIZE - audio_emb_out_size = cfg.MODEL.CVAE.AUDIO_EMB_OUT_SIZE - seq_len = cfg.MODEL.CVAE.SEQ_LEN - - self.latent_size = latent_size - - self.encoder = ENCODER(encoder_layer_sizes, latent_size, num_classes, - audio_emb_in_size, audio_emb_out_size, seq_len) - self.decoder = DECODER(decoder_layer_sizes, latent_size, num_classes, - audio_emb_in_size, audio_emb_out_size, seq_len) - def reparameterize(self, mu, logvar): - std = torch.exp(0.5 * logvar) - eps = torch.randn_like(std) - return mu + eps * std - - def forward(self, batch): - batch = self.encoder(batch) - mu = batch['mu'] - logvar = batch['logvar'] - z = self.reparameterize(mu, logvar) - batch['z'] = z - return self.decoder(batch) - - def test(self, batch): - ''' - class_id = batch['class'] - z = torch.randn([class_id.size(0), self.latent_size]).to(class_id.device) - batch['z'] = z - ''' - return self.decoder(batch) - -class ENCODER(nn.Module): - def __init__(self, layer_sizes, latent_size, num_classes, - audio_emb_in_size, audio_emb_out_size, seq_len): - super().__init__() - - self.resunet = ResUnet() - self.num_classes = num_classes - self.seq_len = seq_len - - self.MLP = nn.Sequential() - layer_sizes[0] += latent_size + seq_len*audio_emb_out_size + 6 - for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])): - self.MLP.add_module( - name="L{:d}".format(i), module=nn.Linear(in_size, out_size)) - self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU()) - - self.linear_means = nn.Linear(layer_sizes[-1], latent_size) - self.linear_logvar = nn.Linear(layer_sizes[-1], latent_size) - self.linear_audio = nn.Linear(audio_emb_in_size, audio_emb_out_size) - - self.classbias = nn.Parameter(torch.randn(self.num_classes, latent_size)) - - def forward(self, batch): - class_id = batch['class'] - pose_motion_gt = batch['pose_motion_gt'] #bs seq_len 6 - ref = batch['ref'] #bs 6 - bs = pose_motion_gt.shape[0] - audio_in = batch['audio_emb'] # bs seq_len audio_emb_in_size - - #pose encode - pose_emb = self.resunet(pose_motion_gt.unsqueeze(1)) #bs 1 seq_len 6 - pose_emb = pose_emb.reshape(bs, -1) #bs seq_len*6 - - #audio mapping - print(audio_in.shape) - audio_out = self.linear_audio(audio_in) # bs seq_len audio_emb_out_size - audio_out = audio_out.reshape(bs, -1) - - class_bias = self.classbias[class_id] #bs latent_size - x_in = torch.cat([ref, pose_emb, audio_out, class_bias], dim=-1) #bs seq_len*(audio_emb_out_size+6)+latent_size - x_out = self.MLP(x_in) - - mu = self.linear_means(x_out) - logvar = self.linear_means(x_out) #bs latent_size - - batch.update({'mu':mu, 'logvar':logvar}) - return batch - -class DECODER(nn.Module): - def __init__(self, layer_sizes, latent_size, num_classes, - audio_emb_in_size, audio_emb_out_size, seq_len): - super().__init__() - - self.resunet = ResUnet() - self.num_classes = num_classes - self.seq_len = seq_len - - self.MLP = nn.Sequential() - input_size = latent_size + seq_len*audio_emb_out_size + 6 - for i, (in_size, out_size) in enumerate(zip([input_size]+layer_sizes[:-1], layer_sizes)): - self.MLP.add_module( - name="L{:d}".format(i), module=nn.Linear(in_size, out_size)) - if i+1 < len(layer_sizes): - self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU()) - else: - 
self.MLP.add_module(name="sigmoid", module=nn.Sigmoid()) - - self.pose_linear = nn.Linear(6, 6) - self.linear_audio = nn.Linear(audio_emb_in_size, audio_emb_out_size) - - self.classbias = nn.Parameter(torch.randn(self.num_classes, latent_size)) - - def forward(self, batch): - - z = batch['z'] #bs latent_size - bs = z.shape[0] - class_id = batch['class'] - ref = batch['ref'] #bs 6 - audio_in = batch['audio_emb'] # bs seq_len audio_emb_in_size - #print('audio_in: ', audio_in[:, :, :10]) - - audio_out = self.linear_audio(audio_in) # bs seq_len audio_emb_out_size - #print('audio_out: ', audio_out[:, :, :10]) - audio_out = audio_out.reshape([bs, -1]) # bs seq_len*audio_emb_out_size - class_bias = self.classbias[class_id] #bs latent_size - - z = z + class_bias - x_in = torch.cat([ref, z, audio_out], dim=-1) - x_out = self.MLP(x_in) # bs layer_sizes[-1] - x_out = x_out.reshape((bs, self.seq_len, -1)) - - #print('x_out: ', x_out) - - pose_emb = self.resunet(x_out.unsqueeze(1)) #bs 1 seq_len 6 - - pose_motion_pred = self.pose_linear(pose_emb.squeeze(1)) #bs seq_len 6 - - batch.update({'pose_motion_pred':pose_motion_pred}) - return batch diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/cc_head.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/cc_head.py deleted file mode 100644 index 5b9abb4e747f92657f4220b29788539340986c00..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/cc_head.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch - -from ..builder import HEADS -from .fcn_head import FCNHead - -try: - from annotator.uniformer.mmcv.ops import CrissCrossAttention -except ModuleNotFoundError: - CrissCrossAttention = None - - -@HEADS.register_module() -class CCHead(FCNHead): - """CCNet: Criss-Cross Attention for Semantic Segmentation. - - This head is the implementation of `CCNet - `_. - - Args: - recurrence (int): Number of recurrence of Criss Cross Attention - module. Default: 2. - """ - - def __init__(self, recurrence=2, **kwargs): - if CrissCrossAttention is None: - raise RuntimeError('Please install mmcv-full for ' - 'CrissCrossAttention ops') - super(CCHead, self).__init__(num_convs=2, **kwargs) - self.recurrence = recurrence - self.cca = CrissCrossAttention(self.channels) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - for _ in range(self.recurrence): - output = self.cca(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/README.md b/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/README.md deleted file mode 100644 index 4a3ae54b857c43621c9fb67ee4b214584beec835..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Speech Synthesis (S^2) -=== - -Speech synthesis with fairseq. 
- -- Autoregressive and non-autoregressive models -- Multi-speaker synthesis -- Audio preprocessing -- Automatic metrics -- Similar data configuration as [S2T](../speech_to_text/README.md) - - -## Examples -- [Single-speaker synthesis on LJSpeech](docs/ljspeech_example.md) -- [Multi-speaker synthesis on VCTK](docs/vctk_example.md) -- [Multi-speaker synthesis on Common Voice](docs/common_voice_example.md) diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/modules/ffc.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/modules/ffc.py deleted file mode 100644 index 0e7b84683fccb4bccac97b6371994fa6bb44dbe4..0000000000000000000000000000000000000000 --- a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/modules/ffc.py +++ /dev/null @@ -1,485 +0,0 @@ -# Fast Fourier Convolution NeurIPS 2020 -# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py -# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from saicinpainting.training.modules.base import get_activation, BaseDiscriminator -from saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper -from saicinpainting.training.modules.squeeze_excitation import SELayer -from saicinpainting.utils import get_shape - - -class FFCSE_block(nn.Module): - - def __init__(self, channels, ratio_g): - super(FFCSE_block, self).__init__() - in_cg = int(channels * ratio_g) - in_cl = channels - in_cg - r = 16 - - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.conv1 = nn.Conv2d(channels, channels // r, - kernel_size=1, bias=True) - self.relu1 = nn.ReLU(inplace=True) - self.conv_a2l = None if in_cl == 0 else nn.Conv2d( - channels // r, in_cl, kernel_size=1, bias=True) - self.conv_a2g = None if in_cg == 0 else nn.Conv2d( - channels // r, in_cg, kernel_size=1, bias=True) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - x = x if type(x) is tuple else (x, 0) - id_l, id_g = x - - x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1) - x = self.avgpool(x) - x = self.relu1(self.conv1(x)) - - x_l = 0 if self.conv_a2l is None else id_l * \ - self.sigmoid(self.conv_a2l(x)) - x_g = 0 if self.conv_a2g is None else id_g * \ - self.sigmoid(self.conv_a2g(x)) - return x_l, x_g - - -class FourierUnit(nn.Module): - - def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear', - spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'): - # bn_layer not used - super(FourierUnit, self).__init__() - self.groups = groups - - self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0), - out_channels=out_channels * 2, - kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False) - self.bn = torch.nn.BatchNorm2d(out_channels * 2) - self.relu = torch.nn.ReLU(inplace=True) - - # squeeze and excitation block - self.use_se = use_se - if use_se: - if se_kwargs is None: - se_kwargs = {} - self.se = SELayer(self.conv_layer.in_channels, **se_kwargs) - - self.spatial_scale_factor = spatial_scale_factor - self.spatial_scale_mode = spatial_scale_mode - self.spectral_pos_encoding = spectral_pos_encoding - self.ffc3d = ffc3d - self.fft_norm = fft_norm - - def forward(self, x): - batch = x.shape[0] - - if self.spatial_scale_factor is not None: - orig_size = x.shape[-2:] - x = 
F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False) - - r_size = x.size() - # (batch, c, h, w/2+1, 2) - fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1) - ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm) - ffted = torch.stack((ffted.real, ffted.imag), dim=-1) - ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) - ffted = ffted.view((batch, -1,) + ffted.size()[3:]) - - if self.spectral_pos_encoding: - height, width = ffted.shape[-2:] - coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted) - coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted) - ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1) - - if self.use_se: - ffted = self.se(ffted) - - ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1) - ffted = self.relu(self.bn(ffted)) - - ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute( - 0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2) - ffted = torch.complex(ffted[..., 0], ffted[..., 1]) - - ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:] - output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) - - if self.spatial_scale_factor is not None: - output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False) - - return output - - -class SeparableFourierUnit(nn.Module): - - def __init__(self, in_channels, out_channels, groups=1, kernel_size=3): - # bn_layer not used - super(SeparableFourierUnit, self).__init__() - self.groups = groups - row_out_channels = out_channels // 2 - col_out_channels = out_channels - row_out_channels - self.row_conv = torch.nn.Conv2d(in_channels=in_channels * 2, - out_channels=row_out_channels * 2, - kernel_size=(kernel_size, 1), # kernel size is always like this, but the data will be transposed - stride=1, padding=(kernel_size // 2, 0), - padding_mode='reflect', - groups=self.groups, bias=False) - self.col_conv = torch.nn.Conv2d(in_channels=in_channels * 2, - out_channels=col_out_channels * 2, - kernel_size=(kernel_size, 1), # kernel size is always like this, but the data will be transposed - stride=1, padding=(kernel_size // 2, 0), - padding_mode='reflect', - groups=self.groups, bias=False) - self.row_bn = torch.nn.BatchNorm2d(row_out_channels * 2) - self.col_bn = torch.nn.BatchNorm2d(col_out_channels * 2) - self.relu = torch.nn.ReLU(inplace=True) - - def process_branch(self, x, conv, bn): - batch = x.shape[0] - - r_size = x.size() - # (batch, c, h, w/2+1, 2) - ffted = torch.fft.rfft(x, norm="ortho") - ffted = torch.stack((ffted.real, ffted.imag), dim=-1) - ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) - ffted = ffted.view((batch, -1,) + ffted.size()[3:]) - - ffted = self.relu(bn(conv(ffted))) - - ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute( - 0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2) - ffted = torch.complex(ffted[..., 0], ffted[..., 1]) - - output = torch.fft.irfft(ffted, s=x.shape[-1:], norm="ortho") - return output - - - def forward(self, x): - rowwise = self.process_branch(x, self.row_conv, self.row_bn) - colwise = self.process_branch(x.permute(0, 1, 3, 2), self.col_conv, self.col_bn).permute(0, 1, 3, 2) - out = torch.cat((rowwise, colwise), dim=1) - return out - - -class SpectralTransform(nn.Module): - - def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, 
separable_fu=False, **fu_kwargs): - # bn_layer not used - super(SpectralTransform, self).__init__() - self.enable_lfu = enable_lfu - if stride == 2: - self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) - else: - self.downsample = nn.Identity() - - self.stride = stride - self.conv1 = nn.Sequential( - nn.Conv2d(in_channels, out_channels // - 2, kernel_size=1, groups=groups, bias=False), - nn.BatchNorm2d(out_channels // 2), - nn.ReLU(inplace=True) - ) - fu_class = SeparableFourierUnit if separable_fu else FourierUnit - self.fu = fu_class( - out_channels // 2, out_channels // 2, groups, **fu_kwargs) - if self.enable_lfu: - self.lfu = fu_class( - out_channels // 2, out_channels // 2, groups) - self.conv2 = torch.nn.Conv2d( - out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False) - - def forward(self, x): - - x = self.downsample(x) - x = self.conv1(x) - output = self.fu(x) - - if self.enable_lfu: - n, c, h, w = x.shape - split_no = 2 - split_s = h // split_no - xs = torch.cat(torch.split( - x[:, :c // 4], split_s, dim=-2), dim=1).contiguous() - xs = torch.cat(torch.split(xs, split_s, dim=-1), - dim=1).contiguous() - xs = self.lfu(xs) - xs = xs.repeat(1, 1, split_no, split_no).contiguous() - else: - xs = 0 - - output = self.conv2(x + output + xs) - - return output - - -class FFC(nn.Module): - - def __init__(self, in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride=1, padding=0, - dilation=1, groups=1, bias=False, enable_lfu=True, - padding_type='reflect', gated=False, **spectral_kwargs): - super(FFC, self).__init__() - - assert stride == 1 or stride == 2, "Stride should be 1 or 2." - self.stride = stride - - in_cg = int(in_channels * ratio_gin) - in_cl = in_channels - in_cg - out_cg = int(out_channels * ratio_gout) - out_cl = out_channels - out_cg - #groups_g = 1 if groups == 1 else int(groups * ratio_gout) - #groups_l = 1 if groups == 1 else groups - groups_g - - self.ratio_gin = ratio_gin - self.ratio_gout = ratio_gout - self.global_in_num = in_cg - - module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d - self.convl2l = module(in_cl, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d - self.convl2g = module(in_cl, out_cg, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d - self.convg2l = module(in_cg, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform - self.convg2g = module( - in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs) - - self.gated = gated - module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d - self.gate = module(in_channels, 2, 1) - - def forward(self, x): - x_l, x_g = x if type(x) is tuple else (x, 0) - out_xl, out_xg = 0, 0 - - if self.gated: - total_input_parts = [x_l] - if torch.is_tensor(x_g): - total_input_parts.append(x_g) - total_input = torch.cat(total_input_parts, dim=1) - - gates = torch.sigmoid(self.gate(total_input)) - g2l_gate, l2g_gate = gates.chunk(2, dim=1) - else: - g2l_gate, l2g_gate = 1, 1 - - if self.ratio_gout != 1: - out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate - if self.ratio_gout != 0: - out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g) - - return out_xl, out_xg - - -class 
FFC_BN_ACT(nn.Module): - - def __init__(self, in_channels, out_channels, - kernel_size, ratio_gin, ratio_gout, - stride=1, padding=0, dilation=1, groups=1, bias=False, - norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity, - padding_type='reflect', - enable_lfu=True, **kwargs): - super(FFC_BN_ACT, self).__init__() - self.ffc = FFC(in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride, padding, dilation, - groups, bias, enable_lfu, padding_type=padding_type, **kwargs) - lnorm = nn.Identity if ratio_gout == 1 else norm_layer - gnorm = nn.Identity if ratio_gout == 0 else norm_layer - global_channels = int(out_channels * ratio_gout) - self.bn_l = lnorm(out_channels - global_channels) - self.bn_g = gnorm(global_channels) - - lact = nn.Identity if ratio_gout == 1 else activation_layer - gact = nn.Identity if ratio_gout == 0 else activation_layer - self.act_l = lact(inplace=True) - self.act_g = gact(inplace=True) - - def forward(self, x): - x_l, x_g = self.ffc(x) - x_l = self.act_l(self.bn_l(x_l)) - x_g = self.act_g(self.bn_g(x_g)) - return x_l, x_g - - -class FFCResnetBlock(nn.Module): - def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1, - spatial_transform_kwargs=None, inline=False, **conv_kwargs): - super().__init__() - self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - **conv_kwargs) - self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - **conv_kwargs) - if spatial_transform_kwargs is not None: - self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs) - self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs) - self.inline = inline - - def forward(self, x): - if self.inline: - x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:] - else: - x_l, x_g = x if type(x) is tuple else (x, 0) - - id_l, id_g = x_l, x_g - - x_l, x_g = self.conv1((x_l, x_g)) - x_l, x_g = self.conv2((x_l, x_g)) - - x_l, x_g = id_l + x_l, id_g + x_g - out = x_l, x_g - if self.inline: - out = torch.cat(out, dim=1) - return out - - -class ConcatTupleLayer(nn.Module): - def forward(self, x): - assert isinstance(x, tuple) - x_l, x_g = x - assert torch.is_tensor(x_l) or torch.is_tensor(x_g) - if not torch.is_tensor(x_g): - return x_l - return torch.cat(x, dim=1) - - -class FFCResNetGenerator(nn.Module): - def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - padding_type='reflect', activation_layer=nn.ReLU, - up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), - init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={}, - spatial_transform_layers=None, spatial_transform_kwargs={}, - add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}): - assert (n_blocks >= 0) - super().__init__() - - model = [nn.ReflectionPad2d(3), - FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer, - activation_layer=activation_layer, **init_conv_kwargs)] - - ### downsample - for i in range(n_downsampling): - mult = 2 ** i - if i == n_downsampling - 1: - cur_conv_kwargs = dict(downsample_conv_kwargs) - cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0) - else: - cur_conv_kwargs = downsample_conv_kwargs - model += 
[FFC_BN_ACT(min(max_features, ngf * mult), - min(max_features, ngf * mult * 2), - kernel_size=3, stride=2, padding=1, - norm_layer=norm_layer, - activation_layer=activation_layer, - **cur_conv_kwargs)] - - mult = 2 ** n_downsampling - feats_num_bottleneck = min(max_features, ngf * mult) - - ### resnet blocks - for i in range(n_blocks): - cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, **resnet_conv_kwargs) - if spatial_transform_layers is not None and i in spatial_transform_layers: - cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs) - model += [cur_resblock] - - model += [ConcatTupleLayer()] - - ### upsample - for i in range(n_downsampling): - mult = 2 ** (n_downsampling - i) - model += [nn.ConvTranspose2d(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=2, padding=1, output_padding=1), - up_norm_layer(min(max_features, int(ngf * mult / 2))), - up_activation] - - if out_ffc: - model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, inline=True, **out_ffc_kwargs)] - - model += [nn.ReflectionPad2d(3), - nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] - if add_out_act: - model.append(get_activation('tanh' if add_out_act is True else add_out_act)) - self.model = nn.Sequential(*model) - - def forward(self, input): - return self.model(input) - - -class FFCNLayerDiscriminator(BaseDiscriminator): - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512, - init_conv_kwargs={}, conv_kwargs={}): - super().__init__() - self.n_layers = n_layers - - def _act_ctor(inplace=True): - return nn.LeakyReLU(negative_slope=0.2, inplace=inplace) - - kw = 3 - padw = int(np.ceil((kw-1.0)/2)) - sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer, - activation_layer=_act_ctor, **init_conv_kwargs)]] - - nf = ndf - for n in range(1, n_layers): - nf_prev = nf - nf = min(nf * 2, max_features) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=2, padding=padw, - norm_layer=norm_layer, - activation_layer=_act_ctor, - **conv_kwargs) - ] - sequence.append(cur_model) - - nf_prev = nf - nf = min(nf * 2, 512) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=1, padding=padw, - norm_layer=norm_layer, - activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs), - **conv_kwargs), - ConcatTupleLayer() - ] - sequence.append(cur_model) - - sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] - - for n in range(len(sequence)): - setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) - - def get_all_activations(self, x): - res = [x] - for n in range(self.n_layers + 2): - model = getattr(self, 'model' + str(n)) - res.append(model(res[-1])) - return res[1:] - - def forward(self, x): - act = self.get_all_activations(x) - feats = [] - for out in act[:-1]: - if isinstance(out, tuple): - if torch.is_tensor(out[1]): - out = torch.cat(out, dim=1) - else: - out = out[0] - feats.append(out) - return act[-1], feats diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py deleted file mode 100644 index ed00764f7c193ca9bcd0bf67196da59c30048a28..0000000000000000000000000000000000000000 --- 
a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -"""fontTools.ttLib -- a package for dealing with TrueType fonts.""" - -from fontTools.misc.loggingTools import deprecateFunction -import logging - - -log = logging.getLogger(__name__) - - -class TTLibError(Exception): - pass - - -class TTLibFileIsCollectionError(TTLibError): - pass - - -@deprecateFunction("use logging instead", category=DeprecationWarning) -def debugmsg(msg): - import time - - print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) - - -from fontTools.ttLib.ttFont import * -from fontTools.ttLib.ttCollection import TTCollection diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/DefaultTable.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/DefaultTable.py deleted file mode 100644 index 32a4b1f258f54d78ad39eb764867a6c354939743..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/DefaultTable.py +++ /dev/null @@ -1,50 +0,0 @@ -from fontTools.misc.textTools import Tag -from fontTools.ttLib import getClassTag - - -class DefaultTable(object): - - dependencies = [] - - def __init__(self, tag=None): - if tag is None: - tag = getClassTag(self.__class__) - self.tableTag = Tag(tag) - - def decompile(self, data, ttFont): - self.data = data - - def compile(self, ttFont): - return self.data - - def toXML(self, writer, ttFont, **kwargs): - if hasattr(self, "ERROR"): - writer.comment("An error occurred during the decompilation of this table") - writer.newline() - writer.comment(self.ERROR) - writer.newline() - writer.begintag("hexdata") - writer.newline() - writer.dumphex(self.compile(ttFont)) - writer.endtag("hexdata") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - from fontTools.misc.textTools import readHex - from fontTools import ttLib - - if name != "hexdata": - raise ttLib.TTLibError("can't handle '%s' element" % name) - self.decompile(readHex(content), ttFont) - - def __repr__(self): - return "<'%s' table at %x>" % (self.tableTag, id(self)) - - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - result = self.__eq__(other) - return result if result is NotImplemented else not result diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_validators.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_validators.py deleted file mode 100644 index 5dd64fa51435b97142bb61cfe12f9369e6f1488b..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_validators.py +++ /dev/null @@ -1,230 +0,0 @@ -# coding=utf-8 -# Copyright 2022-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Contains utilities to validate argument values in `huggingface_hub`.""" -import inspect -import re -import warnings -from functools import wraps -from itertools import chain -from typing import Any, Dict - -from ._typing import CallableT - - -REPO_ID_REGEX = re.compile( - r""" - ^ - (\b[\w\-.]+\b/)? # optional namespace (username or organization) - \b # starts with a word boundary - [\w\-.]{1,96} # repo_name: alphanumeric + . _ - - \b # ends with a word boundary - $ - """, - flags=re.VERBOSE, -) - - -class HFValidationError(ValueError): - """Generic exception thrown by `huggingface_hub` validators. - - Inherits from [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError). - """ - - -def validate_hf_hub_args(fn: CallableT) -> CallableT: - """Validate values received as argument for any public method of `huggingface_hub`. - - The goal of this decorator is to harmonize validation of arguments reused - everywhere. By default, all defined validators are tested. - - Validators: - - [`~utils.validate_repo_id`]: `repo_id` must be `"repo_name"` - or `"namespace/repo_name"`. Namespace is a username or an organization. - - [`~utils.smoothly_deprecate_use_auth_token`]: Use `token` instead of - `use_auth_token` (only if `use_auth_token` is not expected by the decorated - function - in practice, always the case in `huggingface_hub`). - - Example: - ```py - >>> from huggingface_hub.utils import validate_hf_hub_args - - >>> @validate_hf_hub_args - ... def my_cool_method(repo_id: str): - ... print(repo_id) - - >>> my_cool_method(repo_id="valid_repo_id") - valid_repo_id - - >>> my_cool_method("other..repo..id") - huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'. - - >>> my_cool_method(repo_id="other..repo..id") - huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'. - - >>> @validate_hf_hub_args - ... def my_cool_auth_method(token: str): - ... print(token) - - >>> my_cool_auth_method(token="a token") - "a token" - - >>> my_cool_auth_method(use_auth_token="a use_auth_token") - "a use_auth_token" - - >>> my_cool_auth_method(token="a token", use_auth_token="a use_auth_token") - UserWarning: Both `token` and `use_auth_token` are passed (...) - "a token" - ``` - - Raises: - [`~utils.HFValidationError`]: - If an input is not valid. - """ - # TODO: add an argument to opt-out validation for specific argument? - signature = inspect.signature(fn) - - # Should the validator switch `use_auth_token` values to `token`? In practice, always - # True in `huggingface_hub`. Might not be the case in a downstream library. - check_use_auth_token = "use_auth_token" not in signature.parameters and "token" in signature.parameters - - @wraps(fn) - def _inner_fn(*args, **kwargs): - has_token = False - for arg_name, arg_value in chain( - zip(signature.parameters, args), # Args values - kwargs.items(), # Kwargs values - ): - if arg_name in ["repo_id", "from_id", "to_id"]: - validate_repo_id(arg_value) - - elif arg_name == "token" and arg_value is not None: - has_token = True - - if check_use_auth_token: - kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs) - - return fn(*args, **kwargs) - - return _inner_fn # type: ignore - - -def validate_repo_id(repo_id: str) -> None: - """Validate `repo_id` is valid. 
- - This is not meant to replace the proper validation made on the Hub but rather to - avoid local inconsistencies whenever possible (example: passing `repo_type` in the - `repo_id` is forbidden). - - Rules: - - Between 1 and 96 characters. - - Either "repo_name" or "namespace/repo_name" - - [a-zA-Z0-9] or "-", "_", "." - - "--" and ".." are forbidden - - Valid: `"foo"`, `"foo/bar"`, `"123"`, `"Foo-BAR_foo.bar123"` - - Not valid: `"datasets/foo/bar"`, `".repo_id"`, `"foo--bar"`, `"foo.git"` - - Example: - ```py - >>> from huggingface_hub.utils import validate_repo_id - >>> validate_repo_id(repo_id="valid_repo_id") - >>> validate_repo_id(repo_id="other..repo..id") - huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'. - ``` - - Discussed in https://github.com/huggingface/huggingface_hub/issues/1008. - In moon-landing (internal repository): - - https://github.com/huggingface/moon-landing/blob/main/server/lib/Names.ts#L27 - - https://github.com/huggingface/moon-landing/blob/main/server/views/components/NewRepoForm/NewRepoForm.svelte#L138 - """ - if not isinstance(repo_id, str): - # Typically, a Path is not a repo_id - raise HFValidationError(f"Repo id must be a string, not {type(repo_id)}: '{repo_id}'.") - - if repo_id.count("/") > 1: - raise HFValidationError( - "Repo id must be in the form 'repo_name' or 'namespace/repo_name':" - f" '{repo_id}'. Use `repo_type` argument if needed." - ) - - if not REPO_ID_REGEX.match(repo_id): - raise HFValidationError( - "Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are" - " forbidden, '-' and '.' cannot start or end the name, max length is 96:" - f" '{repo_id}'." - ) - - if "--" in repo_id or ".." in repo_id: - raise HFValidationError(f"Cannot have -- or .. in repo_id: '{repo_id}'.") - - if repo_id.endswith(".git"): - raise HFValidationError(f"Repo_id cannot end by '.git': '{repo_id}'.") - - -def smoothly_deprecate_use_auth_token(fn_name: str, has_token: bool, kwargs: Dict[str, Any]) -> Dict[str, Any]: - """Smoothly deprecate `use_auth_token` in the `huggingface_hub` codebase. - - The long-term goal is to remove any mention of `use_auth_token` in the codebase in - favor of a unique and less verbose `token` argument. This will be done a few steps: - - 0. Step 0: methods that require a read-access to the Hub use the `use_auth_token` - argument (`str`, `bool` or `None`). Methods requiring write-access have a `token` - argument (`str`, `None`). This implicit rule exists to be able to not send the - token when not necessary (`use_auth_token=False`) even if logged in. - - 1. Step 1: we want to harmonize everything and use `token` everywhere (supporting - `token=False` for read-only methods). In order not to break existing code, if - `use_auth_token` is passed to a function, the `use_auth_token` value is passed - as `token` instead, without any warning. - a. Corner case: if both `use_auth_token` and `token` values are passed, a warning - is thrown and the `use_auth_token` value is ignored. - - 2. Step 2: Once it is release, we should push downstream libraries to switch from - `use_auth_token` to `token` as much as possible, but without throwing a warning - (e.g. manually create issues on the corresponding repos). - - 3. Step 3: After a transitional period (6 months e.g. until April 2023?), we update - `huggingface_hub` to throw a warning on `use_auth_token`. Hopefully, very few - users will be impacted as it would have already been fixed. 
- In addition, unit tests in `huggingface_hub` must be adapted to expect warnings - to be thrown (but still use `use_auth_token` as before). - - 4. Step 4: After a normal deprecation cycle (3 releases ?), remove this validator. - `use_auth_token` will definitely not be supported. - In addition, we update unit tests in `huggingface_hub` to use `token` everywhere. - - This has been discussed in: - - https://github.com/huggingface/huggingface_hub/issues/1094. - - https://github.com/huggingface/huggingface_hub/pull/928 - - (related) https://github.com/huggingface/huggingface_hub/pull/1064 - """ - new_kwargs = kwargs.copy() # do not mutate input ! - - use_auth_token = new_kwargs.pop("use_auth_token", None) # remove from kwargs - if use_auth_token is not None: - if has_token: - warnings.warn( - "Both `token` and `use_auth_token` are passed to" - f" `{fn_name}` with non-None values. `token` is now the" - " preferred argument to pass a User Access Token." - " `use_auth_token` value will be ignored." - ) - else: - # `token` argument is not passed and a non-None value is passed in - # `use_auth_token` => use `use_auth_token` value as `token` kwarg. - new_kwargs["token"] = use_auth_token - - return new_kwargs diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/importlib_resources/simple.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/importlib_resources/simple.py deleted file mode 100644 index 7770c922c84fabe0031333a4de305dd6d6852911..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/importlib_resources/simple.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Interface adapters for low-level readers. -""" - -import abc -import io -import itertools -from typing import BinaryIO, List - -from .abc import Traversable, TraversableResources - - -class SimpleReader(abc.ABC): - """ - The minimum, low-level interface required from a resource - provider. - """ - - @property - @abc.abstractmethod - def package(self) -> str: - """ - The name of the package for which this reader loads resources. - """ - - @abc.abstractmethod - def children(self) -> List['SimpleReader']: - """ - Obtain an iterable of SimpleReader for available - child containers (e.g. directories). - """ - - @abc.abstractmethod - def resources(self) -> List[str]: - """ - Obtain available named resources for this virtual package. - """ - - @abc.abstractmethod - def open_binary(self, resource: str) -> BinaryIO: - """ - Obtain a File-like for a named resource. - """ - - @property - def name(self): - return self.package.split('.')[-1] - - -class ResourceContainer(Traversable): - """ - Traversable container for a package's resources via its reader. - """ - - def __init__(self, reader: SimpleReader): - self.reader = reader - - def is_dir(self): - return True - - def is_file(self): - return False - - def iterdir(self): - files = (ResourceHandle(self, name) for name in self.reader.resources) - dirs = map(ResourceContainer, self.reader.children()) - return itertools.chain(files, dirs) - - def open(self, *args, **kwargs): - raise IsADirectoryError() - - -class ResourceHandle(Traversable): - """ - Handle to a named resource in a ResourceReader. 
- """ - - def __init__(self, parent: ResourceContainer, name: str): - self.parent = parent - self.name = name # type: ignore - - def is_file(self): - return True - - def is_dir(self): - return False - - def open(self, mode='r', *args, **kwargs): - stream = self.parent.reader.open_binary(self.name) - if 'b' not in mode: - stream = io.TextIOWrapper(*args, **kwargs) - return stream - - def joinpath(self, name): - raise RuntimeError("Cannot traverse into a resource") - - -class TraversableReader(TraversableResources, SimpleReader): - """ - A TraversableResources based on SimpleReader. Resource providers - may derive from this class to provide the TraversableResources - interface by supplying the SimpleReader interface. - """ - - def files(self): - return ResourceContainer(self) diff --git a/spaces/lakshmi324/Fake_airpods_Detector/app.py b/spaces/lakshmi324/Fake_airpods_Detector/app.py deleted file mode 100644 index 4e6518ef3b20795b3663d0b548a167eb596b720d..0000000000000000000000000000000000000000 --- a/spaces/lakshmi324/Fake_airpods_Detector/app.py +++ /dev/null @@ -1,95 +0,0 @@ - -import pandas as pd -import ast -import os -import gradio as gr - - -class airpods(object): - - def __init__(self,master,input_model,input_charging_case_serial,input_box_serial_number,input_leftbud_model,input_rightbud_model,input_leftbud_serial,input_rightbud_serial,input_firmware): - ''' - ''' - self.master = master - self.input_model = input_model - self.input_charging_case_serial = input_charging_case_serial - self.input_box_serial_number = input_box_serial_number - self.input_leftbud_model = input_leftbud_model - self.input_rightbud_model = input_rightbud_model - self.input_leftbud_serial = input_leftbud_serial - self.input_rightbud_serial = input_rightbud_serial - self.input_firmware = input_firmware - - - def Iterative_Serial_Check(self): - ''' - Function checks if the airbuds serial numbers are iterative - ''' - string = self.input_leftbud_serial[-1] + self.input_rightbud_serial[-1] + self.input_charging_case_serial[-1] - string = string.lower() - return not any(m > n for m,n in zip(string,string[1:])) - - def check_latest_firmware(self): - ''' - Function to check if the airbuds are on the latest firmware - ''' - return (self.master[self.input_model]['firmware'] == self.input_firmware) - - def check_matching_serial(self): - ''' - Function to check if the airbuds are having the - same serial number as that of the case - ''' - return (self.input_charging_case_serial == self.input_box_serial_number ) - - - def check_model_number(self): - ''' - Function to check if the airbuds are having the - same serial number as that of the case - ''' - return (self.input_leftbud_model in self.master[self.input_model]['Model_number'] and self.input_rightbud_model in self.master[self.input_model]['Model_number'] ) - - def final_check(self): - - if (self.Iterative_Serial_Check() and self.check_latest_firmware() and self.check_matching_serial() and self.check_model_number() ): - return 'Congratulations, Your Earpods/ Headphones are Genuine' - elif (self.check_latest_firmware() and self.check_matching_serial() and self.check_model_number() ): - return 'Looks like Earpods/ Headphones are mostly Genuine, but Case has been swapped' - else: - return 'Extremly sorry, Your Earpods/ Headphones are Probably Knock off' - -def app_check(input_model,input_charging_case_serial,input_box_serial_number,input_leftbud_model,input_rightbud_model,input_leftbud_serial,input_rightbud_serial,input_firmware): - input_dict = 
ast.literal_eval(os.environ.get('master')) - - if (all(i >= 10 for i in [len(input_charging_case_serial) , len(input_charging_case_serial) , len(input_leftbud_serial) , len(input_rightbud_serial)]) and any(i < 15 for i in [len(input_charging_case_serial) , len(input_charging_case_serial) , len(input_leftbud_serial) , len(input_rightbud_serial)])) : - airpod = airpods(input_dict,input_model,input_charging_case_serial,input_box_serial_number,input_leftbud_model,input_rightbud_model,input_leftbud_serial,input_rightbud_serial,input_firmware) - return airpod.final_check() - else: - return 'the serial numbers are not correct, please check and re enter' - -gr.Interface(fn=app_check, - inputs=[ - gr.inputs.Dropdown(['AirPods_Pro2', 'AirPods_3', 'AirPods_Pro', 'AirPods_2', 'AirPods_1']), - gr.inputs.Textbox( - placeholder="Please enter the CharginCase serial number", label="CharginCase serial number", lines=1,), - gr.inputs.Textbox( - placeholder="Please enter the Box serial number", label="Box serial number", lines=1), - gr.inputs.Textbox( - placeholder="Please enter the left bud model number A2083", label="Left bud model number", lines=1), - gr.inputs.Textbox( - placeholder="Please enter the right bud number A2083", label="Right bud number", lines=1), - gr.inputs.Textbox( - placeholder="Please enter the left bud serial number", label="Left bud serial number", lines=1), - - gr.inputs.Textbox( - placeholder="Please enter the right bud number", label="Right bud number", lines=1), - gr.inputs.Textbox( - placeholder="Please enter the Firmware", label="Firmware", lines=1) - ], - outputs= [gr.outputs.Textbox(label="Output Box")], - examples=[]).launch(debug= True) - - - - diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/loss.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/models/loss.py deleted file mode 100644 index 0a01d7d719f66f0947739caf223cad7ea0dbefca..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/loss.py +++ /dev/null @@ -1,287 +0,0 @@ -import torch -import torch.nn as nn -import torchvision -from torch.nn import functional as F -from torch import autograd as autograd - - -""" -Sequential( - (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (1): ReLU(inplace) - (2*): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (3): ReLU(inplace) - (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) - (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (6): ReLU(inplace) - (7*): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (8): ReLU(inplace) - (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) - (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (11): ReLU(inplace) - (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (13): ReLU(inplace) - (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (15): ReLU(inplace) - (16*): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (17): ReLU(inplace) - (18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) - (19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (20): ReLU(inplace) - (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (22): ReLU(inplace) - (23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (24): ReLU(inplace) - (25*): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), 
padding=(1, 1)) - (26): ReLU(inplace) - (27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) - (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (29): ReLU(inplace) - (30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (31): ReLU(inplace) - (32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (33): ReLU(inplace) - (34*): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (35): ReLU(inplace) - (36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) -) -""" - - -# -------------------------------------------- -# Perceptual loss -# -------------------------------------------- -class VGGFeatureExtractor(nn.Module): - def __init__(self, feature_layer=[2,7,16,25,34], use_input_norm=True, use_range_norm=False): - super(VGGFeatureExtractor, self).__init__() - ''' - use_input_norm: If True, x: [0, 1] --> (x - mean) / std - use_range_norm: If True, x: [0, 1] --> x: [-1, 1] - ''' - model = torchvision.models.vgg19(pretrained=True) - self.use_input_norm = use_input_norm - self.use_range_norm = use_range_norm - if self.use_input_norm: - mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1) - std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1) - self.register_buffer('mean', mean) - self.register_buffer('std', std) - self.list_outputs = isinstance(feature_layer, list) - if self.list_outputs: - self.features = nn.Sequential() - feature_layer = [-1] + feature_layer - for i in range(len(feature_layer)-1): - self.features.add_module('child'+str(i), nn.Sequential(*list(model.features.children())[(feature_layer[i]+1):(feature_layer[i+1]+1)])) - else: - self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)]) - - print(self.features) - - # No need to BP to variable - for k, v in self.features.named_parameters(): - v.requires_grad = False - - def forward(self, x): - if self.use_range_norm: - x = (x + 1.0) / 2.0 - if self.use_input_norm: - x = (x - self.mean) / self.std - if self.list_outputs: - output = [] - for child_model in self.features.children(): - x = child_model(x) - output.append(x.clone()) - return output - else: - return self.features(x) - - -class PerceptualLoss(nn.Module): - """VGG Perceptual loss - """ - - def __init__(self, feature_layer=[2,7,16,25,34], weights=[0.1,0.1,1.0,1.0,1.0], lossfn_type='l1', use_input_norm=True, use_range_norm=False): - super(PerceptualLoss, self).__init__() - self.vgg = VGGFeatureExtractor(feature_layer=feature_layer, use_input_norm=use_input_norm, use_range_norm=use_range_norm) - self.lossfn_type = lossfn_type - self.weights = weights - if self.lossfn_type == 'l1': - self.lossfn = nn.L1Loss() - else: - self.lossfn = nn.MSELoss() - print(f'feature_layer: {feature_layer} with weights: {weights}') - - def forward(self, x, gt): - """Forward function. - Args: - x (Tensor): Input tensor with shape (n, c, h, w). - gt (Tensor): Ground-truth tensor with shape (n, c, h, w). - Returns: - Tensor: Forward results. 
- """ - x_vgg, gt_vgg = self.vgg(x), self.vgg(gt.detach()) - loss = 0.0 - if isinstance(x_vgg, list): - n = len(x_vgg) - for i in range(n): - loss += self.weights[i] * self.lossfn(x_vgg[i], gt_vgg[i]) - else: - loss += self.lossfn(x_vgg, gt_vgg.detach()) - return loss - -# -------------------------------------------- -# GAN loss: gan, ragan -# -------------------------------------------- -class GANLoss(nn.Module): - def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0): - super(GANLoss, self).__init__() - self.gan_type = gan_type.lower() - self.real_label_val = real_label_val - self.fake_label_val = fake_label_val - - if self.gan_type == 'gan' or self.gan_type == 'ragan': - self.loss = nn.BCEWithLogitsLoss() - elif self.gan_type == 'lsgan': - self.loss = nn.MSELoss() - elif self.gan_type == 'wgan': - def wgan_loss(input, target): - # target is boolean - return -1 * input.mean() if target else input.mean() - - self.loss = wgan_loss - elif self.gan_type == 'softplusgan': - def softplusgan_loss(input, target): - # target is boolean - return F.softplus(-input).mean() if target else F.softplus(input).mean() - - self.loss = softplusgan_loss - else: - raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type)) - - def get_target_label(self, input, target_is_real): - if self.gan_type in ['wgan', 'softplusgan']: - return target_is_real - if target_is_real: - return torch.empty_like(input).fill_(self.real_label_val) - else: - return torch.empty_like(input).fill_(self.fake_label_val) - - def forward(self, input, target_is_real): - target_label = self.get_target_label(input, target_is_real) - loss = self.loss(input, target_label) - return loss - - -# -------------------------------------------- -# TV loss -# -------------------------------------------- -class TVLoss(nn.Module): - def __init__(self, tv_loss_weight=1): - """ - Total variation loss - https://github.com/jxgu1016/Total_Variation_Loss.pytorch - Args: - tv_loss_weight (int): - """ - super(TVLoss, self).__init__() - self.tv_loss_weight = tv_loss_weight - - def forward(self, x): - batch_size = x.size()[0] - h_x = x.size()[2] - w_x = x.size()[3] - count_h = self.tensor_size(x[:, :, 1:, :]) - count_w = self.tensor_size(x[:, :, :, 1:]) - h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum() - w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum() - return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size - - @staticmethod - def tensor_size(t): - return t.size()[1] * t.size()[2] * t.size()[3] - - -# -------------------------------------------- -# Charbonnier loss -# -------------------------------------------- -class CharbonnierLoss(nn.Module): - """Charbonnier Loss (L1)""" - - def __init__(self, eps=1e-9): - super(CharbonnierLoss, self).__init__() - self.eps = eps - - def forward(self, x, y): - diff = x - y - loss = torch.mean(torch.sqrt((diff * diff) + self.eps)) - return loss - - - -def r1_penalty(real_pred, real_img): - """R1 regularization for discriminator. The core idea is to - penalize the gradient on real data alone: when the - generator distribution produces the true data distribution - and the discriminator is equal to 0 on the data manifold, the - gradient penalty ensures that the discriminator cannot create - a non-zero gradient orthogonal to the data manifold without - suffering a loss in the GAN game. - Ref: - Eq. 9 in Which training methods for GANs do actually converge. 
- """ - grad_real = autograd.grad( - outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0] - grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean() - return grad_penalty - - -def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01): - noise = torch.randn_like(fake_img) / math.sqrt( - fake_img.shape[2] * fake_img.shape[3]) - grad = autograd.grad( - outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0] - path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1)) - - path_mean = mean_path_length + decay * ( - path_lengths.mean() - mean_path_length) - - path_penalty = (path_lengths - path_mean).pow(2).mean() - - return path_penalty, path_lengths.detach().mean(), path_mean.detach() - - -def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None): - """Calculate gradient penalty for wgan-gp. - Args: - discriminator (nn.Module): Network for the discriminator. - real_data (Tensor): Real input data. - fake_data (Tensor): Fake input data. - weight (Tensor): Weight tensor. Default: None. - Returns: - Tensor: A tensor for gradient penalty. - """ - - batch_size = real_data.size(0) - alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1)) - - # interpolate between real_data and fake_data - interpolates = alpha * real_data + (1. - alpha) * fake_data - interpolates = autograd.Variable(interpolates, requires_grad=True) - - disc_interpolates = discriminator(interpolates) - gradients = autograd.grad( - outputs=disc_interpolates, - inputs=interpolates, - grad_outputs=torch.ones_like(disc_interpolates), - create_graph=True, - retain_graph=True, - only_inputs=True)[0] - - if weight is not None: - gradients = gradients * weight - - gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean() - if weight is not None: - gradients_penalty /= torch.mean(weight) - - return gradients_penalty diff --git a/spaces/lcipolina/Print_Gallery/glide_text2im/clip/readme.md b/spaces/lcipolina/Print_Gallery/glide_text2im/clip/readme.md deleted file mode 100644 index 12fc00dafedd303aed22aba2282568c528649c9c..0000000000000000000000000000000000000000 --- a/spaces/lcipolina/Print_Gallery/glide_text2im/clip/readme.md +++ /dev/null @@ -1 +0,0 @@ -carry on \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Gregg Braden The Divine Matrix Pdf Download.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Gregg Braden The Divine Matrix Pdf Download.md deleted file mode 100644 index 631b85ca31fc3ea85f34a99f485e03ca8d0d2d56..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Gregg Braden The Divine Matrix Pdf Download.md +++ /dev/null @@ -1,118 +0,0 @@ - -

          Gregg Braden The Divine Matrix Pdf Download: A Book That Will Change Your Life

          - -

          If you are looking for a book that will inspire you, challenge you, and transform you, then you should download Gregg Braden The Divine Matrix Pdf. This book is a masterpiece of science, spirituality, and miracles that will show you how to connect with the web of energy that links everything in our lives and our world.

          -

          Gregg Braden The Divine Matrix Pdf Download


          DOWNLOAD: https://bytlly.com/2uGy9v



          - -

          In this book, Gregg Braden, a renowned author and speaker, presents the evidence, drawn from experiments conducted between 1993 and 2000, for a divine matrix that connects everything in our lives and our world. He explains how this matrix is the bridge between our inner and outer worlds, and how we can use it to create joy, heal suffering, and bring peace to nations.

          - -

          Gregg Braden also shares his personal experiences and stories of people who have tapped into the divine matrix and witnessed amazing results. He teaches us how to speak the language of the matrix, which is based on emotion, intention, and belief. He shows us how to access the power that lives inside of us and use it each day of our lives.

          - -

          Why You Should Download Gregg Braden The Divine Matrix Pdf

          - -

          There are many reasons why you should download Gregg Braden The Divine Matrix Pdf, but here are some of the most important ones:

          - -
            -
          • You will learn about the scientific discoveries that prove the existence of a divine matrix that connects everything in our lives and our world.
          • You will discover how to use the divine matrix to create your own reality and manifest your desires.
          • You will understand how to heal yourself and others by using the power of your emotions, intentions, and beliefs.
          • You will explore the mysteries of time, space, miracles, and belief, and how they relate to the divine matrix.
          • You will be inspired by the stories of people who have used the divine matrix to achieve extraordinary results in their lives.
          • You will join Gregg Braden on an extraordinary journey that will bridge science, spirituality, and miracles through the language of the divine matrix.
          - -

          How to Download Gregg Braden The Divine Matrix Pdf

          - -

          If you are ready to download Gregg Braden The Divine Matrix Pdf, then you can do so by following these simple steps:

          - -
            -
          1. Click on one of the links below that will take you to a website where you can download the book in PDF or EPUB format.
          2. Choose the format that suits your device and preferences.
          3. Download the file to your device or cloud storage.
          4. Enjoy reading the book and applying its teachings to your life.
          - -

          Here are some of the links where you can download Gregg Braden The Divine Matrix Pdf:

          -

          - - - -

          Conclusion

          - -

          Gregg Braden The Divine Matrix Pdf is a book that will change your life. It will show you how to connect with the web of energy that links everything in our lives and our world. It will teach you how to use this power to create joy, heal suffering, and bring peace to nations. It will inspire you with stories of people who have used this power to achieve amazing results. It will bridge science, spirituality, and miracles through the language of the divine matrix.

          - -

          If you want to download Gregg Braden The Divine Matrix Pdf, then you can do so by clicking on one of the links above. You will be able to choose between PDF or EPUB format, depending on your device and preferences. You will be able to download the file to your device or cloud storage. You will be able to enjoy reading the book and applying its teachings to your life.

          - -

          Don't miss this opportunity to download Gregg Braden The Divine Matrix Pdf. It is a book that will change your life for the better. It is a book that will show you how to connect with the divine matrix and use its power each day of your life.

          -
          What is the Divine Matrix?
          - -

          The divine matrix is the term that Gregg Braden uses to describe the web of energy that connects everything in our lives and our world. He explains that this matrix is not a new discovery, but a forgotten wisdom that has been known by ancient cultures for thousands of years. He also shows that this matrix is supported by the latest scientific findings in quantum physics, biology, and cosmology.

          - -

          The divine matrix is more than just a field of energy. It is also a mirror that reflects our thoughts, feelings, and beliefs. It is a bridge that links our inner and outer worlds. It is a language that we can use to communicate with the force that creates all of creation. It is a source of power that we can access to create our own reality and manifest our desires.

          - -
          How to Use the Divine Matrix?
          - -

          In his book Gregg Braden The Divine Matrix Pdf, Gregg Braden teaches us how to use the divine matrix to create joy, heal suffering, and bring peace to nations. He shares three keys that will help us unlock the power of the matrix:

          - -
            -
          • The first key is to understand that we are connected to everything and everyone through the divine matrix. We are not separate from the world, but part of it. We are not alone, but part of a larger whole.
          • The second key is to realize that our emotions, intentions, and beliefs are the language of the divine matrix. They are the signals that we send to the matrix, and they affect the reality that we experience. We can use this language to speak directly to the force that links all of creation.
          • The third key is to recognize that we have the power to change the world by changing ourselves. We can use the divine matrix to heal ourselves and others, to create our own reality, and to influence the events of our lives. We can use this power each day of our lives.
          - -

          By using these three keys, we can tap into the divine matrix and witness amazing results in our lives. We can also join Gregg Braden on his extraordinary journey bridging science, spirituality, and miracles through the language of the divine matrix.

          What are the Benefits of Using the Divine Matrix?

          Using the divine matrix can bring many benefits to our lives. Some of the benefits are:

          - -
            -
          • We can create joy in our lives by choosing positive emotions, intentions, and beliefs. We can also attract more joy into our lives by aligning ourselves with the frequency of joy in the divine matrix.
          • We can heal suffering in our lives by releasing negative emotions, intentions, and beliefs. We can also heal others by sending them healing energy through the divine matrix.
          • We can bring peace to nations by cultivating peace in ourselves. We can also influence the collective consciousness of humanity by sending peace signals to the divine matrix.
          • We can achieve our goals and dreams by using the divine matrix as a tool for manifestation. We can also co-create with the divine intelligence that guides all of creation.
          • We can discover our true potential and purpose by connecting with the divine matrix. We can also access our higher self and intuition by tuning into the divine matrix.
          - -

          These are just some of the benefits of using the divine matrix. There are many more that we can experience as we explore this amazing web of energy that connects everything in our lives and our world.


          Click on the link below and get your copy of Gregg Braden The Divine Matrix Pdf today!

          -
          -
          \ No newline at end of file diff --git a/spaces/llmonitor/benchmarks/app/prompts/new/page.js b/spaces/llmonitor/benchmarks/app/prompts/new/page.js deleted file mode 100644 index bcfd672dfb4467ab2ce8a3eefa5e695cee513ea6..0000000000000000000000000000000000000000 --- a/spaces/llmonitor/benchmarks/app/prompts/new/page.js +++ /dev/null @@ -1,47 +0,0 @@ -import UpvoteBtn from "@/components/UpvoteBtn" -import db from "@/utils/db" -import { cookies } from "next/headers" -import Link from "next/link" - -export default async function Dataset() { - const cookiesList = cookies() - - const logged = cookiesList.has("token") - - // get prompts with selected != true joined with sum of votes for each - const promptsWithVotes = - await db`SELECT prompts.*, COUNT(votes.id) AS votes FROM prompts LEFT JOIN votes ON prompts.id = votes.prompt WHERE prompts.selected IS NOT TRUE GROUP BY prompts.id ORDER BY votes DESC` - - return ( - <> - - - {promptsWithVotes.map((prompt, i) => ( - - - - - - ))} - -
          {i + 1} - {prompt.votes} points -
          - {logged ? ( - - ) : ( - - upvote - - )} -
          -
          {prompt.text}
          -
          - - ) -} diff --git a/spaces/luodian/LoRA-DreamBooth-Training-UI/constants.py b/spaces/luodian/LoRA-DreamBooth-Training-UI/constants.py deleted file mode 100644 index baaebbae71058fbb4faed35fd00e7559305dc409..0000000000000000000000000000000000000000 --- a/spaces/luodian/LoRA-DreamBooth-Training-UI/constants.py +++ /dev/null @@ -1,6 +0,0 @@ -import enum - - -class UploadTarget(enum.Enum): - PERSONAL_PROFILE = 'Personal Profile' - LORA_LIBRARY = 'LoRA Library' diff --git a/spaces/lwchen/CodeFormer/CodeFormer/facelib/detection/align_trans.py b/spaces/lwchen/CodeFormer/CodeFormer/facelib/detection/align_trans.py deleted file mode 100644 index 07f1eb365462c2ec5bbac6d1854c786b6fd6be90..0000000000000000000000000000000000000000 --- a/spaces/lwchen/CodeFormer/CodeFormer/facelib/detection/align_trans.py +++ /dev/null @@ -1,219 +0,0 @@ -import cv2 -import numpy as np - -from .matlab_cp2tform import get_similarity_transform_for_cv2 - -# reference facial points, a list of coordinates (x,y) -REFERENCE_FACIAL_POINTS = [[30.29459953, 51.69630051], [65.53179932, 51.50139999], [48.02519989, 71.73660278], - [33.54930115, 92.3655014], [62.72990036, 92.20410156]] - -DEFAULT_CROP_SIZE = (96, 112) - - -class FaceWarpException(Exception): - - def __str__(self): - return 'In File {}:{}'.format(__file__, super.__str__(self)) - - -def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False): - """ - Function: - ---------- - get reference 5 key points according to crop settings: - 0. Set default crop_size: - if default_square: - crop_size = (112, 112) - else: - crop_size = (96, 112) - 1. Pad the crop_size by inner_padding_factor in each side; - 2. Resize crop_size into (output_size - outer_padding*2), - pad into output_size with outer_padding; - 3. Output reference_5point; - Parameters: - ---------- - @output_size: (w, h) or None - size of aligned face image - @inner_padding_factor: (w_factor, h_factor) - padding factor for inner (w, h) - @outer_padding: (w_pad, h_pad) - each row is a pair of coordinates (x, y) - @default_square: True or False - if True: - default crop_size = (112, 112) - else: - default crop_size = (96, 112); - !!! 
make sure, if output_size is not None: - (output_size - outer_padding) - = some_scale * (default crop_size * (1.0 + - inner_padding_factor)) - Returns: - ---------- - @reference_5point: 5x2 np.array - each row is a pair of transformed coordinates (x, y) - """ - - tmp_5pts = np.array(REFERENCE_FACIAL_POINTS) - tmp_crop_size = np.array(DEFAULT_CROP_SIZE) - - # 0) make the inner region a square - if default_square: - size_diff = max(tmp_crop_size) - tmp_crop_size - tmp_5pts += size_diff / 2 - tmp_crop_size += size_diff - - if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]): - - return tmp_5pts - - if (inner_padding_factor == 0 and outer_padding == (0, 0)): - if output_size is None: - return tmp_5pts - else: - raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size)) - - # check output size - if not (0 <= inner_padding_factor <= 1.0): - raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)') - - if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None): - output_size = tmp_crop_size * \ - (1 + inner_padding_factor * 2).astype(np.int32) - output_size += np.array(outer_padding) - if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]): - raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])') - - # 1) pad the inner region according inner_padding_factor - if inner_padding_factor > 0: - size_diff = tmp_crop_size * inner_padding_factor * 2 - tmp_5pts += size_diff / 2 - tmp_crop_size += np.round(size_diff).astype(np.int32) - - # 2) resize the padded inner region - size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2 - - if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]: - raise FaceWarpException('Must have (output_size - outer_padding)' - '= some_scale * (crop_size * (1.0 + inner_padding_factor)') - - scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0] - tmp_5pts = tmp_5pts * scale_factor - # size_diff = tmp_crop_size * (scale_factor - min(scale_factor)) - # tmp_5pts = tmp_5pts + size_diff / 2 - tmp_crop_size = size_bf_outer_pad - - # 3) add outer_padding to make output_size - reference_5point = tmp_5pts + np.array(outer_padding) - tmp_crop_size = output_size - - return reference_5point - - -def get_affine_transform_matrix(src_pts, dst_pts): - """ - Function: - ---------- - get affine transform matrix 'tfm' from src_pts to dst_pts - Parameters: - ---------- - @src_pts: Kx2 np.array - source points matrix, each row is a pair of coordinates (x, y) - @dst_pts: Kx2 np.array - destination points matrix, each row is a pair of coordinates (x, y) - Returns: - ---------- - @tfm: 2x3 np.array - transform matrix from src_pts to dst_pts - """ - - tfm = np.float32([[1, 0, 0], [0, 1, 0]]) - n_pts = src_pts.shape[0] - ones = np.ones((n_pts, 1), src_pts.dtype) - src_pts_ = np.hstack([src_pts, ones]) - dst_pts_ = np.hstack([dst_pts, ones]) - - A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_) - - if rank == 3: - tfm = np.float32([[A[0, 0], A[1, 0], A[2, 0]], [A[0, 1], A[1, 1], A[2, 1]]]) - elif rank == 2: - tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]]) - - return tfm - - -def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='smilarity'): - """ - Function: - ---------- - apply affine transform 'trans' to uv - Parameters: - ---------- - @src_img: 3x3 np.array - input 
image - @facial_pts: could be - 1)a list of K coordinates (x,y) - or - 2) Kx2 or 2xK np.array - each row or col is a pair of coordinates (x, y) - @reference_pts: could be - 1) a list of K coordinates (x,y) - or - 2) Kx2 or 2xK np.array - each row or col is a pair of coordinates (x, y) - or - 3) None - if None, use default reference facial points - @crop_size: (w, h) - output face image size - @align_type: transform type, could be one of - 1) 'similarity': use similarity transform - 2) 'cv2_affine': use the first 3 points to do affine transform, - by calling cv2.getAffineTransform() - 3) 'affine': use all points to do affine transform - Returns: - ---------- - @face_img: output face image with size (w, h) = @crop_size - """ - - if reference_pts is None: - if crop_size[0] == 96 and crop_size[1] == 112: - reference_pts = REFERENCE_FACIAL_POINTS - else: - default_square = False - inner_padding_factor = 0 - outer_padding = (0, 0) - output_size = crop_size - - reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding, - default_square) - - ref_pts = np.float32(reference_pts) - ref_pts_shp = ref_pts.shape - if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2: - raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2') - - if ref_pts_shp[0] == 2: - ref_pts = ref_pts.T - - src_pts = np.float32(facial_pts) - src_pts_shp = src_pts.shape - if max(src_pts_shp) < 3 or min(src_pts_shp) != 2: - raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2') - - if src_pts_shp[0] == 2: - src_pts = src_pts.T - - if src_pts.shape != ref_pts.shape: - raise FaceWarpException('facial_pts and reference_pts must have the same shape') - - if align_type == 'cv2_affine': - tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3]) - elif align_type == 'affine': - tfm = get_affine_transform_matrix(src_pts, ref_pts) - else: - tfm = get_similarity_transform_for_cv2(src_pts, ref_pts) - - face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1])) - - return face_img diff --git a/spaces/m3hrdadfi/gpt2-persian-qa/regexes/__init__.py b/spaces/m3hrdadfi/gpt2-persian-qa/regexes/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/copy.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/copy.h deleted file mode 100644 index e22535618efd8c896b8e04ba21b636e4832743ea..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/copy.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
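# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the deleted align_trans.py above): aligning a
# detected face to a square 112x112 crop with get_reference_facial_points() and
# warp_and_crop_face(). The import path, the image file name and the five
# landmark coordinates are made-up placeholders; in practice the landmarks come
# from a face detector.
# ---------------------------------------------------------------------------
import cv2
import numpy as np
from facelib.detection.align_trans import get_reference_facial_points, warp_and_crop_face

src_img = cv2.imread('face.jpg')                       # hypothetical input image (BGR)
landmarks = np.array([[212.0, 250.0],                  # left eye
                      [282.0, 248.0],                  # right eye
                      [248.0, 290.0],                  # nose tip
                      [220.0, 330.0],                  # left mouth corner
                      [278.0, 328.0]])                 # right mouth corner

# reference landmark positions for a square 112x112 output grid
ref_pts = get_reference_facial_points(output_size=(112, 112),
                                      inner_padding_factor=0,
                                      outer_padding=(0, 0),
                                      default_square=True)

# similarity transform (the default align_type) from the landmarks to the reference points
aligned = warp_and_crop_face(src_img, landmarks,
                             reference_pts=ref_pts,
                             crop_size=(112, 112))
cv2.imwrite('face_aligned.jpg', aligned)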
- */ - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace generic -{ - - -template -__host__ __device__ - OutputIterator copy(thrust::execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator result); - - -template -__host__ __device__ - OutputIterator copy_n(thrust::execution_policy &exec, - InputIterator first, - Size n, - OutputIterator result); - - -} // end generic -} // end detail -} // end system -} // end thrust - -#include - diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/train_domain_A.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/train_domain_A.py deleted file mode 100644 index 45004938349d674227b2fac3ad9644370c9eda30..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/train_domain_A.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import time -from collections import OrderedDict -from options.train_options import TrainOptions -from data.data_loader import CreateDataLoader -from models.models import create_da_model -import util.util as util -from util.visualizer import Visualizer -import os -import numpy as np -import torch -import torchvision.utils as vutils -from torch.autograd import Variable - -opt = TrainOptions().parse() - -if opt.debug: - opt.display_freq = 1 - opt.print_freq = 1 - opt.niter = 1 - opt.niter_decay = 0 - opt.max_dataset_size = 10 - -data_loader = CreateDataLoader(opt) -dataset = data_loader.load_data() -dataset_size = len(dataset) * opt.batchSize -print('#training images = %d' % dataset_size) - -path = os.path.join(opt.checkpoints_dir, opt.name, 'model.txt') -visualizer = Visualizer(opt) - -iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt') -if opt.continue_train: - try: - start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int) - except: - start_epoch, epoch_iter = 1, 0 - visualizer.print_save('Resuming from epoch %d at iteration %d' % (start_epoch - 1, epoch_iter)) -else: - start_epoch, epoch_iter = 1, 0 - -# opt.which_epoch=start_epoch-1 -model = create_da_model(opt) -fd = open(path, 'w') -fd.write(str(model.module.netG)) -fd.write(str(model.module.netD)) -fd.close() - -total_steps = (start_epoch - 1) * dataset_size + epoch_iter - -display_delta = total_steps % opt.display_freq -print_delta = total_steps % opt.print_freq -save_delta = total_steps % opt.save_latest_freq - -for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1): - epoch_start_time = time.time() - if epoch != start_epoch: - epoch_iter = epoch_iter % dataset_size - for i, data in enumerate(dataset, start=epoch_iter): - iter_start_time = time.time() - total_steps += opt.batchSize - epoch_iter += opt.batchSize - - # whether to collect output images - save_fake = total_steps % opt.display_freq == display_delta - - ############## Forward Pass ###################### - losses, generated = model(Variable(data['label']), Variable(data['inst']), - Variable(data['image']), Variable(data['feat']), infer=save_fake) - - # sum per device losses - losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses] - loss_dict = dict(zip(model.module.loss_names, losses)) - - # calculate final loss scalar - loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5 - loss_featD=(loss_dict['featD_fake'] + loss_dict['featD_real']) * 0.5 - loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + 
loss_dict.get('G_VGG', 0) + loss_dict['G_KL'] + loss_dict['G_featD'] - - ############### Backward Pass #################### - # update generator weights - model.module.optimizer_G.zero_grad() - loss_G.backward() - model.module.optimizer_G.step() - - # update discriminator weights - model.module.optimizer_D.zero_grad() - loss_D.backward() - model.module.optimizer_D.step() - - model.module.optimizer_featD.zero_grad() - loss_featD.backward() - model.module.optimizer_featD.step() - - # call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"]) - - ############## Display results and errors ########## - ### print out errors - if total_steps % opt.print_freq == print_delta: - errors = {k: v.data if not isinstance(v, int) else v for k, v in loss_dict.items()} - t = (time.time() - iter_start_time) / opt.batchSize - visualizer.print_current_errors(epoch, epoch_iter, errors, t, model.module.old_lr) - visualizer.plot_current_errors(errors, total_steps) - - ### display output images - if save_fake: - - if not os.path.exists(opt.outputs_dir + opt.name): - os.makedirs(opt.outputs_dir + opt.name) - imgs_num = data['label'].shape[0] - imgs = torch.cat((data['label'], generated.data.cpu(), data['image']), 0) - - imgs = (imgs + 1.) / 2.0 - - try: - image_grid = vutils.save_image(imgs, opt.outputs_dir + opt.name + '/' + str(epoch) + '_' + str( - total_steps) + '.png', - nrow=imgs_num, padding=0, normalize=True) - except OSError as err: - print(err) - - - if epoch_iter >= dataset_size: - break - - # end of epoch - iter_end_time = time.time() - print('End of epoch %d / %d \t Time Taken: %d sec' % - (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time)) - - ### save model for this epoch - if epoch % opt.save_epoch_freq == 0: - print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps)) - model.module.save('latest') - model.module.save(epoch) - np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d') - - ### instead of only training the local enhancer, train the entire network after certain iterations - if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global): - model.module.update_fixed_params() - - ### linearly decay learning rate after certain iterations - if epoch > opt.niter: - model.module.update_learning_rate() - diff --git a/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/compression/encodec_base_24khz.py b/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/compression/encodec_base_24khz.py deleted file mode 100644 index 117b2b1e496ca31b3d614672b472c9213cedb4ad..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/compression/encodec_base_24khz.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Grid search file, simply list all the exp you want in `explorer`. -Any new exp added there will be scheduled. -You can cancel and experiment by commenting its line. - -This grid shows how to train a base causal EnCodec model at 24 kHz. 
-""" - -from ._explorers import CompressionExplorer -from ...environment import AudioCraftEnvironment - - -@CompressionExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=8, partition=partitions) - # base causal EnCodec trained on monophonic audio sampled at 24 kHz - launcher.bind_(solver='compression/encodec_base_24khz') - # replace this by the desired dataset - launcher.bind_(dset='audio/example') - # launch xp - launcher() diff --git a/spaces/maxmon/auto_anno/local_config.py b/spaces/maxmon/auto_anno/local_config.py deleted file mode 100644 index ef3bc5139204fad0b267bc0a91b81bea76803975..0000000000000000000000000000000000000000 --- a/spaces/maxmon/auto_anno/local_config.py +++ /dev/null @@ -1 +0,0 @@ -openai_key = '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' diff --git a/spaces/mbazaNLP/Speech-recognition-east-african-languages/app.py b/spaces/mbazaNLP/Speech-recognition-east-african-languages/app.py deleted file mode 100644 index db6d3ae96125fe47fbbfc49ec8f2896d078bcd8a..0000000000000000000000000000000000000000 --- a/spaces/mbazaNLP/Speech-recognition-east-african-languages/app.py +++ /dev/null @@ -1,37 +0,0 @@ -import gradio as gr -import nemo.collections.asr as nemo_asr -from pydub import AudioSegment -import pyaudioconvert as pac - - -hf_model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained( - model_name="mbazaNLP/stt_rw_sw_lg_conformer_ctc_large") - -def convert (audio): - file_name = audio.name - if file_name.endswith("mp3") or file_name.endswith("wav") or file_name.endswith("ogg"): - if file_name.endswith("mp3"): - sound = AudioSegment.from_mp3(audio.name) - sound.export(audio.name, format="wav") - elif file_name.endswith("ogg"): - sound = AudioSegment.from_ogg(audio.name) - sound.export(audio.name, format="wav") - else: - return False - pac.convert_wav_to_16bit_mono(audio.name,audio.name) - return True - -def transcribe(audio, audio_microphone): - audio = audio_microphone if audio_microphone else audio - if convert(audio)== False: - return "The format must be mp3,wav and ogg" - result= hf_model.transcribe([audio.name]) - return result[0] -gradio_ui = gr.Interface( - fn=transcribe, - title="East african languages Speech Recognition", - description="Upload an audio clip or record from browser using microphone, and let AI do the hard work of transcribing. The supported languages are Kinyarwanda, Swahili and Luganda", - inputs=[gr.inputs.Audio(label="Upload Audio File", type="file", optional=True), gr.inputs.Audio(source="microphone", type="file", optional=True, label="Record from microphone")], - outputs=[gr.outputs.Textbox(label="Recognized speech")] -) -gradio_ui.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/merve/data-leak/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/test.html b/spaces/merve/data-leak/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/test.html deleted file mode 100644 index bd51a96a0e44f236d2fef909e99ce49251683407..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/test.html +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - -
          - - - - - diff --git a/spaces/merve/sorting_hat/app.py b/spaces/merve/sorting_hat/app.py deleted file mode 100644 index 77347696e408b7c281353e7204166d950482d6c3..0000000000000000000000000000000000000000 --- a/spaces/merve/sorting_hat/app.py +++ /dev/null @@ -1,23 +0,0 @@ -from huggingface_hub import from_pretrained_keras -import gradio as gr -import numpy as np -import tensorflow - -model = from_pretrained_keras("merve/riddikulus") - -labels = {0:"Ravenclaw 🦅💙 ", 1:"Gryffindor 🦁", 2:"Ravenclaw 🦅💙",3:"Slytherin🐍💚", - 4:"Hufflepuff 🦡💛", 5:"Death eater detected! ", 6: "Hufflepuff 🦡💛", 7:"Slytherin🐍💚", - 8:"Ravenclaw 🦅💙", 9:"Gryffindor 🦁"} - -canvas = gr.inputs.Image(source="canvas", shape=(28,28)) -text = gr.outputs.Textbox() -def infer(image): - cls = np.argmax(model.predict(np.expand_dims(image, axis = 0)[:,:,:,1])) - if cls == 5: - output = "Death eater detected! 💀" - else: - cls = labels[cls] - output = f"Welcome to {cls}" - return output - -gr.Interface(infer, inputs=[canvas], outputs=[text], title="Welcome to Hogwarts Sorting Hat!", description="Draw something and let the sorting hat sort you! 🎩 ").launch() \ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/public/uncertainty-calibration/util.js b/spaces/merve/uncertainty-calibration/public/uncertainty-calibration/util.js deleted file mode 100644 index a0ce5b12a2a642f1186cc4004e90b046a89611f8..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/uncertainty-calibration/util.js +++ /dev/null @@ -1,38 +0,0 @@ -window.initUtil = function(){ - function addAxisLabel(c, xText, yText, xOffset=40, yOffset=-40){ - c.svg.select('.x').append('g') - .translate([c.width/2, xOffset]) - .append('text.axis-label') - .text(xText) - .at({textAnchor: 'middle'}) - .st({fill: '#000', fontSize: 14, fontFamily: 'sans-serif'}) - - c.svg.select('.y') - .append('g') - .translate([yOffset, c.height/2]) - .append('text.axis-label') - .text(yText) - .at({textAnchor: 'middle', transform: 'rotate(-90)'}) - .st({fill: '#000', fontSize: 14, fontFamily: 'sans-serif'}) - } - - function ggPlotBg(c, isBlack=true){ - if (isBlack){ - c.svg.append('rect.bg-rect') - .at({width: c.width, height: c.height, fill: '#eee'}) - .lower() - } - - c.svg.selectAll('.tick').selectAll('line').remove() - c.svg.selectAll('.y .tick') - .append('path').at({d: 'M 0 0 H ' + c.width, stroke: '#fff', strokeWidth: 1}) - c.svg.selectAll('.y text').at({x: -3}) - c.svg.selectAll('.x .tick') - .append('path').at({d: 'M 0 0 V -' + c.height, stroke: '#fff', strokeWidth: 1}) - } - - - return {addAxisLabel, ggPlotBg} -} - -if (window.init) window.init() \ No newline at end of file diff --git a/spaces/metricspace/OcTra/nnet/models_vc.py b/spaces/metricspace/OcTra/nnet/models_vc.py deleted file mode 100644 index 312741e90cd1fb2df816f595110e5b2503f378d9..0000000000000000000000000000000000000000 --- a/spaces/metricspace/OcTra/nnet/models_vc.py +++ /dev/null @@ -1,350 +0,0 @@ -import torch - -from torch import nn -from torch.nn import functional as F - -from nnet import commons -from nnet import modules - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from nnet.commons import init_weights, get_padding - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = 
hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class 
DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, 
partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - use_spk, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.use_spk = use_spk - - self.enc_p = Encoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if not self.use_spk: - self.enc_spk = SpeakerEncoder(model_hidden_size=gin_channels, model_embedding_size=gin_channels) - - def forward(self, c, spec, g=None, mel=None, c_lengths=None, spec_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if spec_lengths == None: - spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device) - - if not self.use_spk: - g = self.enc_spk(mel.transpose(1,2)) - g = g.unsqueeze(-1) - - _, m_p, logs_p, _ = self.enc_p(c, c_lengths) - z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g) - z_p = self.flow(z, spec_mask, g=g) - - z_slice, ids_slice = commons.rand_slice_segments(z, spec_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - - return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, c, g=None, mel=None, c_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if not self.use_spk: - g = self.enc_spk.embed_utterance(mel.transpose(1,2)) - g = g.unsqueeze(-1) - - z_p, m_p, logs_p, c_mask = self.enc_p(c, c_lengths) - z = self.flow(z_p, 
c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g) - - return o diff --git a/spaces/mfrashad/CharacterGAN/netdissect/__main__.py b/spaces/mfrashad/CharacterGAN/netdissect/__main__.py deleted file mode 100644 index e2bd9f630eaa0f45a6a201adcf356a1e092050cb..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/netdissect/__main__.py +++ /dev/null @@ -1,408 +0,0 @@ -import torch, sys, os, argparse, textwrap, numbers, numpy, json, PIL -from torchvision import transforms -from torch.utils.data import TensorDataset -from netdissect.progress import verbose_progress, print_progress -from netdissect import InstrumentedModel, BrodenDataset, dissect -from netdissect import MultiSegmentDataset, GeneratorSegRunner -from netdissect import ImageOnlySegRunner -from netdissect.parallelfolder import ParallelImageFolders -from netdissect.zdataset import z_dataset_for_model -from netdissect.autoeval import autoimport_eval -from netdissect.modelconfig import create_instrumented_model -from netdissect.pidfile import exit_if_job_done, mark_job_done - -help_epilog = '''\ -Example: to dissect three layers of the pretrained alexnet in torchvision: - -python -m netdissect \\ - --model "torchvision.models.alexnet(pretrained=True)" \\ - --layers features.6:conv3 features.8:conv4 features.10:conv5 \\ - --imgsize 227 \\ - --outdir dissect/alexnet-imagenet - -To dissect a progressive GAN model: - -python -m netdissect \\ - --model "proggan.from_pth_file('model/churchoutdoor.pth')" \\ - --gan -''' - -def main(): - # Training settings - def strpair(arg): - p = tuple(arg.split(':')) - if len(p) == 1: - p = p + p - return p - def intpair(arg): - p = arg.split(',') - if len(p) == 1: - p = p + p - return tuple(int(v) for v in p) - - parser = argparse.ArgumentParser(description='Net dissect utility', - prog='python -m netdissect', - epilog=textwrap.dedent(help_epilog), - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('--model', type=str, default=None, - help='constructor for the model to test') - parser.add_argument('--pthfile', type=str, default=None, - help='filename of .pth file for the model') - parser.add_argument('--unstrict', action='store_true', default=False, - help='ignore unexpected pth parameters') - parser.add_argument('--submodule', type=str, default=None, - help='submodule to load from pthfile') - parser.add_argument('--outdir', type=str, default='dissect', - help='directory for dissection output') - parser.add_argument('--layers', type=strpair, nargs='+', - help='space-separated list of layer names to dissect' + - ', in the form layername[:reportedname]') - parser.add_argument('--segments', type=str, default='dataset/broden', - help='directory containing segmentation dataset') - parser.add_argument('--segmenter', type=str, default=None, - help='constructor for asegmenter class') - parser.add_argument('--download', action='store_true', default=False, - help='downloads Broden dataset if needed') - parser.add_argument('--imagedir', type=str, default=None, - help='directory containing image-only dataset') - parser.add_argument('--imgsize', type=intpair, default=(227, 227), - help='input image size to use') - parser.add_argument('--netname', type=str, default=None, - help='name for network in generated reports') - parser.add_argument('--meta', type=str, nargs='+', - help='json files of metadata to add to report') - parser.add_argument('--merge', type=str, - help='json file of unit data to merge in report') - parser.add_argument('--examples', type=int, 
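# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the deleted models_vc.py above): running
# SynthesizerTrn.infer() on random tensors to check shapes. The import path and
# hyper-parameters mirror a typical FreeVC-style configuration and are
# assumptions; `c` stands in for SSL content features and `mel` for the
# reference speaker's mel-spectrogram consumed by the built-in speaker encoder.
# ---------------------------------------------------------------------------
import torch
from nnet.models_vc import SynthesizerTrn

model = SynthesizerTrn(
    spec_channels=641, segment_size=8960,
    inter_channels=192, hidden_channels=192, filter_channels=768,
    n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
    resblock='1', resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[10, 8, 2, 2], upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
    gin_channels=256, ssl_dim=1024, use_spk=False,
).eval()

c = torch.randn(1, 1024, 50)     # content features: [batch, ssl_dim, frames]
mel = torch.randn(1, 80, 300)    # reference mel:    [batch, 80, mel frames]

with torch.no_grad():
    audio = model.infer(c, mel=mel)
print(audio.shape)               # [1, 1, 16000]: 50 frames x 320 (product of upsample_rates)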
default=20, - help='number of image examples per unit') - parser.add_argument('--size', type=int, default=10000, - help='dataset subset size to use') - parser.add_argument('--batch_size', type=int, default=100, - help='batch size for forward pass') - parser.add_argument('--num_workers', type=int, default=24, - help='number of DataLoader workers') - parser.add_argument('--quantile_threshold', type=strfloat, default=None, - choices=[FloatRange(0.0, 1.0), 'iqr'], - help='quantile to use for masks') - parser.add_argument('--no-labels', action='store_true', default=False, - help='disables labeling of units') - parser.add_argument('--maxiou', action='store_true', default=False, - help='enables maxiou calculation') - parser.add_argument('--covariance', action='store_true', default=False, - help='enables covariance calculation') - parser.add_argument('--rank_all_labels', action='store_true', default=False, - help='include low-information labels in rankings') - parser.add_argument('--no-images', action='store_true', default=False, - help='disables generation of unit images') - parser.add_argument('--no-report', action='store_true', default=False, - help='disables generation report summary') - parser.add_argument('--no-cuda', action='store_true', default=False, - help='disables CUDA usage') - parser.add_argument('--gen', action='store_true', default=False, - help='test a generator model (e.g., a GAN)') - parser.add_argument('--gan', action='store_true', default=False, - help='synonym for --gen') - parser.add_argument('--perturbation', default=None, - help='filename of perturbation attack to apply') - parser.add_argument('--add_scale_offset', action='store_true', default=None, - help='offsets masks according to stride and padding') - parser.add_argument('--quiet', action='store_true', default=False, - help='silences console output') - if len(sys.argv) == 1: - parser.print_usage(sys.stderr) - sys.exit(1) - args = parser.parse_args() - args.images = not args.no_images - args.report = not args.no_report - args.labels = not args.no_labels - if args.gan: - args.gen = args.gan - - # Set up console output - verbose_progress(not args.quiet) - - # Exit right away if job is already done or being done. - if args.outdir is not None: - exit_if_job_done(args.outdir) - - # Speed up pytorch - torch.backends.cudnn.benchmark = True - - # Special case: download flag without model to test. - if args.model is None and args.download: - from netdissect.broden import ensure_broden_downloaded - for resolution in [224, 227, 384]: - ensure_broden_downloaded(args.segments, resolution, 1) - from netdissect.segmenter import ensure_upp_segmenter_downloaded - ensure_upp_segmenter_downloaded('dataset/segmodel') - sys.exit(0) - - # Help if broden is not present - if not args.gen and not args.imagedir and not os.path.isdir(args.segments): - print_progress('Segmentation dataset not found at %s.' 
% args.segments) - print_progress('Specify dataset directory using --segments [DIR]') - print_progress('To download Broden, run: netdissect --download') - sys.exit(1) - - # Default segmenter class - if args.gen and args.segmenter is None: - args.segmenter = ("netdissect.segmenter.UnifiedParsingSegmenter(" + - "segsizes=[256], segdiv='quad')") - - # Default threshold - if args.quantile_threshold is None: - if args.gen: - args.quantile_threshold = 'iqr' - else: - args.quantile_threshold = 0.005 - - # Set up CUDA - args.cuda = not args.no_cuda and torch.cuda.is_available() - if args.cuda: - torch.backends.cudnn.benchmark = True - - # Construct the network with specified layers instrumented - if args.model is None: - print_progress('No model specified') - sys.exit(1) - model = create_instrumented_model(args) - - # Update any metadata from files, if any - meta = getattr(model, 'meta', {}) - if args.meta: - for mfilename in args.meta: - with open(mfilename) as f: - meta.update(json.load(f)) - - # Load any merge data from files - mergedata = None - if args.merge: - with open(args.merge) as f: - mergedata = json.load(f) - - # Set up the output directory, verify write access - if args.outdir is None: - args.outdir = os.path.join('dissect', type(model).__name__) - exit_if_job_done(args.outdir) - print_progress('Writing output into %s.' % args.outdir) - os.makedirs(args.outdir, exist_ok=True) - train_dataset = None - - if not args.gen: - # Load dataset for classifier case. - # Load perturbation - perturbation = numpy.load(args.perturbation - ) if args.perturbation else None - segrunner = None - - # Load broden dataset - if args.imagedir is not None: - dataset = try_to_load_images(args.imagedir, args.imgsize, - perturbation, args.size) - segrunner = ImageOnlySegRunner(dataset) - else: - dataset = try_to_load_broden(args.segments, args.imgsize, 1, - perturbation, args.download, args.size) - if dataset is None: - dataset = try_to_load_multiseg(args.segments, args.imgsize, - perturbation, args.size) - if dataset is None: - print_progress('No segmentation dataset found in %s', - args.segments) - print_progress('use --download to download Broden.') - sys.exit(1) - else: - # For segmenter case the dataset is just a random z - dataset = z_dataset_for_model(model, args.size) - train_dataset = z_dataset_for_model(model, args.size, seed=2) - segrunner = GeneratorSegRunner(autoimport_eval(args.segmenter)) - - # Run dissect - dissect(args.outdir, model, dataset, - train_dataset=train_dataset, - segrunner=segrunner, - examples_per_unit=args.examples, - netname=args.netname, - quantile_threshold=args.quantile_threshold, - meta=meta, - merge=mergedata, - make_images=args.images, - make_labels=args.labels, - make_maxiou=args.maxiou, - make_covariance=args.covariance, - make_report=args.report, - make_row_images=args.images, - make_single_images=True, - rank_all_labels=args.rank_all_labels, - batch_size=args.batch_size, - num_workers=args.num_workers, - settings=vars(args)) - - # Mark the directory so that it's not done again. 
- mark_job_done(args.outdir) - -class AddPerturbation(object): - def __init__(self, perturbation): - self.perturbation = perturbation - - def __call__(self, pic): - if self.perturbation is None: - return pic - # Convert to a numpy float32 array - npyimg = numpy.array(pic, numpy.uint8, copy=False - ).astype(numpy.float32) - # Center the perturbation - oy, ox = ((self.perturbation.shape[d] - npyimg.shape[d]) // 2 - for d in [0, 1]) - npyimg += self.perturbation[ - oy:oy+npyimg.shape[0], ox:ox+npyimg.shape[1]] - # Pytorch conventions: as a float it should be [0..1] - npyimg.clip(0, 255, npyimg) - return npyimg / 255.0 - -def test_dissection(): - verbose_progress(True) - from torchvision.models import alexnet - from torchvision import transforms - model = InstrumentedModel(alexnet(pretrained=True)) - model.eval() - # Load an alexnet - model.retain_layers([ - ('features.0', 'conv1'), - ('features.3', 'conv2'), - ('features.6', 'conv3'), - ('features.8', 'conv4'), - ('features.10', 'conv5') ]) - # load broden dataset - bds = BrodenDataset('dataset/broden', - transform=transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(IMAGE_MEAN, IMAGE_STDEV)]), - size=100) - # run dissect - dissect('dissect/test', model, bds, - examples_per_unit=10) - -def try_to_load_images(directory, imgsize, perturbation, size): - # Load plain image dataset - # TODO: allow other normalizations. - return ParallelImageFolders( - [directory], - transform=transforms.Compose([ - transforms.Resize(imgsize), - AddPerturbation(perturbation), - transforms.ToTensor(), - transforms.Normalize(IMAGE_MEAN, IMAGE_STDEV)]), - size=size) - -def try_to_load_broden(directory, imgsize, broden_version, perturbation, - download, size): - # Load broden dataset - ds_resolution = (224 if max(imgsize) <= 224 else - 227 if max(imgsize) <= 227 else 384) - if not os.path.isfile(os.path.join(directory, - 'broden%d_%d' % (broden_version, ds_resolution), 'index.csv')): - return None - return BrodenDataset(directory, - resolution=ds_resolution, - download=download, - broden_version=broden_version, - transform=transforms.Compose([ - transforms.Resize(imgsize), - AddPerturbation(perturbation), - transforms.ToTensor(), - transforms.Normalize(IMAGE_MEAN, IMAGE_STDEV)]), - size=size) - -def try_to_load_multiseg(directory, imgsize, perturbation, size): - if not os.path.isfile(os.path.join(directory, 'labelnames.json')): - return None - minsize = min(imgsize) if hasattr(imgsize, '__iter__') else imgsize - return MultiSegmentDataset(directory, - transform=(transforms.Compose([ - transforms.Resize(minsize), - transforms.CenterCrop(imgsize), - AddPerturbation(perturbation), - transforms.ToTensor(), - transforms.Normalize(IMAGE_MEAN, IMAGE_STDEV)]), - transforms.Compose([ - transforms.Resize(minsize, interpolation=PIL.Image.NEAREST), - transforms.CenterCrop(imgsize)])), - size=size) - -def add_scale_offset_info(model, layer_names): - ''' - Creates a 'scale_offset' property on the model which guesses - how to offset the featuremap, in cases where the convolutional - padding does not exacly correspond to keeping featuremap pixels - centered on the downsampled regions of the input. This mainly - shows up in AlexNet: ResNet and VGG pad convolutions to keep - them centered and do not need this. 
- ''' - model.scale_offset = {} - seen = set() - sequence = [] - aka_map = {} - for name in layer_names: - aka = name - if not isinstance(aka, str): - name, aka = name - aka_map[name] = aka - for name, layer in model.named_modules(): - sequence.append(layer) - if name in aka_map: - seen.add(name) - aka = aka_map[name] - model.scale_offset[aka] = sequence_scale_offset(sequence) - for name in aka_map: - assert name in seen, ('Layer %s not found' % name) - -def dilation_scale_offset(dilations): - '''Composes a list of (k, s, p) into a single total scale and offset.''' - if len(dilations) == 0: - return (1, 0) - scale, offset = dilation_scale_offset(dilations[1:]) - kernel, stride, padding = dilations[0] - scale *= stride - offset *= stride - offset += (kernel - 1) / 2.0 - padding - return scale, offset - -def dilations(modulelist): - '''Converts a list of modules to (kernel_size, stride, padding)''' - result = [] - for module in modulelist: - settings = tuple(getattr(module, n, d) - for n, d in (('kernel_size', 1), ('stride', 1), ('padding', 0))) - settings = (((s, s) if not isinstance(s, tuple) else s) - for s in settings) - if settings != ((1, 1), (1, 1), (0, 0)): - result.append(zip(*settings)) - return zip(*result) - -def sequence_scale_offset(modulelist): - '''Returns (yscale, yoffset), (xscale, xoffset) given a list of modules''' - return tuple(dilation_scale_offset(d) for d in dilations(modulelist)) - - -def strfloat(s): - try: - return float(s) - except: - return s - -class FloatRange(object): - def __init__(self, start, end): - self.start = start - self.end = end - def __eq__(self, other): - return isinstance(other, float) and self.start <= other <= self.end - def __repr__(self): - return '[%g-%g]' % (self.start, self.end) - -# Many models use this normalization. -IMAGE_MEAN = [0.485, 0.456, 0.406] -IMAGE_STDEV = [0.229, 0.224, 0.225] - -if __name__ == '__main__': - main() diff --git a/spaces/mfrashad/ClothingGAN/models/biggan/pytorch_biggan/scripts/download_tf_hub_models.sh b/spaces/mfrashad/ClothingGAN/models/biggan/pytorch_biggan/scripts/download_tf_hub_models.sh deleted file mode 100644 index 57655fbd4b77791f03d72b3dfeb3bbb89ccc2fdc..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/ClothingGAN/models/biggan/pytorch_biggan/scripts/download_tf_hub_models.sh +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2019-present, Thomas Wolf, Huggingface Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# - -set -e -set -x - -models="128 256 512" - -mkdir -p models/model_128 -mkdir -p models/model_256 -mkdir -p models/model_512 - -# Download TF Hub models. 
-for model in $models -do - curl -L "https://tfhub.dev/deepmind/biggan-deep-$model/1?tf-hub-format=compressed" | tar -zxvC models/model_$model -done diff --git a/spaces/mithril-security/blind_chat/src/lib/server/websearch/summarizeWeb.ts b/spaces/mithril-security/blind_chat/src/lib/server/websearch/summarizeWeb.ts deleted file mode 100644 index 2998f79e6939f16f6d5c6ff2967bead5729470e7..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/src/lib/server/websearch/summarizeWeb.ts +++ /dev/null @@ -1,39 +0,0 @@ -import { HF_ACCESS_TOKEN } from "$env/static/private"; -import { HfInference } from "@huggingface/inference"; -import { defaultModel } from "$lib/server/models"; -import type { BackendModel } from "../models"; -import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint"; - -export async function summarizeWeb(content: string, query: string, model: BackendModel) { - // if HF_ACCESS_TOKEN is set, we use a HF dedicated endpoint for summarization - try { - if (HF_ACCESS_TOKEN) { - const summary = ( - await new HfInference(HF_ACCESS_TOKEN).summarization({ - model: "facebook/bart-large-cnn", - inputs: content, - parameters: { - max_length: 512, - }, - }) - ).summary_text; - return summary; - } - } catch (e) { - console.log(e); - } - - // else we use the LLM to generate a summary - const summaryPrompt = defaultModel.webSearchSummaryPromptRender({ - answer: content - .split(" ") - .slice(0, model.parameters?.truncate ?? 0) - .join(" "), - query: query, - }); - const summary = await generateFromDefaultEndpoint(summaryPrompt).then((txt: string) => - txt.trim() - ); - - return summary; -} diff --git a/spaces/monra/freegpt-webui/g4f/Provider/Providers/Vercel.py b/spaces/monra/freegpt-webui/g4f/Provider/Providers/Vercel.py deleted file mode 100644 index e5df9cf017e4c1a265f5c9d5e48eb5c10a56e60a..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui/g4f/Provider/Providers/Vercel.py +++ /dev/null @@ -1,162 +0,0 @@ -import os -import json -import base64 -import execjs -import queue -import threading - -from curl_cffi import requests -from ...typing import sha256, Dict, get_type_hints - -url = 'https://play.vercel.ai' -supports_stream = True -needs_auth = False - -models = { - 'claude-instant-v1': 'anthropic:claude-instant-v1', - 'claude-v1': 'anthropic:claude-v1', - 'alpaca-7b': 'replicate:replicate/alpaca-7b', - 'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b', - 'bloom': 'huggingface:bigscience/bloom', - 'bloomz': 'huggingface:bigscience/bloomz', - 'flan-t5-xxl': 'huggingface:google/flan-t5-xxl', - 'flan-ul2': 'huggingface:google/flan-ul2', - 'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b', - 'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', - 'santacoder': 'huggingface:bigcode/santacoder', - 'command-medium-nightly': 'cohere:command-medium-nightly', - 'command-xlarge-nightly': 'cohere:command-xlarge-nightly', - 'code-cushman-001': 'openai:code-cushman-001', - 'code-davinci-002': 'openai:code-davinci-002', - 'gpt-3.5-turbo': 'openai:gpt-3.5-turbo', - 'text-ada-001': 'openai:text-ada-001', - 'text-babbage-001': 'openai:text-babbage-001', - 'text-curie-001': 'openai:text-curie-001', - 'text-davinci-002': 'openai:text-davinci-002', - 'text-davinci-003': 'openai:text-davinci-003' -} -model = models.keys() - -vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 
'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. 
For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': { - 'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. 
# the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 
'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}} - - -# based on https://github.com/ading2210/vercel-llm-api // modified -class Client: - def __init__(self): - self.session = requests.Session() - self.headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US,en;q=0.5', - 'Te': 'trailers', - 'Upgrade-Insecure-Requests': '1' - } - self.session.headers.update(self.headers) - - def get_token(self): - b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text - data = json.loads(base64.b64decode(b64)) - - code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % ( - data['c'], data['a']) - - token_string = json.dumps(separators=(',', ':'), - obj={'r': execjs.compile(code).call('token'), 't': data['t']}) - - return base64.b64encode(token_string.encode()).decode() - - def get_default_params(self, model_id): - return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()} - - def generate(self, model_id: str, prompt: str, params: dict = {}): - if not ':' in model_id: - model_id = models[model_id] - - defaults = 
self.get_default_params(model_id) - - payload = defaults | params | { - 'prompt': prompt, - 'model': model_id, - } - - headers = self.headers | { - 'Accept-Encoding': 'gzip, deflate, br', - 'Custom-Encoding': self.get_token(), - 'Host': 'sdk.vercel.ai', - 'Origin': 'https://sdk.vercel.ai', - 'Referrer': 'https://sdk.vercel.ai', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-origin', - } - - chunks_queue = queue.Queue() - error = None - response = None - - def callback(data): - chunks_queue.put(data.decode()) - - def request_thread(): - nonlocal response, error - for _ in range(3): - try: - response = self.session.post('https://sdk.vercel.ai/api/generate', - json=payload, headers=headers, content_callback=callback) - response.raise_for_status() - - except Exception as e: - if _ == 2: - error = e - - else: - continue - - thread = threading.Thread(target=request_thread, daemon=True) - thread.start() - - text = '' - index = 0 - while True: - try: - chunk = chunks_queue.get(block=True, timeout=0.1) - - except queue.Empty: - if error: - raise error - - elif response: - break - - else: - continue - - text += chunk - lines = text.split('\n') - - if len(lines) - 1 > index: - new = lines[index:-1] - for word in new: - yield json.loads(word) - index = len(lines) - 1 - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - yield 'Vercel is currently not working.' - return - - conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n' - - for message in messages: - conversation += '%s: %s\n' % (message['role'], message['content']) - - conversation += 'assistant: ' - - completion = Client().generate(model, conversation) - - for token in completion: - yield token - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py b/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py deleted file mode 100644 index a30254604311a488a1d4959f941051890ed32b2e..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import logging -from pathlib import Path -from collections import defaultdict -from typing import List, Dict, Tuple - -import pandas as pd -import numpy as np -import torchaudio -from tqdm import tqdm - -from examples.speech_to_text.data_utils import load_df_from_tsv, save_df_to_tsv - - -log = logging.getLogger(__name__) - -SPLITS = ["train", "dev", "test"] - - -def get_top_n( - root: Path, n_speakers: int = 10, min_n_tokens: int = 5 -) -> pd.DataFrame: - df = load_df_from_tsv(root / "validated.tsv") - df["n_tokens"] = [len(s.split()) for s in df["sentence"]] - df = df[df["n_tokens"] >= min_n_tokens] - df["n_frames"] = [ - torchaudio.info((root / "clips" / p).as_posix()).num_frames - for p in tqdm(df["path"]) - ] - df["id"] = [Path(p).stem for p in df["path"]] - total_duration_ms = df.groupby("client_id")["n_frames"].agg(["sum"]) - total_duration_ms = total_duration_ms.sort_values("sum", ascending=False) - - top_n_total_duration_ms = total_duration_ms.head(n_speakers) - top_n_client_ids = set(top_n_total_duration_ms.index.tolist()) - df_top_n = df[df["client_id"].isin(top_n_client_ids)] - return df_top_n - - -def get_splits( - df, train_split_ratio=0.99, speaker_in_all_splits=False, rand_seed=0 -) -> Tuple[Dict[str, str], List[str]]: - np.random.seed(rand_seed) - dev_split_ratio = (1. - train_split_ratio) / 3 - grouped = list(df.groupby("client_id")) - id_to_split = {} - for _, cur_df in tqdm(grouped): - cur_n_examples = len(cur_df) - if speaker_in_all_splits and cur_n_examples < 3: - continue - cur_n_train = int(cur_n_examples * train_split_ratio) - cur_n_dev = int(cur_n_examples * dev_split_ratio) - cur_n_test = cur_n_examples - cur_n_dev - cur_n_train - if speaker_in_all_splits and cur_n_dev * cur_n_test == 0: - cur_n_dev, cur_n_test = 1, 1 - cur_n_train = cur_n_examples - cur_n_dev - cur_n_test - cur_indices = cur_df.index.tolist() - cur_shuffled_indices = np.random.permutation(cur_n_examples) - cur_shuffled_indices = [cur_indices[i] for i in cur_shuffled_indices] - cur_indices_by_split = { - "train": cur_shuffled_indices[:cur_n_train], - "dev": cur_shuffled_indices[cur_n_train: cur_n_train + cur_n_dev], - "test": cur_shuffled_indices[cur_n_train + cur_n_dev:] - } - for split in SPLITS: - for i in cur_indices_by_split[split]: - id_ = df["id"].loc[i] - id_to_split[id_] = split - return id_to_split, sorted(df["client_id"].unique()) - - -def convert_to_wav(root: Path, filenames: List[str], target_sr=16_000): - out_root = root / "wav" - out_root.mkdir(exist_ok=True, parents=True) - print("Converting to WAV...") - for n in tqdm(filenames): - in_path = (root / "clips" / n).as_posix() - waveform, sr = torchaudio.load(in_path) - converted, converted_sr = torchaudio.sox_effects.apply_effects_tensor( - waveform, sr, [["rate", str(target_sr)], ["channels", "1"]] - ) - out_path = (out_root / Path(n).with_suffix(".wav").name).as_posix() - torchaudio.save(out_path, converted, converted_sr, encoding="PCM_S", - bits_per_sample=16) - - -def process(args): - data_root = Path(args.data_root).absolute() / args.lang - - # Generate TSV manifest - print("Generating manifest...") - - df_top_n = get_top_n(data_root) - id_to_split, speakers = get_splits(df_top_n) - - if args.convert_to_wav: - convert_to_wav(data_root, df_top_n["path"].tolist()) - - manifest_by_split = {split: defaultdict(list) for split in SPLITS} - for sample in tqdm(df_top_n.to_dict(orient="index").values()): - sample_id = sample["id"] - split = id_to_split[sample_id] - manifest_by_split[split]["id"].append(sample_id) - if 
args.convert_to_wav: - audio_path = data_root / "wav" / f"{sample_id}.wav" - else: - audio_path = data_root / "clips" / f"{sample_id}.mp3" - manifest_by_split[split]["audio"].append(audio_path.as_posix()) - manifest_by_split[split]["n_frames"].append(sample["n_frames"]) - manifest_by_split[split]["tgt_text"].append(sample["sentence"]) - manifest_by_split[split]["speaker"].append(sample["client_id"]) - manifest_by_split[split]["src_text"].append(sample["sentence"]) - - output_root = Path(args.output_manifest_root).absolute() - output_root.mkdir(parents=True, exist_ok=True) - for split in SPLITS: - save_df_to_tsv( - pd.DataFrame.from_dict(manifest_by_split[split]), - output_root / f"{split}.audio.tsv" - ) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--data-root", "-d", required=True, type=str) - parser.add_argument("--output-manifest-root", "-m", required=True, type=str) - parser.add_argument("--lang", "-l", required=True, type=str) - parser.add_argument("--convert-to-wav", action="store_true") - args = parser.parse_args() - - process(args) - - -if __name__ == "__main__": - main() diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/roberta/model_xlmr.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/roberta/model_xlmr.py deleted file mode 100644 index cf6e354d53b918dd4c7c78bfcd38ac0d63cab3bd..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/roberta/model_xlmr.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Unsupervised Cross-lingual Representation Learning at Scale -""" - -from fairseq.models import register_model - -from .hub_interface import RobertaHubInterface -from .model import RobertaModel - - -@register_model("xlmr") -class XLMRModel(RobertaModel): - @classmethod - def hub_models(cls): - return { - "xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz", - "xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz", - "xlmr.xl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz", - "xlmr.xxl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz", - } - - @classmethod - def from_pretrained( - cls, - model_name_or_path, - checkpoint_file="model.pt", - data_name_or_path=".", - bpe="sentencepiece", - **kwargs - ): - from fairseq import hub_utils - - x = hub_utils.from_pretrained( - model_name_or_path, - checkpoint_file, - data_name_or_path, - archive_map=cls.hub_models(), - bpe=bpe, - load_checkpoint_heads=True, - **kwargs, - ) - return RobertaHubInterface(x["args"], x["task"], x["models"][0]) diff --git a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/src/components/helpers/maskUtils.tsx b/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/src/components/helpers/maskUtils.tsx deleted file mode 100644 index 709c77e28d2f3fbe457742dcfd2dccf28923e4a5..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/src/components/helpers/maskUtils.tsx +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) Meta Platforms, Inc. and affiliates. -// All rights reserved. - -// This source code is licensed under the license found in the -// LICENSE file in the root directory of this source tree. 
- -// Convert the onnx model mask prediction to ImageData -function arrayToImageData(input: any, width: number, height: number) { - const [r, g, b, a] = [0, 114, 189, 255]; // the masks's blue color - const arr = new Uint8ClampedArray(4 * width * height).fill(0); - for (let i = 0; i < input.length; i++) { - - // Threshold the onnx model mask prediction at 0.0 - // This is equivalent to thresholding the mask using predictor.model.mask_threshold - // in python - if (input[i] > 0.0) { - arr[4 * i + 0] = r; - arr[4 * i + 1] = g; - arr[4 * i + 2] = b; - arr[4 * i + 3] = a; - } - } - return new ImageData(arr, height, width); -} - -// Use a Canvas element to produce an image from ImageData -function imageDataToImage(imageData: ImageData) { - const canvas = imageDataToCanvas(imageData); - const image = new Image(); - image.src = canvas.toDataURL(); - return image; -} - -// Canvas elements can be created from ImageData -function imageDataToCanvas(imageData: ImageData) { - const canvas = document.createElement("canvas"); - const ctx = canvas.getContext("2d"); - canvas.width = imageData.width; - canvas.height = imageData.height; - ctx?.putImageData(imageData, 0, 0); - return canvas; -} - -// Convert the onnx model mask output to an HTMLImageElement -export function onnxMaskToImage(input: any, width: number, height: number) { - return imageDataToImage(arrayToImageData(input, width, height)); -} diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ACDC The Complete Collection Collection 2012torrent.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ACDC The Complete Collection Collection 2012torrent.md deleted file mode 100644 index 46580d52cb7ae06d1078ef32b4777aacacff572a..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ACDC The Complete Collection Collection 2012torrent.md +++ /dev/null @@ -1,14 +0,0 @@ -
          -

          AC/DC: The Complete Collection Review

          -

          AC/DC is one of the most iconic rock bands of all time, with a legacy of hard-hitting songs, electrifying performances and unforgettable albums. The Complete Collection is a 2012 box set that contains all 16 studio albums, four live albums and three EPs by the Australian legends, spanning from 1974 to 2008. It is a must-have for any fan of AC/DC or classic rock in general.

          -

          The Complete Collection showcases the evolution of AC/DC's sound and style, from the raw and bluesy early days with Bon Scott on vocals, to the more polished and powerful era with Brian Johnson. The box set includes some of the most influential and popular rock albums ever made, such as Highway to Hell, Back in Black, For Those About to Rock We Salute You and The Razor's Edge. It also features some of the band's lesser-known but equally impressive works, such as Powerage, Flick of the Switch and Stiff Upper Lip.

          -

          ACDC The Complete Collection Collection 2012torrent


          Download File https://urlcod.com/2uIbwz



          -

The box set also contains some rare and live material that showcases AC/DC's incredible energy and charisma on stage. The Live at River Plate album captures the band's triumphant return to Argentina in 2009, where they played to over 200,000 fans. The Live from the Atlantic Studios album features an intimate performance from 1977, where the band played some of their classic songs in front of a small audience. The '74 Jailbreak EP contains some of the band's earliest recordings that were not released in the US until 1984.

          -

The Complete Collection is a comprehensive and definitive collection of AC/DC's music that celebrates their remarkable career and impact on rock history. It is a treasure trove of timeless tunes, riffs and lyrics that will never go out of style. It is a tribute to the band's enduring spirit and passion for rock 'n' roll.

          The Complete Collection is not only a great way to enjoy AC/DC's music, but also a valuable piece of rock memorabilia. The box set comes in a sturdy and stylish black case, with the band's logo and name embossed on the front. Inside, each album is presented in a mini-LP replica sleeve, with the original artwork and liner notes. The box set also includes a 24-page booklet, with photos, credits and an introduction by David Fricke.

          -

          The Complete Collection is a must-have for any AC/DC fan or collector, as it offers the ultimate AC/DC experience. It is a testament to the band's longevity and relevance, as they continue to rock the world with their music. It is a perfect gift for yourself or someone you love, who appreciates the power and glory of AC/DC.

          -

          If you are looking for a way to get your hands on this amazing box set, you can order it online from various retailers, such as Amazon, eBay or Discogs. You can also check your local record stores or music shops for availability. The price may vary depending on the seller and the condition of the box set, but it is worth every penny for the quality and quantity of music you get.

          -

          -

          Don't miss this opportunity to own a piece of rock history, and enjoy the music of one of the greatest rock bands of all time. The Complete Collection by AC/DC is a box set that will make you feel like you are on a highway to hell, in a good way.

          e93f5a0c3f
          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Grigorigrabovoinumberspdf58 ((BETTER)).md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Grigorigrabovoinumberspdf58 ((BETTER)).md deleted file mode 100644 index 8f1d1982fa9302b64484cf8ed5958aa4e05cf7ab..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Grigorigrabovoinumberspdf58 ((BETTER)).md +++ /dev/null @@ -1,28 +0,0 @@ - -

          What are Grabovoi Numbers and How to Use Them for Manifestation?

          -

          Grabovoi numbers are special sequences of numbers that can help you manifest your desires into reality. They are based on the teachings of Grigori Grabovoi, a Russian scientist and healer who claimed to have the ability to heal people and restore matter using his mind and numerical codes. Grabovoi numbers are also known as "cheat codes to the universe" because they can help you bypass any obstacles or limitations that may be blocking your manifestation.

          -

          In this article, we will explain what Grabovoi numbers are, how they work, and how to use them effectively for manifestation. We will also provide you with a list of some common Grabovoi numbers that you can use for various purposes, such as money, love, health, and more. Finally, we will share with you a free PDF of 58 Grabovoi numbers that you can download and use anytime you want.

          -

          grigorigrabovoinumberspdf58


          DOWNLOADhttps://urlcod.com/2uI9xi



          -

          What are Grabovoi Numbers?

          -

          Grabovoi numbers are sequences of digits that have specific vibrational frequencies that correspond to different aspects of reality. By focusing on these numbers, you can align your own vibration with the vibration of your desired outcome and attract it into your life. According to Grabovoi, each number has a unique meaning and function, and by combining them in different ways, you can create complex codes that can address any situation or problem.

          -

          For example, the number 1 represents the beginning, unity, wholeness, and God. The number 2 represents duality, balance, harmony, and cooperation. The number 3 represents creativity, expression, communication, and growth. And so on. By using these numbers in specific combinations, you can create codes that can help you manifest anything you want.

          -

          How do Grabovoi Numbers Work?

          -

          Grabovoi numbers work by using the power of your mind and intention to influence reality. According to Grabovoi, everything in the universe is made of information and energy, and by changing the information and energy of something, you can change its physical manifestation. He also claimed that he could access the universal database of information and energy using his mind and numerical codes.

          -

          By using Grabovoi numbers, you can tap into this universal database and access the information and energy of your desired outcome. You can then use your intention and visualization to transfer this information and energy into your own reality and manifest it. The more you focus on the numbers and their meanings, the more you align your vibration with them and attract them into your life.

          -

          How to Use Grabovoi Numbers Effectively for Manifestation?

          -

          There are many ways to use Grabovoi numbers for manifestation, but here are some general guidelines that can help you get started:

          -
            -
          • Choose a Grabovoi number that matches your intention or goal. You can use one of the common numbers listed below or create your own code by combining different numbers.
          • -
          • Write down the number on a piece of paper or on your phone. You can also draw it on your skin, wear it on a bracelet or necklace, or place it under your pillow.
          • -
          • Focus on the number and its meaning for at least 5 minutes a day. You can repeat it out loud or in your mind, visualize it in front of you or in your third eye, or meditate on it.
          • -
          • While focusing on the number, imagine that you already have what you want. Feel the emotions of gratitude, joy, love, and satisfaction that come with having your desire. See yourself living your dream life as if it is already happening.
          • -
          • Let go of any doubts, fears, or worries that may arise. Trust that the universe is working in your favor and that everything is possible with Grabovoi numbers.
          • -
          • Be open to receive your manifestation in any way that it may come. Don't limit yourself to a specific time frame or method. Be flexible and adaptable to the signs and opportunities that may show up along the way.
          • -
          -

          List of Common Grabovoi Numbers

          -

          Here are some common Grabovoi numbers that you can use for different purposes:


          cec2833e83
          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/NewAutoCADRevitLTSuite2008key.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/NewAutoCADRevitLTSuite2008key.md deleted file mode 100644 index 462e0504fdb3e18d2f28b9287c9ed23d412f5974..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/NewAutoCADRevitLTSuite2008key.md +++ /dev/null @@ -1,30 +0,0 @@ -
          -

          New AutoCAD Revit LT Suite 2008 Key: How to Find and Activate It

          -

If you are looking for a new product key for AutoCAD Revit LT Suite 2008, you may have some trouble finding it online. This is because Autodesk does not support product versions that are more than five years old, and AutoCAD Revit LT Suite 2008 is more than a decade old. However, there are still some ways to get a new product key and activate your software.

          -

          NewAutoCADRevitLTSuite2008key


          Download File ››››› https://urlcod.com/2uI9Y7



          -

          One way is to use the interactive lookup tool on Autodesk's website[^1^]. This tool allows you to find product keys for your Autodesk products by selecting the correct product name and year (version). You will need to enter your serial number and request code to get your activation code. If you get an error when you enter your product key, make sure you specified the correct product (or suite) and version in the lookup tool.

          -

          Another way is to contact Autodesk support or your reseller[^2^]. They may be able to provide you with a new product key or help you with the activation process. However, this may take some time and they may not be able to assist you with such an old product version. You may also need to provide proof of purchase or subscription.

          -

          A third way is to upgrade to a newer version of AutoCAD Revit LT Suite[^3^]. This will give you access to the latest features and updates, as well as a valid product key and activation code. You can purchase a subscription or a perpetual license from Autodesk's website or your reseller. You may also be eligible for a discount if you trade in your old license.

          -

          Whichever way you choose, make sure you do not use any illegal or pirated software or product keys. This may expose you to security risks, malware, errors, and legal consequences. Always use genuine Autodesk software and product keys from authorized sources.

          - -

          AutoCAD Revit LT Suite is a software package that combines AutoCAD LT and Revit LT, two powerful tools for 2D and 3D design and documentation. AutoCAD LT allows you to create precise 2D drawings and edit them with ease. Revit LT allows you to create 3D models and generate high-quality renderings and animations. With AutoCAD Revit LT Suite, you can benefit from both workflows and collaborate with other professionals.

          -

          The latest version of AutoCAD Revit LT Suite is 2022, which was released in April 2021. This version offers several improvements and new features, such as:

          -

          -
            -
          • Enhanced performance and stability for both AutoCAD LT and Revit LT.
          • -
          • New drawing history feature in AutoCAD LT that lets you compare past and present versions of your drawings and see the changes.
          • -
          • New generative design feature in Revit LT that lets you explore design options based on your goals and constraints.
          • -
          • New cloud collaboration feature in Revit LT that lets you share your models and work with others online.
          • -
          • New integration with Autodesk Docs that lets you access and manage your project data from anywhere.
          • -
          -

          If you want to upgrade to AutoCAD Revit LT Suite 2022, you have two options: subscription or perpetual license. A subscription gives you access to the latest software updates, technical support, cloud services, and more. A perpetual license gives you ownership of the software without expiration, but you will need to pay for maintenance plans to get updates and support. You can choose the option that suits your budget and needs.

          -

          If you have an old license of AutoCAD Revit LT Suite 2008 or any other Autodesk product, you may be able to trade it in for a discount on a new subscription or perpetual license. This is part of Autodesk's trade-in offer, which aims to help customers transition to the latest software versions. To qualify for the trade-in offer, you need to meet certain criteria, such as:

          -
            -
          • Your old license must be a perpetual license that is not on an active maintenance plan.
          • -
          • Your old license must be for a product that is 14 years old or less (2008 or later).
          • -
          • Your old license must be registered under your name or your company's name.
          • -
          • You must agree to discontinue using your old license after trading it in.
          • -
          -

          If you meet these criteria, you can contact Autodesk or your reseller to request a quote for the trade-in offer. You will need to provide your old serial number and product key. You can then choose the new product and license type that you want to purchase with the discount. The trade-in offer is valid until July 23, 2021, so act fast if you want to take advantage of it.

          e93f5a0c3f
          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Olai Chuvadi Tamil Book Free Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Olai Chuvadi Tamil Book Free Download.md deleted file mode 100644 index a05a584bf62ce3d56e081e9da5194ff93a5bdfc9..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Olai Chuvadi Tamil Book Free Download.md +++ /dev/null @@ -1,26 +0,0 @@ - -Here is a possible title and article for the keyword "Olai Chuvadi Tamil Book Free Download". I have used SEO optimization techniques such as using the keyword in the title, headings, and body text, as well as adding relevant links and images. I have also used HTML formatting to create a structured and appealing layout. - -```html -

          Olai Chuvadi Tamil Book Free Download: A Guide to the Ancient Palm Leaf Manuscripts

          -

          Olai Chuvadi is a Tamil term that means "palm leaf manuscript". These are ancient texts that were written on dried palm leaves using a stylus. They contain various forms of knowledge, such as astrology, medicine, history, literature, and spirituality. Olai Chuvadi is considered to be a treasure of Tamil culture and heritage.

          -A palm leaf manuscript -

          If you are interested in learning more about Olai Chuvadi and reading some of the manuscripts yourself, you might be wondering where you can find them. Fortunately, there are some online sources that offer Olai Chuvadi Tamil book free download. In this article, we will introduce you to some of these websites and how you can access them.

          -

          Olai Chuvadi Tamil Book Free Download


          Download Zip ——— https://urlcod.com/2uI9vB



          -

          Olai Chuvadi Tamil Book Free Download: Where to Find Them Online

          -

          There are several websites that offer Olai Chuvadi Tamil book free download. However, not all of them are reliable or authentic. Some of them may contain errors, incomplete texts, or low-quality scans. Therefore, it is important to be careful and selective when choosing a website to download Olai Chuvadi from.

          -

          Here are some of the websites that we recommend for Olai Chuvadi Tamil book free download:

          -

          -
            -
          • Tamil Virtual Academy: This is an official website of the Government of Tamil Nadu that aims to preserve and promote Tamil language and culture. It has a large collection of Olai Chuvadi manuscripts on various topics, such as astrology, medicine, grammar, poetry, and religion. You can browse the manuscripts by category or search by title or author. You can also view the scanned images of the original palm leaves or download them as PDF files.
          • Scribd: This is a popular online platform that allows users to upload and share documents, books, magazines, and other types of content. It has a few Olai Chuvadi manuscripts that have been uploaded by users. You can read them online or download them as PDF files. However, you may need to create an account or pay a subscription fee to access some of the content.
          • Internet Archive: This is a non-profit organization that provides free access to millions of digital books, movies, music, and other media. It has a few Olai Chuvadi manuscripts that have been scanned and uploaded by users. You can view them online or download them as PDF files.
          -

          Olai Chuvadi Tamil Book Free Download: How to Read Them

          -

          Once you have downloaded the Olai Chuvadi manuscripts from one of the websites mentioned above, you might be wondering how to read them. After all, they are written in an ancient script and language that may not be familiar to modern readers.

          -

          Here are some tips on how to read Olai Chuvadi manuscripts:

          -
            -
          1. Learn the basics of Tamil script and language: If you do not know how to read Tamil script or understand the Tamil language, you may need to learn some basics before attempting to read Olai Chuvadi manuscripts. You can find some online resources that teach you how to read and write Tamil script here and how to speak and understand the Tamil language here.
          2. Use a dictionary or a translator: If you encounter words or phrases that you do not understand in the Olai Chuvadi manuscripts, you can use a dictionary or a translator to help you.

            7196e7f11a
            -
            -
            \ No newline at end of file diff --git a/spaces/nomic-ai/kunishou_databricks-dolly-15k-ja/style.css b/spaces/nomic-ai/kunishou_databricks-dolly-15k-ja/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/kunishou_databricks-dolly-15k-ja/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/util/readers.py b/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/util/readers.py deleted file mode 100644 index 71bb1cd3840ff390d4ca186b42d920c1e65494a0..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/data/util/readers.py +++ /dev/null @@ -1,527 +0,0 @@ -import os -import sys -sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # NOQA -import argparse -from math import ceil -from glob import glob - -import numpy as np -import cv2 -from PIL import Image, ImageDraw, ImageOps, ImageFont - -from utils.logging_config import logger -from utils.util import make_dirs, bbox_offset - - -DEFAULT_FPS = 6 -MAX_LENGTH = 60 - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - '-fps', '--fps', - type=int, default=DEFAULT_FPS, - help="Output video FPS" - ) - parser.add_argument( - '-v', '--video_dir', - type=str, - help="Video directory name" - ) - parser.add_argument( - '-vs', '--video_dirs', - nargs='+', - type=str, - help="Video directory names" - ) - parser.add_argument( - '-v2', '--video_dir2', - type=str, - help="Video directory name" - ) - parser.add_argument( - '-sd', '--segms_dir', - type=str, - help="Segmentation directory name" - ) - parser.add_argument( - '-fgd', '--fg_dir', - type=str, - help="Foreground directory name" - ) - parser.add_argument( - '-fgfd', '--fg_frames_dir', - type=str, - help="Foreground frames directory name" - ) - parser.add_argument( - '-fgsd', '--fg_segms_dir', - type=str, - help="Foreground segmentations directory name" - ) - parser.add_argument( - '-syfd', '--syn_frames_dir', - type=str, - help="Synthesized frames directory name" - ) - parser.add_argument( - '-bgfd', '--bg_frames_dir', - type=str, - help="Background frames directory name" - ) - parser.add_argument( - '-rt', '--reader_type', - type=str, - help="Type of reader" - ) - parser.add_argument( - '-od', '--output_dir', - type=str, - help="Output directory name" - ) - parser.add_argument( - '-o', '--output_filename', - type=str, required=True, - help="Output output filename" - ) - args = parser.parse_args() - return args - - -class Reader: - def __init__(self, dir_name, read=True, max_length=None, sample_period=1): - self.dir_name = dir_name - self.count = 0 - self.max_length = max_length - self.filenames = [] - self.sample_period = sample_period - if read: - if os.path.exists(dir_name): - # self.filenames = read_filenames_from_dir(dir_name, self.__class__.__name__) - # ^^^^^ yield None when reading some videos of face forensics data - # (related to 'Too many levels of symbolic links'?) 
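-                # Fall back to a plain directory listing: sort the glob results, keep
-                # regular files only, take every `sample_period`-th entry, and truncate
-                # to `max_length` before reading the files.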
- - self.filenames = sorted(glob(os.path.join(dir_name, '*'))) - self.filenames = [f for f in self.filenames if os.path.isfile(f)] - self.filenames = self.filenames[::sample_period][:max_length] - self.files = self.read_files(self.filenames) - else: - self.files = [] - logger.warning(f"Directory {dir_name} not exists!") - else: - self.files = [] - self.current_index = 0 - - def append(self, file_): - self.files.append(file_) - - def set_files(self, files): - self.files = files - - def read_files(self, filenames): - assert type(filenames) == list, f'filenames is not a list; dirname: {self.dir_name}' - filenames.sort() - frames = [] - for filename in filenames: - file_ = self.read_file(filename) - frames.append(file_) - return frames - - def save_files(self, output_dir=None): - make_dirs(output_dir) - logger.info(f"Saving {self.__class__.__name__} files to {output_dir}") - for i, file_ in enumerate(self.files): - self._save_file(output_dir, i, file_) - - def _save_file(self, output_dir, i, file_): - raise NotImplementedError("This is an abstract function") - - def read_file(self, filename): - raise NotImplementedError("This is an abstract function") - - def __iter__(self): - return self - - def __next__(self): - if self.current_index < len(self.files): - file_ = self.files[self.current_index] - self.current_index += 1 - return file_ - else: - self.current_index = 0 - raise StopIteration - - def __getitem__(self, key): - return self.files[key] - - def __len__(self): - return len(self.files) - - -class FrameReader(Reader): - def __init__( - self, dir_name, resize=None, read=True, max_length=MAX_LENGTH, - scale=1, sample_period=1 - ): - self.resize = resize - self.scale = scale - self.sample_period = sample_period - super().__init__(dir_name, read, max_length, sample_period) - - def read_file(self, filename): - origin_frame = Image.open(filename) - size = self.resize if self.resize is not None else origin_frame.size - origin_frame_resized = origin_frame.resize( - (int(size[0] * self.scale), int(size[1] * self.scale)) - ) - return origin_frame_resized - - def _save_file(self, output_dir, i, file_): - if len(self.filenames) == len(self.files): - name = sorted(self.filenames)[i].split('/')[-1] - else: - name = f"frame_{i:04}.png" - filename = os.path.join( - output_dir, name - ) - file_.save(filename, "PNG") - - def write_files_to_video(self, output_filename, fps=DEFAULT_FPS, frame_num_when_repeat_list=[1]): - logger.info( - f"Writeing frames to video {output_filename} with FPS={fps}") - video_writer = cv2.VideoWriter( - output_filename, - cv2.VideoWriter_fourcc(*"MJPG"), - fps, - self.files[0].size - ) - for frame_num_when_repeat in frame_num_when_repeat_list: - for frame in self.files: - frame = frame.convert("RGB") - frame_cv = np.array(frame) - frame_cv = cv2.cvtColor(frame_cv, cv2.COLOR_RGB2BGR) - for i in range(frame_num_when_repeat): - video_writer.write(frame_cv) - video_writer.release() - - -class SynthesizedFrameReader(FrameReader): - def __init__( - self, bg_frames_dir, fg_frames_dir, - fg_segms_dir, segm_bbox_mask_dir, fg_dir, dir_name, - bboxes_list_dir, - fg_scale=0.7, fg_location=(48, 27), mask_only=False - ): - self.bg_reader = FrameReader(bg_frames_dir) - self.size = self.bg_reader[0].size - # TODO: add different location and change scale to var - self.fg_reader = ForegroundReader( - fg_frames_dir, fg_segms_dir, fg_dir, - resize=self.size, - scale=fg_scale - ) - self.fg_location = fg_location - # self.masks = self.fg_reader.masks - # self.bbox_masks = self.fg_reader.bbox_masks - 
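-        # Initialise the base Reader without reading from disk; the frames are built
-        # below by pasting each foreground frame (through its segmentation mask) onto
-        # the corresponding background frame at `fg_location`.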
super().__init__(dir_name, read=False) - self.files = self.synthesize_frames( - self.bg_reader, self.fg_reader, mask_only) - self.bbox_masks = MaskGenerator( - segm_bbox_mask_dir, self.size, self.get_bboxeses() - ) - self.bboxes_list_dir = bboxes_list_dir - self.bboxes_list = self.get_bboxeses() - self.save_bboxes() - - def save_bboxes(self): - make_dirs(self.bboxes_list_dir) - logger.info(f"Saving bboxes to {self.bboxes_list_dir}") - for i, bboxes in enumerate(self.bboxes_list): - save_path = os.path.join(self.bboxes_list_dir, f"bboxes_{i:04}.txt") - if len(bboxes) > 0: - np.savetxt(save_path, bboxes[0], fmt='%4u') - - def get_bboxeses(self): - bboxeses = self.fg_reader.segms.bboxeses - new_bboxeses = [] - for bboxes in bboxeses: - new_bboxes = [] - for bbox in bboxes: - offset_bbox = bbox_offset(bbox, self.fg_location) - new_bboxes.append(offset_bbox) - new_bboxeses.append(new_bboxes) - return new_bboxeses - - def synthesize_frames(self, bg_reader, fg_reader, mask_only=False): - logger.info( - f"Synthesizing {bg_reader.dir_name} and {fg_reader.dir_name}" - ) - synthesized_frames = [] - for i, bg in enumerate(bg_reader): - if i == len(fg_reader): - break - fg = fg_reader[i] - mask = fg_reader.get_mask(i) - synthesized_frame = bg.copy() - if mask_only: - synthesized_frame.paste(mask, self.fg_location, mask) - else: - synthesized_frame.paste(fg, self.fg_location, mask) - synthesized_frames.append(synthesized_frame) - return synthesized_frames - - -class WarpedFrameReader(FrameReader): - def __init__(self, dir_name, i, ks): - self.i = i - self.ks = ks - super().__init__(dir_name) - - def _save_file(self, output_dir, i, file_): - filename = os.path.join( - output_dir, - f"warped_frame_{self.i:04}_k{self.ks[i]:02}.png" - ) - file_.save(filename) - - -class SegmentationReader(FrameReader): - def __init__( - self, dir_name, - resize=None, scale=1 - ): - super().__init__( - dir_name, resize=resize, scale=scale - ) - - def read_file(self, filename): - origin_frame = Image.open(filename) - mask = ImageOps.invert(origin_frame.convert("L")) - mask = mask.point(lambda x: 0 if x < 255 else 255, '1') - size = self.resize if self.resize is not None else origin_frame.size - mask_resized = mask.resize( - (int(size[0] * self.scale), int(size[1] * self.scale)) - ) - return mask_resized - - -class MaskReader(Reader): - def __init__(self, dir_name, read=True): - super().__init__(dir_name, read=read) - - def read_file(self, filename): - mask = Image.open(filename) - return mask - - def _save_file(self, output_dir, i, file_): - filename = os.path.join( - output_dir, - f"mask_{i:04}.png" - ) - file_.save(filename) - - def get_bboxes(self, i): - # TODO: save bbox instead of looking for one - mask = self.files[i] - mask = ImageOps.invert(mask.convert("L")).convert("1") - mask = np.array(mask) - image, contours, hier = cv2.findContours( - mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) - bboxes = [] - for c in contours: - # get the bounding rect - x, y, w, h = cv2.boundingRect(c) - bbox = ((x, y), (x + w - 1, y + h - 1)) - bboxes.append(bbox) - return bboxes - - def get_bbox(self, i): - # TODO: save bbox instead of looking for one - mask = self.files[i] - mask = ImageOps.invert(mask.convert("L")) - mask = np.array(mask) - image, contours, hier = cv2.findContours( - mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) - for c in contours: - # get the bounding rect - x, y, w, h = cv2.boundingRect(c) - bbox = ((x, y), (x + w - 1, y + h - 1)) - return bbox - - -class MaskGenerator(Reader): - def __init__( - self, 
mask_output_dir, size, bboxeses, save_masks=True - ): - self.bboxeses = bboxeses - self.size = size - super().__init__(mask_output_dir, read=False) - self.files = self.generate_masks() - if save_masks: - make_dirs(mask_output_dir) - self.save_files(mask_output_dir) - - def _save_file(self, output_dir, i, file_): - filename = os.path.join( - output_dir, - f"mask_{i:04}.png" - ) - file_.save(filename) - - def get_bboxes(self, i): - return self.bboxeses[i] - - def generate_masks(self): - masks = [] - for i in range(len(self.bboxeses)): - mask = self.generate_mask(i) - masks.append(mask) - return masks - - def generate_mask(self, i): - bboxes = self.bboxeses[i] - mask = Image.new("1", self.size, 1) - draw = ImageDraw.Draw(mask) - for bbox in bboxes: - draw.rectangle( - bbox, fill=0 - ) - return mask - - -class ForegroundReader(FrameReader): - def __init__( - self, frames_dir, segms_dir, dir_name, - resize=None, scale=1 - ): - self.frames_dir = frames_dir - self.segms_dir = segms_dir - self.frames = FrameReader( - frames_dir, - resize=resize, scale=scale - ) - self.segms = SegmentationReader( - segms_dir, resize=resize, scale=scale - ) - super().__init__(dir_name, read=False) - self.masks = self.segms.masks - # self.bbox_masks = self.segms.bbox_masks - self.files = self.generate_fg_frames(self.frames, self.segms) - - def get_mask(self, i): - return self.masks[i] - - def generate_fg_frames(self, frames, segms): - logger.info( - f"Generating fg frames from {self.frames_dir} and {self.segms_dir}" - ) - fg_frames = [] - for i, frame in enumerate(frames): - mask = self.masks[i] - fg_frame = Image.new("RGB", frame.size, (0, 0, 0)) - fg_frame.paste( - frame, (0, 0), - mask - ) - fg_frames.append(fg_frame) - return fg_frames - - -class CompareFramesReader(FrameReader): - def __init__(self, dir_names, col=2, names=[], mask_dir=None): - self.videos = [] - for dir_name in dir_names: - # If a method fails on this video, use None to indicate the situation - try: - self.videos.append(FrameReader(dir_name)) - except AssertionError: - self.videos.append(None) - if mask_dir is not None: - self.masks = MaskReader(mask_dir) - self.names = names - self.files = self.combine_videos(self.videos, col) - - def combine_videos(self, videos, col=2, edge_offset=35, h_start_offset=35): - combined_frames = [] - w, h = videos[0][0].size - # Prevent the first method fails and have a "None" as its video - i = 0 - while videos[i] is None: - i += 1 - length = len(videos[i]) - video_num = len(videos) - row = ceil(video_num / col) - for frame_idx in range(length): - width = col * w + (col - 1) * edge_offset - height = row * h + (row - 1) * edge_offset + h_start_offset - combined_frame = Image.new("RGBA", (width, height)) - draw = ImageDraw.Draw(combined_frame) - for i, video in enumerate(videos): - # Give the failed method a black output - if video is None or frame_idx >= len(video): - failed = True - frame = Image.new("RGBA", (w, h)) - else: - frame = video[frame_idx].convert("RGBA") - failed = False - - f_x = (i % col) * (w + edge_offset) - f_y = (i // col) * (h + edge_offset) + h_start_offset - combined_frame.paste(frame, (f_x, f_y)) - - # Draw name - font = ImageFont.truetype("DejaVuSans.ttf", 12) - # font = ImageFont.truetype("DejaVuSans-Bold.ttf", 13) - # font = ImageFont.truetype("timesbd.ttf", 14) - name = self.names[i] if not failed else f'{self.names[i]} (failed)' - draw.text( - (f_x + 10, f_y - 20), - name, (255, 255, 255), font=font - ) - - combined_frames.append(combined_frame) - return combined_frames - - -class 
BoundingBoxesListReader(Reader): - def __init__( - self, dir_name, resize=None, read=True, max_length=MAX_LENGTH, - scale=1 - ): - self.resize = resize - self.scale = scale - super().__init__(dir_name, read, max_length) - - def read_file(self, filename): - bboxes = np.loadtxt(filename, dtype=int) - bboxes = [bboxes.tolist()] - return bboxes - - -def save_frames_to_dir(frames, dirname): - reader = FrameReader(dirname, read=False) - reader.set_files(frames) - reader.save_files(dirname) - - -if __name__ == "__main__": - args = parse_args() - if args.reader_type is None: - reader = FrameReader(args.video_dir) - elif args.reader_type == 'fg': - reader = ForegroundReader( - args.video_dir, args.segms_dir, args.fg_dir) - elif args.reader_type == 'sy': - reader = SynthesizedFrameReader( - args.bg_frames_dir, args.fg_frames_dir, - args.fg_segms_dir, args.fg_dir, args.syn_frames_dir - ) - elif args.reader_type == 'com': - reader = CompareFramesReader( - args.video_dirs - ) - reader.write_files_to_video( - os.path.join(args.output_dir, args.output_filename), - fps=args.fps - ) diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/utils/fbConsistencyCheck.py b/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/utils/fbConsistencyCheck.py deleted file mode 100644 index 7d5b546c6124c8380e13f985199cf079350a8d2d..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/utils/fbConsistencyCheck.py +++ /dev/null @@ -1,127 +0,0 @@ -import torch -import numpy as np -from .sobel2 import SobelLayer, SeperateSobelLayer -import torch.nn as nn -import torch.nn.functional as F - - -def image_warp(image, flow): - ''' - image: 上一帧的图片,torch.Size([1, 3, 256, 256]) - flow: 光流, torch.Size([1, 2, 256, 256]) - final_grid: torch.Size([1, 2, 256, 256]) - ''' - b, c, h, w = image.size() - device = image.device - flow = torch.cat([flow[:, 0:1, :, :] / ((w - 1.0) / 2.0), flow[:, 1:2, :, :] / ((h - 1.0) / 2.0)], - dim=1) # normalize to [-1~1](from upper left to lower right - flow = flow.permute(0, 2, 3, - 1) # if you wanna use grid_sample function, the channel(band) shape of show must be in the last dimension - x = np.linspace(-1, 1, w) - y = np.linspace(-1, 1, h) - X, Y = np.meshgrid(x, y) - grid = torch.cat((torch.from_numpy(X.astype('float32')).unsqueeze(0).unsqueeze(3), - torch.from_numpy(Y.astype('float32')).unsqueeze(0).unsqueeze(3)), 3).to(device) - output = torch.nn.functional.grid_sample(image, grid + flow, mode='bilinear', padding_mode='zeros') - return output - - -def length_sq(x): - return torch.sum(torch.square(x), dim=1, keepdim=True) - - -def fbConsistencyCheck(flow_fw, flow_bw, alpha1=0.01, alpha2=0.5): - flow_bw_warped = image_warp(flow_bw, flow_fw) # wb(wf(x)) - flow_fw_warped = image_warp(flow_fw, flow_bw) # wf(wb(x)) - flow_diff_fw = flow_fw + flow_bw_warped # wf + wb(wf(x)) - flow_diff_bw = flow_bw + flow_fw_warped # wb + wf(wb(x)) - - mag_sq_fw = length_sq(flow_fw) + length_sq(flow_bw_warped) # |wf| + |wb(wf(x))| - mag_sq_bw = length_sq(flow_bw) + length_sq(flow_fw_warped) # |wb| + |wf(wb(x))| - occ_thresh_fw = alpha1 * mag_sq_fw + alpha2 - occ_thresh_bw = alpha1 * mag_sq_bw + alpha2 - - fb_occ_fw = (length_sq(flow_diff_fw) > occ_thresh_fw).float() - fb_occ_bw = (length_sq(flow_diff_bw) > occ_thresh_bw).float() - - return fb_occ_fw, fb_occ_bw # fb_occ_fw -> frame2 area occluded by frame1, fb_occ_bw -> frame1 area occluded by frame2 - - -def rgb2gray(image): - gray_image = image[:, 0] * 0.299 + image[:, 1] * 0.587 + 0.110 * image[:, 
2] - gray_image = gray_image.unsqueeze(1) - return gray_image - - -def ternary_transform(image, max_distance=1): - device = image.device - patch_size = 2 * max_distance + 1 - intensities = rgb2gray(image) * 255 - out_channels = patch_size * patch_size - w = np.eye(out_channels).reshape(out_channels, 1, patch_size, patch_size) - weights = torch.from_numpy(w).float().to(device) - patches = F.conv2d(intensities, weights, stride=1, padding=1) - transf = patches - intensities - transf_norm = transf / torch.sqrt(0.81 + torch.square(transf)) - return transf_norm - - -def hamming_distance(t1, t2): - dist = torch.square(t1 - t2) - dist_norm = dist / (0.1 + dist) - dist_sum = torch.sum(dist_norm, dim=1, keepdim=True) - return dist_sum - - -def create_mask(mask, paddings): - """ - padding: [[top, bottom], [left, right]] - """ - shape = mask.shape - inner_height = shape[2] - (paddings[0][0] + paddings[0][1]) - inner_width = shape[3] - (paddings[1][0] + paddings[1][1]) - inner = torch.ones([inner_height, inner_width]) - - mask2d = F.pad(inner, pad=[paddings[1][0], paddings[1][1], paddings[0][0], paddings[0][1]]) # mask最外边一圈都pad成0了 - mask3d = mask2d.unsqueeze(0) - mask4d = mask3d.unsqueeze(0).repeat(shape[0], 1, 1, 1) - return mask4d.detach() - - -def ternary_loss2(frame1, warp_frame21, confMask, masks, max_distance=1): - """ - - Args: - frame1: torch tensor, with shape [b * t, c, h, w] - warp_frame21: torch tensor, with shape [b * t, c, h, w] - confMask: confidence mask, with shape [b * t, c, h, w] - masks: torch tensor, with shape [b * t, c, h, w] - max_distance: maximum distance. - - Returns: ternary loss - - """ - t1 = ternary_transform(frame1) - t21 = ternary_transform(warp_frame21) - dist = hamming_distance(t1, t21) # 近似求解,其实利用了mask区域和外界边缘交叉的那一部分像素 - loss = torch.mean(dist * confMask * masks) / torch.mean(masks) - return loss - - -def gradient_loss(frame1, frame2, confMask): - device = frame1.device - frame1_edge = SobelLayer(device)(frame1) - frame2_edge = SobelLayer(device)(frame2) - loss = torch.sum(torch.abs(frame1_edge * confMask - frame2_edge * confMask)) / (torch.sum(confMask) + 1) # escape divide 0 - return loss - - -def seperate_gradient_loss(frame1, warp_frame21, confMask): - device = frame1.device - mask_x = create_mask(frame1, [[0, 0], [1, 1]]).to(device) - mask_y = create_mask(frame1, [[1, 1], [0, 0]]).to(device) - gradient_mask = torch.cat([mask_x, mask_y], dim=1).repeat(1, 3, 1, 1) - frame1_edge = SeperateSobelLayer(device)(frame1) - warp_frame21_edge = SeperateSobelLayer(device)(warp_frame21) - loss = nn.L1Loss()(frame1_edge * confMask * gradient_mask, warp_frame21_edge * confMask * gradient_mask) - return loss diff --git a/spaces/omartine/prompt-generator/app.py b/spaces/omartine/prompt-generator/app.py deleted file mode 100644 index a535880ec59270152e13461316b7ad0f06004eb9..0000000000000000000000000000000000000000 --- a/spaces/omartine/prompt-generator/app.py +++ /dev/null @@ -1,56 +0,0 @@ -from transformers import pipeline, set_seed -import gradio as grad -import random -import re - -gpt2_pipe = pipeline('text-generation', model='succinctly/text2image-prompt-generator') - -with open("name.txt", "r") as f: - line = f.readlines() - - -def generate(starting_text): - for count in range(6): - seed = random.randint(100, 1000000) - set_seed(seed) - - # If the text field is empty - if starting_text == "": - starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize() - starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text) - 
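-            # Print the randomly chosen seed phrase so it is visible in the console output.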
print(starting_text) - - response = gpt2_pipe(starting_text, max_length=random.randint(60, 90), num_return_sequences=8) - response_list = [] - for x in response: - resp = x['generated_text'].strip() - if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False: - response_list.append(resp) - - response_end = "\n".join(response_list) - response_end = re.sub('[^ ]+\.[^ ]+','', response_end) - response_end = response_end.replace("<", "").replace(">", "") - if response_end != "": - return response_end - if count == 5: - return response_end - - -txt = grad.Textbox(lines=1, label="English", placeholder="English Text here") -out = grad.Textbox(lines=6, label="Generated Text") -examples = [["mythology of the Slavs"], ["All-seeing eye monitors these world"], ["astronaut dog"], - ["A monochrome forest of ebony trees"], ["sad view of worker in office,"], - ["Headshot photo portrait of John Lennon"], ["wide field with thousands of blue nemophila,"]] -title = "Midjourney Prompt Generator" -description = "This is an unofficial demo for Midjourney Prompt Generator. To use it, simply send your text, or click one of the examples to load them. Read more at the links below.
            Model: https://huggingface.co/succinctly/text2image-prompt-generator
            Telegram bot: https://t.me/prompt_generator_bot
            [![](https://img.shields.io/twitter/follow/DoEvent?label=@DoEvent&style=social)](https://twitter.com/DoEvent)" -article = "
            visitor badge
            " - -grad.Interface(fn=generate, - inputs=txt, - outputs=out, - examples=examples, - title=title, - description=description, - article=article, - allow_flagging='never', - cache_examples=False).queue(concurrency_count=1, api_open=False).launch(show_api=False, show_error=True) diff --git a/spaces/openai/openai-detector/detector/index.html b/spaces/openai/openai-detector/detector/index.html deleted file mode 100644 index 92f352e1dfe6f88db541d7910eed4fb6e2108455..0000000000000000000000000000000000000000 --- a/spaces/openai/openai-detector/detector/index.html +++ /dev/null @@ -1,158 +0,0 @@ - - - -GPT-2 Output Detector - - - - -
            -

            GPT-2 Output Detector Demo

            -

            - This is an online demo of the - GPT-2 output detector - model, based on the 🤗/Transformers - implementation of RoBERTa. - Enter some text in the text box; the predicted probabilities will be displayed below. - The results start to get reliable after around 50 tokens. -

            - -
            - - - - - - - - - - -
            RealFake
            -
            - - - diff --git a/spaces/openai/whisper/README.md b/spaces/openai/whisper/README.md deleted file mode 100644 index 9908d27e54703e581dad9e88b7d14dc5a987ddae..0000000000000000000000000000000000000000 --- a/spaces/openai/whisper/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Whisper -emoji: 📉 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/perilli/tortoise-tts-v2/do_tts.py b/spaces/perilli/tortoise-tts-v2/do_tts.py deleted file mode 100644 index fa0347e64c587786a90eeb053f7efb388f323bf9..0000000000000000000000000000000000000000 --- a/spaces/perilli/tortoise-tts-v2/do_tts.py +++ /dev/null @@ -1,34 +0,0 @@ -import argparse -import os - -import torchaudio - -from api import TextToSpeech -from utils.audio import load_audio, get_voices - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--text', type=str, help='Text to speak.', default="I am a language model that has learned to speak.") - parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) ' - 'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='pat') - parser.add_argument('--preset', type=str, help='Which voice preset to use.', default='standard') - parser.add_argument('--voice_diversity_intelligibility_slider', type=float, - help='How to balance vocal diversity with the quality/intelligibility of the spoken text. 0 means highly diverse voice (not recommended), 1 means maximize intellibility', - default=.5) - parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/') - args = parser.parse_args() - os.makedirs(args.output_path, exist_ok=True) - - tts = TextToSpeech() - - voices = get_voices() - selected_voices = args.voice.split(',') - for voice in selected_voices: - cond_paths = voices[voice] - conds = [] - for cond_path in cond_paths: - c = load_audio(cond_path, 22050) - conds.append(c) - gen = tts.tts_with_preset(args.text, conds, preset=args.preset, clvp_cvvp_slider=args.voice_diversity_intelligibility_slider) - torchaudio.save(os.path.join(args.output_path, f'{voice}.wav'), gen.squeeze(0).cpu(), 24000) - diff --git a/spaces/peteralexandercharles/wav2vec2-uk-demo/inference.py b/spaces/peteralexandercharles/wav2vec2-uk-demo/inference.py deleted file mode 100644 index de9ed1de822a59a31d4232cc6ea55bdf929b2597..0000000000000000000000000000000000000000 --- a/spaces/peteralexandercharles/wav2vec2-uk-demo/inference.py +++ /dev/null @@ -1,68 +0,0 @@ -import argparse -import torch -import torchaudio -from pathlib import Path -from transformers import Wav2Vec2ProcessorWithLM, Wav2Vec2ForCTC - - -def main(args): - processor = Wav2Vec2ProcessorWithLM.from_pretrained(args.model_id) - model = Wav2Vec2ForCTC.from_pretrained(args.model_id) - model.to('cpu') - - files = args.path_files.split(',') - - for path_file in files: - print('File:', path_file) - - wav_file_path = str(Path(path_file).absolute()) - waveform, sample_rate = torchaudio.load(wav_file_path) - - if sample_rate != 16000: - resample = torchaudio.transforms.Resample( - sample_rate, 16000, resampling_method='sinc_interpolation') - speech_array = resample(waveform) - sp = speech_array.squeeze().numpy() - else: - sp = waveform.squeeze().numpy() - - # stride_length_s is a tuple of the 
left and right stride length. - # With only 1 number, both sides get the same stride, by default - # the stride_length on one side is 1/6th of the chunk_length_s - input_values = processor(sp, - sample_rate=16000, - chunk_length_s=args.chunk_length_s, - stride_length_s=(args.stride_length_s_l, args.stride_length_s_r), - return_tensors="pt").input_values - - with torch.no_grad(): - logits = model(input_values).logits - - prediction = processor.batch_decode(logits.numpy()).text - print(prediction[0]) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--path_files", type=str, required=True, help="WAV files to transcribe, separated by a comma" - ) - parser.add_argument( - "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" - ) - parser.add_argument( - "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." - ) - parser.add_argument( - "--stride_length_s_l", type=int, default=None, help="Stride of the audio chunks, left value." - ) - parser.add_argument( - "--stride_length_s_r", type=int, default=None, help="Stride of the audio chunks, right value." - ) - parser.add_argument( - "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." - ) - args = parser.parse_args() - - main(args) diff --git a/spaces/pix2pix-zero-library/pix2pix-zero-demo/utils/gradio_utils.py b/spaces/pix2pix-zero-library/pix2pix-zero-demo/utils/gradio_utils.py deleted file mode 100644 index 1545d586bf46249c6d484ccd2f7abfee3c5ec02c..0000000000000000000000000000000000000000 --- a/spaces/pix2pix-zero-library/pix2pix-zero-demo/utils/gradio_utils.py +++ /dev/null @@ -1,616 +0,0 @@ -import gradio as gr - -def set_visible_true(): - return gr.update(visible=True) - -def set_visible_false(): - return gr.update(visible=False) - - -# HTML_header = f""" -# -#
            -#
            -#

            -# Zero-shot Image-to-Image Translation -#

            -#
            -#

            -# This is the demo for pix2pix-zero. -# Please visit our website and github for more details. -#

            -#

            -# pix2pix-zero is a diffusion-based image-to-image approach that allows users to specify the edit direction on-the-fly -# (e.g., cat to dog). Our method can directly use pre-trained text-to-image diffusion models, such as Stable Diffusion, -# for editing real and synthetic images while preserving the input image's structure. Our method is training-free and prompt-free, -# as it requires neither manual text prompting for each input image nor costly fine-tuning for each task. -#

            - -#
            -# """ - - - - -CSS_main = """ - body { - font-family: "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "Lucida Grande", sans-serif; - font-weight:300; - font-size:18px; - margin-left: auto; - margin-right: auto; - padding-left: 10px; - padding-right: 10px; - width: 800px; - } - - h1 { - font-size:32px; - font-weight:300; - text-align: center; - } - - h2 { - font-size:32px; - font-weight:300; - text-align: center; - } - - #lbl_gallery_input{ - font-family: 'Helvetica', 'Arial', sans-serif; - text-align: center; - color: #fff; - font-size: 28px; - display: inline - } - - - #lbl_gallery_comparision{ - font-family: 'Helvetica', 'Arial', sans-serif; - text-align: center; - color: #fff; - font-size: 28px; - } - - .disclaimerbox { - background-color: #eee; - border: 1px solid #eeeeee; - border-radius: 10px ; - -moz-border-radius: 10px ; - -webkit-border-radius: 10px ; - padding: 20px; - } - - video.header-vid { - height: 140px; - border: 1px solid black; - border-radius: 10px ; - -moz-border-radius: 10px ; - -webkit-border-radius: 10px ; - } - - img.header-img { - height: 140px; - border: 1px solid black; - border-radius: 10px ; - -moz-border-radius: 10px ; - -webkit-border-radius: 10px ; - } - - img.rounded { - border: 1px solid #eeeeee; - border-radius: 10px ; - -moz-border-radius: 10px ; - -webkit-border-radius: 10px ; - } - - a:link - { - color: #941120; - text-decoration: none; - } - a:visited - { - color: #941120; - text-decoration: none; - } - a:hover { - color: #941120; - } - - td.dl-link { - height: 160px; - text-align: center; - font-size: 22px; - } - - .layered-paper-big { /* modified from: http://css-tricks.com/snippets/css/layered-paper/ */ - box-shadow: - 0px 0px 1px 1px rgba(0,0,0,0.35), /* The top layer shadow */ - 5px 5px 0 0px #fff, /* The second layer */ - 5px 5px 1px 1px rgba(0,0,0,0.35), /* The second layer shadow */ - 10px 10px 0 0px #fff, /* The third layer */ - 10px 10px 1px 1px rgba(0,0,0,0.35), /* The third layer shadow */ - 15px 15px 0 0px #fff, /* The fourth layer */ - 15px 15px 1px 1px rgba(0,0,0,0.35), /* The fourth layer shadow */ - 20px 20px 0 0px #fff, /* The fifth layer */ - 20px 20px 1px 1px rgba(0,0,0,0.35), /* The fifth layer shadow */ - 25px 25px 0 0px #fff, /* The fifth layer */ - 25px 25px 1px 1px rgba(0,0,0,0.35); /* The fifth layer shadow */ - margin-left: 10px; - margin-right: 45px; - } - - .paper-big { /* modified from: http://css-tricks.com/snippets/css/layered-paper/ */ - box-shadow: - 0px 0px 1px 1px rgba(0,0,0,0.35); /* The top layer shadow */ - - margin-left: 10px; - margin-right: 45px; - } - - - .layered-paper { /* modified from: http://css-tricks.com/snippets/css/layered-paper/ */ - box-shadow: - 0px 0px 1px 1px rgba(0,0,0,0.35), /* The top layer shadow */ - 5px 5px 0 0px #fff, /* The second layer */ - 5px 5px 1px 1px rgba(0,0,0,0.35), /* The second layer shadow */ - 10px 10px 0 0px #fff, /* The third layer */ - 10px 10px 1px 1px rgba(0,0,0,0.35); /* The third layer shadow */ - margin-top: 5px; - margin-left: 10px; - margin-right: 30px; - margin-bottom: 5px; - } - - .vert-cent { - position: relative; - top: 50%; - transform: translateY(-50%); - } - - hr - { - border: 0; - height: 1px; - background-image: linear-gradient(to right, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.75), rgba(0, 0, 0, 0)); - } - - .card { - /* width: 130px; - height: 195px; - width: 1px; - height: 1px; */ - position: relative; - display: inline-block; - /* margin: 50px; */ - } - .card .img-top { - display: none; - position: absolute; - 
top: 0; - left: 0; - z-index: 99; - } - .card:hover .img-top { - display: inline; - } - details { - user-select: none; - } - - details>summary span.icon { - width: 24px; - height: 24px; - transition: all 0.3s; - margin-left: auto; - } - - details[open] summary span.icon { - transform: rotate(180deg); - } - - summary { - display: flex; - cursor: pointer; - } - - summary::-webkit-details-marker { - display: none; - } - - ul { - display: table; - margin: 0 auto; - text-align: left; - } - - .dark { - padding: 1em 2em; - background-color: #333; - box-shadow: 3px 3px 3px #333; - border: 1px #333; - } - .column { - float: left; - width: 20%; - padding: 0.5%; - } - - .galleryImg { - transition: opacity 0.3s; - -webkit-transition: opacity 0.3s; - filter: grayscale(100%); - /* filter: blur(2px); */ - -webkit-transition : -webkit-filter 250ms linear; - /* opacity: 0.5; */ - cursor: pointer; - } - - - - .selected { - /* outline: 100px solid var(--hover-background) !important; */ - /* outline-offset: -100px; */ - filter: grayscale(0%); - -webkit-transition : -webkit-filter 250ms linear; - /*opacity: 1.0 !important; */ - } - - .galleryImg:hover { - filter: grayscale(0%); - -webkit-transition : -webkit-filter 250ms linear; - - } - - .row { - margin-bottom: 1em; - padding: 0px 1em; - } - /* Clear floats after the columns */ - .row:after { - content: ""; - display: table; - clear: both; - } - - /* The expanding image container */ - #gallery { - position: relative; - /*display: none;*/ - } - - #section_comparison{ - position: relative; - width: 100%; - height: max-content; - } - - /* SLIDER - -------------------------------------------------- */ - - .slider-container { - position: relative; - height: 384px; - width: 512px; - cursor: grab; - overflow: hidden; - margin: auto; - } - .slider-after { - display: block; - position: absolute; - top: 0; - right: 0; - bottom: 0; - left: 0; - width: 100%; - height: 100%; - overflow: hidden; - } - .slider-before { - display: block; - position: absolute; - top: 0; - /* right: 0; */ - bottom: 0; - left: 0; - width: 100%; - height: 100%; - z-index: 15; - overflow: hidden; - } - .slider-before-inset { - position: absolute; - top: 0; - bottom: 0; - left: 0; - } - .slider-after img, - .slider-before img { - object-fit: cover; - position: absolute; - width: 100%; - height: 100%; - object-position: 50% 50%; - top: 0; - bottom: 0; - left: 0; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -o-user-select: none; - user-select: none; - } - - #lbl_inset_left{ - text-align: center; - position: absolute; - top: 384px; - width: 150px; - left: calc(50% - 256px); - z-index: 11; - font-size: 16px; - color: #fff; - margin: 10px; - } - .inset-before { - position: absolute; - width: 150px; - height: 150px; - box-shadow: 3px 3px 3px #333; - border: 1px #333; - border-style: solid; - z-index: 16; - top: 410px; - left: calc(50% - 256px); - margin: 10px; - font-size: 1em; - background-repeat: no-repeat; - pointer-events: none; - } - - #lbl_inset_right{ - text-align: center; - position: absolute; - top: 384px; - width: 150px; - right: calc(50% - 256px); - z-index: 11; - font-size: 16px; - color: #fff; - margin: 10px; - } - .inset-after { - position: absolute; - width: 150px; - height: 150px; - box-shadow: 3px 3px 3px #333; - border: 1px #333; - border-style: solid; - z-index: 16; - top: 410px; - right: calc(50% - 256px); - margin: 10px; - font-size: 1em; - background-repeat: no-repeat; - pointer-events: none; - } - - #lbl_inset_input{ - text-align: center; - 
position: absolute; - top: 384px; - width: 150px; - left: calc(50% - 256px + 150px + 20px); - z-index: 11; - font-size: 16px; - color: #fff; - margin: 10px; - } - .inset-target { - position: absolute; - width: 150px; - height: 150px; - box-shadow: 3px 3px 3px #333; - border: 1px #333; - border-style: solid; - z-index: 16; - top: 410px; - right: calc(50% - 256px + 150px + 20px); - margin: 10px; - font-size: 1em; - background-repeat: no-repeat; - pointer-events: none; - } - - .slider-beforePosition { - background: #121212; - color: #fff; - left: 0; - pointer-events: none; - border-radius: 0.2rem; - padding: 2px 10px; - } - .slider-afterPosition { - background: #121212; - color: #fff; - right: 0; - pointer-events: none; - border-radius: 0.2rem; - padding: 2px 10px; - } - .beforeLabel { - position: absolute; - top: 0; - margin: 1rem; - font-size: 1em; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -o-user-select: none; - user-select: none; - } - .afterLabel { - position: absolute; - top: 0; - margin: 1rem; - font-size: 1em; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -o-user-select: none; - user-select: none; - } - - .slider-handle { - height: 41px; - width: 41px; - position: absolute; - left: 50%; - top: 50%; - margin-left: -20px; - margin-top: -21px; - border: 2px solid #fff; - border-radius: 1000px; - z-index: 20; - pointer-events: none; - box-shadow: 0 0 10px rgb(12, 12, 12); - } - .handle-left-arrow, - .handle-right-arrow { - width: 0; - height: 0; - border: 6px inset transparent; - position: absolute; - top: 50%; - margin-top: -6px; - } - .handle-left-arrow { - border-right: 6px solid #fff; - left: 50%; - margin-left: -17px; - } - .handle-right-arrow { - border-left: 6px solid #fff; - right: 50%; - margin-right: -17px; - } - .slider-handle::before { - bottom: 50%; - margin-bottom: 20px; - box-shadow: 0 0 10px rgb(12, 12, 12); - } - .slider-handle::after { - top: 50%; - margin-top: 20.5px; - box-shadow: 0 0 5px rgb(12, 12, 12); - } - .slider-handle::before, - .slider-handle::after { - content: " "; - display: block; - width: 2px; - background: #fff; - height: 9999px; - position: absolute; - left: 50%; - margin-left: -1.5px; - } - - - /* - ------------------------------------------------- - The editing results shown below inversion results - ------------------------------------------------- - */ - .edit_labels{ - font-weight:500; - font-size: 24px; - color: #fff; - height: 20px; - margin-left: 20px; - position: relative; - top: 20px; - } - - - .open > a:hover { - color: #555; - background-color: red; - } - - - #directions { padding-top:30; padding-bottom:0; margin-bottom: 0px; height: 20px; } - #custom_task { padding-top:0; padding-bottom:0; margin-bottom: 0px; height: 20px; } - #slider_ddim {accent-color: #941120;} - #slider_ddim::-webkit-slider-thumb {background-color: #941120;} - #slider_xa {accent-color: #941120;} - #slider_xa::-webkit-slider-thumb {background-color: #941120;} - #slider_edit_mul {accent-color: #941120;} - #slider_edit_mul::-webkit-slider-thumb {background-color: #941120;} - - #input_image [data-testid="image"]{ - height: unset; - } - #input_image_synth [data-testid="image"]{ - height: unset; - } - - -""" - - - -HTML_header = f""" - -
            - Zero-shot Image-to-Image Translation - - - - -
            -
            - [Website] - [Code] -
            -
            -
            - -
            -
            -

            - This is a demo for pix2pix-zero, a diffusion-based image-to-image approach that allows users to - specify the edit direction on-the-fly (e.g., cat to dog). Our method can directly use pre-trained text-to-image diffusion models, such as Stable Diffusion, for editing real and synthetic images while preserving the input image's structure. Our method is training-free and prompt-free, as it requires neither manual text prompting for each input image nor costly fine-tuning for each task. -
            - TL;DR: no finetuning required; no text input needed; input structure preserved. -

            -
            -
            - - -
            - -""" - -HTML_input_header = f""" -

            - Step 1: select a real input image. -

            -""" - -HTML_middle_header = f""" -

            - Step 2: select the editing options. -

            -""" - - -HTML_output_header = f""" -

            - Step 3: translated image! -

            -""" \ No newline at end of file diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/gb2312freq.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/gb2312freq.py deleted file mode 100644 index b32bfc74213d93d434f1f3a47cb5d7d0bf4863d3..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/gb2312freq.py +++ /dev/null @@ -1,284 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# GB2312 most frequently used character table -# -# Char to FreqOrder table , from hz6763 - -# 512 --> 0.79 -- 0.79 -# 1024 --> 0.92 -- 0.13 -# 2048 --> 0.98 -- 0.06 -# 6768 --> 1.00 -- 0.02 -# -# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79 -# Random Distribution Ration = 512 / (3755 - 512) = 0.157 -# -# Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR - -GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9 - -GB2312_TABLE_SIZE = 3760 - -# fmt: off -GB2312_CHAR_TO_FREQ_ORDER = ( -1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205, -2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842, -2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409, - 249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670, -1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820, -1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585, - 152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566, -1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575, -2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853, -3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061, - 544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155, -1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406, - 927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816, -2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606, - 360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023, -2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414, -1806,2264,2936,1100, 
343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513, -3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052, - 198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570, -1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575, - 253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250, -2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506, -1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26, -3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835, -1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686, -2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054, -1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894, - 585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105, -3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403, -3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694, - 252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873, -3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940, - 836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121, -1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648, -3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992, -2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233, -1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157, - 755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807, -1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094, -4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258, - 887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478, -3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152, -3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909, - 509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272, -1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221, -2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252, -1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301, -1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254, - 389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070, -3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461, -3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360, -4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124, - 296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535, -3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243, -1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713, -1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071, -4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442, - 215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946, - 814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257, -3273, 760,3821,3521,3156,2607, 
949,1024,1733,1516,1803,1920,2125,2283,2665,3180, -1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427, - 602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781, -1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724, -2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937, - 930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943, - 432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789, - 396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552, -3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246, -4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451, -3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310, - 750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860, -2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297, -2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780, -2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745, - 776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936, -2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032, - 968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657, - 163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414, - 220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976, -3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436, -2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254, -2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536, -1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238, - 18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059, -2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741, - 90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447, - 286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601, -1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269, -1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894, - 915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173, - 681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994, -1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956, -2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437, -3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154, -2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240, -2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143, -2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634, -3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472, -1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541, -1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143, -2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312, -1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414, -3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 
754, -1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424, -1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302, -3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739, - 795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004, -2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484, -1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739, -4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535, -1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641, -1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307, -3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573, -1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533, - 47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965, - 504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99, -1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280, - 160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505, -1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012, -1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039, - 744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982, -3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530, -4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392, -3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656, -2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220, -2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766, -1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535, -3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728, -2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338, -1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627, -1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885, - 125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411, -2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671, -2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162, -3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774, -4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524, -3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346, - 180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040, -3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188, -2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280, -1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131, - 259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947, - 774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970, -3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814, -4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557, -2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997, 
-1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972, -1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369, - 766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376, -1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480, -3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610, - 955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128, - 642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769, -1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207, - 57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392, -1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623, - 193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782, -2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650, - 158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478, -2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773, -2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007, -1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323, -1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598, -2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961, - 819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302, -1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409, -1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683, -2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191, -2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616, -3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302, -1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774, -4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147, - 571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731, - 845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464, -3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377, -1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315, - 470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557, -3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903, -1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060, -4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261, -1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092, -2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810, -1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708, - 498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658, -1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871, -3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503, - 448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229, -2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112, - 136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504, -1795,1792,2248, 
674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389, -1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27, -1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542, -3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861, -2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845, -3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700, -3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469, -3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582, - 996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999, -2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274, - 786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020, -2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601, - 12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628, -1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31, - 475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668, - 233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778, -1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169, -3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667, -3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881, -1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276, -1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320, -3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751, -2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432, -2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772, -1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843, -3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116, - 451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904, -4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652, -1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664, -2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770, -3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283, -3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626, -1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713, - 768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333, - 391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062, -2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555, - 931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014, -1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510, - 386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015, -1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459, -1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390, -1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238, -1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232, -1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 
115,1496,1624, - 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189, - 852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512 -) -# fmt: on diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/ranged_response.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/ranged_response.py deleted file mode 100644 index f488776e6c7f3a58ce95375e043680b6c17257da..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/ranged_response.py +++ /dev/null @@ -1,188 +0,0 @@ -# Taken from https://gist.github.com/kevinastone/a6a62db57577b3f24e8a6865ed311463 -# Context: https://github.com/encode/starlette/pull/1090 -from __future__ import annotations - -import os -import re -import stat -from typing import NamedTuple -from urllib.parse import quote - -import aiofiles -from aiofiles.os import stat as aio_stat -from starlette.datastructures import Headers -from starlette.exceptions import HTTPException -from starlette.responses import Response, guess_type -from starlette.staticfiles import StaticFiles -from starlette.types import Receive, Scope, Send - -RANGE_REGEX = re.compile(r"^bytes=(?P\d+)-(?P\d*)$") - - -class ClosedRange(NamedTuple): - start: int - end: int - - def __len__(self) -> int: - return self.end - self.start + 1 - - def __bool__(self) -> bool: - return len(self) > 0 - - -class OpenRange(NamedTuple): - start: int - end: int | None = None - - def clamp(self, start: int, end: int) -> ClosedRange: - begin = max(self.start, start) - end = min(x for x in (self.end, end) if x) - - begin = min(begin, end) - end = max(begin, end) - - return ClosedRange(begin, end) - - -class RangedFileResponse(Response): - chunk_size = 4096 - - def __init__( - self, - path: str | os.PathLike, - range: OpenRange, - headers: dict[str, str] | None = None, - media_type: str | None = None, - filename: str | None = None, - stat_result: os.stat_result | None = None, - method: str | None = None, - ) -> None: - if aiofiles is None: - raise ModuleNotFoundError( - "'aiofiles' must be installed to use FileResponse" - ) - self.path = path - self.range = range - self.filename = filename - self.background = None - self.send_header_only = method is not None and method.upper() == "HEAD" - if media_type is None: - media_type = guess_type(filename or path)[0] or "text/plain" - self.media_type = media_type - self.init_headers(headers or {}) - if self.filename is not None: - content_disposition_filename = quote(self.filename) - if content_disposition_filename != self.filename: - content_disposition = ( - f"attachment; filename*=utf-8''{content_disposition_filename}" - ) - else: - content_disposition = f'attachment; filename="{self.filename}"' - self.headers.setdefault("content-disposition", content_disposition) - self.stat_result = stat_result - - def set_range_headers(self, range: ClosedRange) -> None: - assert self.stat_result - total_length = self.stat_result.st_size - content_length = len(range) - self.headers[ - "content-range" - ] = f"bytes {range.start}-{range.end}/{total_length}" - self.headers["content-length"] = str(content_length) - pass - - async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: - if self.stat_result is None: - try: - stat_result = await aio_stat(self.path) - self.stat_result = stat_result - except FileNotFoundError as fnfe: - raise RuntimeError( - f"File at path {self.path} does not exist." 
- ) from fnfe - else: - mode = stat_result.st_mode - if not stat.S_ISREG(mode): - raise RuntimeError(f"File at path {self.path} is not a file.") - - byte_range = self.range.clamp(0, self.stat_result.st_size) - self.set_range_headers(byte_range) - - async with aiofiles.open(self.path, mode="rb") as file: - await file.seek(byte_range.start) - await send( - { - "type": "http.response.start", - "status": 206, - "headers": self.raw_headers, - } - ) - if self.send_header_only: - await send( - {"type": "http.response.body", "body": b"", "more_body": False} - ) - else: - remaining_bytes = len(byte_range) - - if not byte_range: - await send( - {"type": "http.response.body", "body": b"", "more_body": False} - ) - return - - while remaining_bytes > 0: - chunk_size = min(self.chunk_size, remaining_bytes) - chunk = await file.read(chunk_size) - remaining_bytes -= len(chunk) - await send( - { - "type": "http.response.body", - "body": chunk, - "more_body": remaining_bytes > 0, - } - ) - - -class RangedStaticFiles(StaticFiles): - def file_response( - self, - full_path: str | os.PathLike, - stat_result: os.stat_result, - scope: Scope, - status_code: int = 200, - ) -> Response: - request_headers = Headers(scope=scope) - - if request_headers.get("range"): - response = self.ranged_file_response( - full_path, stat_result=stat_result, scope=scope - ) - else: - response = super().file_response( - full_path, stat_result=stat_result, scope=scope, status_code=status_code - ) - response.headers["accept-ranges"] = "bytes" - return response - - def ranged_file_response( - self, - full_path: str | os.PathLike, - stat_result: os.stat_result, - scope: Scope, - ) -> Response: - method = scope["method"] - request_headers = Headers(scope=scope) - - range_header = request_headers["range"] - - match = RANGE_REGEX.search(range_header) - if not match: - raise HTTPException(400) - - start, end = match.group("start"), match.group("end") - - range = OpenRange(int(start), int(end) if end else None) - - return RangedFileResponse( - full_path, range, stat_result=stat_result, method=method - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/apply.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/apply.py deleted file mode 100644 index e5683359c2fb95a99d17aea5d27963d48eb44136..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/apply.py +++ /dev/null @@ -1,1833 +0,0 @@ -from __future__ import annotations - -import abc -from collections import defaultdict -from functools import partial -import inspect -from typing import ( - TYPE_CHECKING, - Any, - Callable, - DefaultDict, - Literal, - cast, -) -import warnings - -import numpy as np - -from pandas._config import option_context - -from pandas._libs import lib -from pandas._typing import ( - AggFuncType, - AggFuncTypeBase, - AggFuncTypeDict, - AggObjType, - Axis, - AxisInt, - NDFrameT, - npt, -) -from pandas.errors import SpecificationError -from pandas.util._decorators import cache_readonly -from pandas.util._exceptions import find_stack_level - -from pandas.core.dtypes.cast import is_nested_object -from pandas.core.dtypes.common import ( - is_dict_like, - is_list_like, - is_sequence, -) -from pandas.core.dtypes.dtypes import ( - CategoricalDtype, - ExtensionDtype, -) -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCNDFrame, - ABCSeries, -) - -import pandas.core.common as com -from pandas.core.construction import 
ensure_wrapped_if_datetimelike - -if TYPE_CHECKING: - from collections.abc import ( - Hashable, - Iterable, - Iterator, - Sequence, - ) - - from pandas import ( - DataFrame, - Index, - Series, - ) - from pandas.core.groupby import GroupBy - from pandas.core.resample import Resampler - from pandas.core.window.rolling import BaseWindow - - -ResType = dict[int, Any] - - -def frame_apply( - obj: DataFrame, - func: AggFuncType, - axis: Axis = 0, - raw: bool = False, - result_type: str | None = None, - by_row: Literal[False, "compat"] = "compat", - args=None, - kwargs=None, -) -> FrameApply: - """construct and return a row or column based frame apply object""" - axis = obj._get_axis_number(axis) - klass: type[FrameApply] - if axis == 0: - klass = FrameRowApply - elif axis == 1: - klass = FrameColumnApply - - _, func, _, _ = reconstruct_func(func, **kwargs) - assert func is not None - - return klass( - obj, - func, - raw=raw, - result_type=result_type, - by_row=by_row, - args=args, - kwargs=kwargs, - ) - - -class Apply(metaclass=abc.ABCMeta): - axis: AxisInt - - def __init__( - self, - obj: AggObjType, - func: AggFuncType, - raw: bool, - result_type: str | None, - *, - by_row: Literal[False, "compat", "_compat"] = "compat", - args, - kwargs, - ) -> None: - self.obj = obj - self.raw = raw - - assert by_row is False or by_row in ["compat", "_compat"] - self.by_row = by_row - - self.args = args or () - self.kwargs = kwargs or {} - - if result_type not in [None, "reduce", "broadcast", "expand"]: - raise ValueError( - "invalid value for result_type, must be one " - "of {None, 'reduce', 'broadcast', 'expand'}" - ) - - self.result_type = result_type - - self.func = func - - @abc.abstractmethod - def apply(self) -> DataFrame | Series: - pass - - @abc.abstractmethod - def agg_or_apply_list_like( - self, op_name: Literal["agg", "apply"] - ) -> DataFrame | Series: - pass - - @abc.abstractmethod - def agg_or_apply_dict_like( - self, op_name: Literal["agg", "apply"] - ) -> DataFrame | Series: - pass - - def agg(self) -> DataFrame | Series | None: - """ - Provide an implementation for the aggregators. - - Returns - ------- - Result of aggregation, or None if agg cannot be performed by - this method. - """ - obj = self.obj - func = self.func - args = self.args - kwargs = self.kwargs - - if isinstance(func, str): - return self.apply_str() - - if is_dict_like(func): - return self.agg_dict_like() - elif is_list_like(func): - # we require a list, but not a 'str' - return self.agg_list_like() - - if callable(func): - f = com.get_cython_func(func) - if f and not args and not kwargs: - warn_alias_replacement(obj, func, f) - return getattr(obj, f)() - - # caller can react - return None - - def transform(self) -> DataFrame | Series: - """ - Transform a DataFrame or Series. - - Returns - ------- - DataFrame or Series - Result of applying ``func`` along the given axis of the - Series or DataFrame. - - Raises - ------ - ValueError - If the transform function fails or does not transform. 
- """ - obj = self.obj - func = self.func - axis = self.axis - args = self.args - kwargs = self.kwargs - - is_series = obj.ndim == 1 - - if obj._get_axis_number(axis) == 1: - assert not is_series - return obj.T.transform(func, 0, *args, **kwargs).T - - if is_list_like(func) and not is_dict_like(func): - func = cast(list[AggFuncTypeBase], func) - # Convert func equivalent dict - if is_series: - func = {com.get_callable_name(v) or v: v for v in func} - else: - func = {col: func for col in obj} - - if is_dict_like(func): - func = cast(AggFuncTypeDict, func) - return self.transform_dict_like(func) - - # func is either str or callable - func = cast(AggFuncTypeBase, func) - try: - result = self.transform_str_or_callable(func) - except TypeError: - raise - except Exception as err: - raise ValueError("Transform function failed") from err - - # Functions that transform may return empty Series/DataFrame - # when the dtype is not appropriate - if ( - isinstance(result, (ABCSeries, ABCDataFrame)) - and result.empty - and not obj.empty - ): - raise ValueError("Transform function failed") - # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type - # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy, - # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame, - # Series]" - if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals( - obj.index # type: ignore[arg-type] - ): - raise ValueError("Function did not transform") - - return result - - def transform_dict_like(self, func): - """ - Compute transform in the case of a dict-like func - """ - from pandas.core.reshape.concat import concat - - obj = self.obj - args = self.args - kwargs = self.kwargs - - # transform is currently only for Series/DataFrame - assert isinstance(obj, ABCNDFrame) - - if len(func) == 0: - raise ValueError("No transform functions were provided") - - func = self.normalize_dictlike_arg("transform", obj, func) - - results: dict[Hashable, DataFrame | Series] = {} - for name, how in func.items(): - colg = obj._gotitem(name, ndim=1) - results[name] = colg.transform(how, 0, *args, **kwargs) - return concat(results, axis=1) - - def transform_str_or_callable(self, func) -> DataFrame | Series: - """ - Compute transform in the case of a string or callable func - """ - obj = self.obj - args = self.args - kwargs = self.kwargs - - if isinstance(func, str): - return self._apply_str(obj, func, *args, **kwargs) - - if not args and not kwargs: - f = com.get_cython_func(func) - if f: - warn_alias_replacement(obj, func, f) - return getattr(obj, f)() - - # Two possible ways to use a UDF - apply or call directly - try: - return obj.apply(func, args=args, **kwargs) - except Exception: - return func(obj, *args, **kwargs) - - def agg_list_like(self) -> DataFrame | Series: - """ - Compute aggregation in the case of a list-like argument. - - Returns - ------- - Result of aggregation. - """ - return self.agg_or_apply_list_like(op_name="agg") - - def compute_list_like( - self, - op_name: Literal["agg", "apply"], - selected_obj: Series | DataFrame, - kwargs: dict[str, Any], - ) -> tuple[list[Hashable], list[Any]]: - """ - Compute agg/apply results for like-like input. - - Parameters - ---------- - op_name : {"agg", "apply"} - Operation being performed. - selected_obj : Series or DataFrame - Data to perform operation on. - kwargs : dict - Keyword arguments to pass to the functions. - - Returns - ------- - keys : list[hashable] - Index labels for result. - results : list - Data for result. 
When aggregating with a Series, this can contain any - Python objects. - """ - func = cast(list[AggFuncTypeBase], self.func) - obj = self.obj - - results = [] - keys = [] - - # degenerate case - if selected_obj.ndim == 1: - for a in func: - colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj) - args = ( - [self.axis, *self.args] - if include_axis(op_name, colg) - else self.args - ) - new_res = getattr(colg, op_name)(a, *args, **kwargs) - results.append(new_res) - - # make sure we find a good name - name = com.get_callable_name(a) or a - keys.append(name) - - else: - indices = [] - for index, col in enumerate(selected_obj): - colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index]) - args = ( - [self.axis, *self.args] - if include_axis(op_name, colg) - else self.args - ) - new_res = getattr(colg, op_name)(func, *args, **kwargs) - results.append(new_res) - indices.append(index) - keys = selected_obj.columns.take(indices) - - return keys, results - - def wrap_results_list_like( - self, keys: list[Hashable], results: list[Series | DataFrame] - ): - from pandas.core.reshape.concat import concat - - obj = self.obj - - try: - return concat(results, keys=keys, axis=1, sort=False) - except TypeError as err: - # we are concatting non-NDFrame objects, - # e.g. a list of scalars - from pandas import Series - - result = Series(results, index=keys, name=obj.name) - if is_nested_object(result): - raise ValueError( - "cannot combine transform and aggregation operations" - ) from err - return result - - def agg_dict_like(self) -> DataFrame | Series: - """ - Compute aggregation in the case of a dict-like argument. - - Returns - ------- - Result of aggregation. - """ - return self.agg_or_apply_dict_like(op_name="agg") - - def compute_dict_like( - self, - op_name: Literal["agg", "apply"], - selected_obj: Series | DataFrame, - selection: Hashable | Sequence[Hashable], - kwargs: dict[str, Any], - ) -> tuple[list[Hashable], list[Any]]: - """ - Compute agg/apply results for dict-like input. - - Parameters - ---------- - op_name : {"agg", "apply"} - Operation being performed. - selected_obj : Series or DataFrame - Data to perform operation on. - selection : hashable or sequence of hashables - Used by GroupBy, Window, and Resample if selection is applied to the object. - kwargs : dict - Keyword arguments to pass to the functions. - - Returns - ------- - keys : list[hashable] - Index labels for result. - results : list - Data for result. When aggregating with a Series, this can contain any - Python object. 
- """ - from pandas.core.groupby.generic import ( - DataFrameGroupBy, - SeriesGroupBy, - ) - - obj = self.obj - is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)) - func = cast(AggFuncTypeDict, self.func) - func = self.normalize_dictlike_arg(op_name, selected_obj, func) - - is_non_unique_col = ( - selected_obj.ndim == 2 - and selected_obj.columns.nunique() < len(selected_obj.columns) - ) - - if selected_obj.ndim == 1: - # key only used for output - colg = obj._gotitem(selection, ndim=1) - results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()] - keys = list(func.keys()) - elif not is_groupby and is_non_unique_col: - # key used for column selection and output - # GH#51099 - results = [] - keys = [] - for key, how in func.items(): - indices = selected_obj.columns.get_indexer_for([key]) - labels = selected_obj.columns.take(indices) - label_to_indices = defaultdict(list) - for index, label in zip(indices, labels): - label_to_indices[label].append(index) - - key_data = [ - getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs) - for label, indices in label_to_indices.items() - for indice in indices - ] - - keys += [key] * len(key_data) - results += key_data - else: - # key used for column selection and output - results = [ - getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs) - for key, how in func.items() - ] - keys = list(func.keys()) - - return keys, results - - def wrap_results_dict_like( - self, - selected_obj: Series | DataFrame, - result_index: list[Hashable], - result_data: list, - ): - from pandas import Index - from pandas.core.reshape.concat import concat - - obj = self.obj - - # Avoid making two isinstance calls in all and any below - is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data] - - if all(is_ndframe): - results = dict(zip(result_index, result_data)) - keys_to_use: Iterable[Hashable] - keys_to_use = [k for k in result_index if not results[k].empty] - # Have to check, if at least one DataFrame is not empty. - keys_to_use = keys_to_use if keys_to_use != [] else result_index - if selected_obj.ndim == 2: - # keys are columns, so we can preserve names - ktu = Index(keys_to_use) - ktu._set_names(selected_obj.columns.names) - keys_to_use = ktu - - axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1 - result = concat( - {k: results[k] for k in keys_to_use}, - axis=axis, - keys=keys_to_use, - ) - elif any(is_ndframe): - # There is a mix of NDFrames and scalars - raise ValueError( - "cannot perform both aggregation " - "and transformation operations " - "simultaneously" - ) - else: - from pandas import Series - - # we have a list of scalars - # GH 36212 use name only if obj is a series - if obj.ndim == 1: - obj = cast("Series", obj) - name = obj.name - else: - name = None - - result = Series(result_data, index=result_index, name=name) - - return result - - def apply_str(self) -> DataFrame | Series: - """ - Compute apply in case of a string. - - Returns - ------- - result: Series or DataFrame - """ - # Caller is responsible for checking isinstance(self.f, str) - func = cast(str, self.func) - - obj = self.obj - - from pandas.core.groupby.generic import ( - DataFrameGroupBy, - SeriesGroupBy, - ) - - # Support for `frame.transform('method')` - # Some methods (shift, etc.) require the axis argument, others - # don't, so inspect and insert if necessary. 
- method = getattr(obj, func, None) - if callable(method): - sig = inspect.getfullargspec(method) - arg_names = (*sig.args, *sig.kwonlyargs) - if self.axis != 0 and ( - "axis" not in arg_names or func in ("corrwith", "skew") - ): - raise ValueError(f"Operation {func} does not support axis=1") - if "axis" in arg_names: - if isinstance(obj, (SeriesGroupBy, DataFrameGroupBy)): - # Try to avoid FutureWarning for deprecated axis keyword; - # If self.axis matches the axis we would get by not passing - # axis, we safely exclude the keyword. - - default_axis = 0 - if func in ["idxmax", "idxmin"]: - # DataFrameGroupBy.idxmax, idxmin axis defaults to self.axis, - # whereas other axis keywords default to 0 - default_axis = self.obj.axis - - if default_axis != self.axis: - self.kwargs["axis"] = self.axis - else: - self.kwargs["axis"] = self.axis - return self._apply_str(obj, func, *self.args, **self.kwargs) - - def apply_list_or_dict_like(self) -> DataFrame | Series: - """ - Compute apply in case of a list-like or dict-like. - - Returns - ------- - result: Series, DataFrame, or None - Result when self.func is a list-like or dict-like, None otherwise. - """ - if self.axis == 1 and isinstance(self.obj, ABCDataFrame): - return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T - - func = self.func - kwargs = self.kwargs - - if is_dict_like(func): - result = self.agg_or_apply_dict_like(op_name="apply") - else: - result = self.agg_or_apply_list_like(op_name="apply") - - result = reconstruct_and_relabel_result(result, func, **kwargs) - - return result - - def normalize_dictlike_arg( - self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict - ) -> AggFuncTypeDict: - """ - Handler for dict-like argument. - - Ensures that necessary columns exist if obj is a DataFrame, and - that a nested renamer is not passed. Also normalizes to all lists - when values consists of a mix of list and non-lists. - """ - assert how in ("apply", "agg", "transform") - - # Can't use func.values(); wouldn't work for a Series - if ( - how == "agg" - and isinstance(obj, ABCSeries) - and any(is_list_like(v) for _, v in func.items()) - ) or (any(is_dict_like(v) for _, v in func.items())): - # GH 15931 - deprecation of renaming keys - raise SpecificationError("nested renamer is not supported") - - if obj.ndim != 1: - # Check for missing columns on a frame - from pandas import Index - - cols = Index(list(func.keys())).difference(obj.columns, sort=True) - if len(cols) > 0: - raise KeyError(f"Column(s) {list(cols)} do not exist") - - aggregator_types = (list, tuple, dict) - - # if we have a dict of any non-scalars - # eg. 
{'A' : ['mean']}, normalize all to - # be list-likes - # Cannot use func.values() because arg may be a Series - if any(isinstance(x, aggregator_types) for _, x in func.items()): - new_func: AggFuncTypeDict = {} - for k, v in func.items(): - if not isinstance(v, aggregator_types): - new_func[k] = [v] - else: - new_func[k] = v - func = new_func - return func - - def _apply_str(self, obj, func: str, *args, **kwargs): - """ - if arg is a string, then try to operate on it: - - try to find a function (or attribute) on obj - - try to find a numpy function - - raise - """ - assert isinstance(func, str) - - if hasattr(obj, func): - f = getattr(obj, func) - if callable(f): - return f(*args, **kwargs) - - # people may aggregate on a non-callable attribute - # but don't let them think they can pass args to it - assert len(args) == 0 - assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0 - return f - elif hasattr(np, func) and hasattr(obj, "__array__"): - # in particular exclude Window - f = getattr(np, func) - return f(obj, *args, **kwargs) - else: - msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object" - raise AttributeError(msg) - - -class NDFrameApply(Apply): - """ - Methods shared by FrameApply and SeriesApply but - not GroupByApply or ResamplerWindowApply - """ - - obj: DataFrame | Series - - @property - def index(self) -> Index: - return self.obj.index - - @property - def agg_axis(self) -> Index: - return self.obj._get_agg_axis(self.axis) - - def agg_or_apply_list_like( - self, op_name: Literal["agg", "apply"] - ) -> DataFrame | Series: - obj = self.obj - kwargs = self.kwargs - - if op_name == "apply": - if isinstance(self, FrameApply): - by_row = self.by_row - - elif isinstance(self, SeriesApply): - by_row = "_compat" if self.by_row else False - else: - by_row = False - kwargs = {**kwargs, "by_row": by_row} - - if getattr(obj, "axis", 0) == 1: - raise NotImplementedError("axis other than 0 is not supported") - - keys, results = self.compute_list_like(op_name, obj, kwargs) - result = self.wrap_results_list_like(keys, results) - return result - - def agg_or_apply_dict_like( - self, op_name: Literal["agg", "apply"] - ) -> DataFrame | Series: - assert op_name in ["agg", "apply"] - obj = self.obj - - kwargs = {} - if op_name == "apply": - by_row = "_compat" if self.by_row else False - kwargs.update({"by_row": by_row}) - - if getattr(obj, "axis", 0) == 1: - raise NotImplementedError("axis other than 0 is not supported") - - selection = None - result_index, result_data = self.compute_dict_like( - op_name, obj, selection, kwargs - ) - result = self.wrap_results_dict_like(obj, result_index, result_data) - return result - - -class FrameApply(NDFrameApply): - obj: DataFrame - - def __init__( - self, - obj: AggObjType, - func: AggFuncType, - raw: bool, - result_type: str | None, - *, - by_row: Literal[False, "compat"] = False, - args, - kwargs, - ) -> None: - if by_row is not False and by_row != "compat": - raise ValueError(f"by_row={by_row} not allowed") - super().__init__( - obj, func, raw, result_type, by_row=by_row, args=args, kwargs=kwargs - ) - - # --------------------------------------------------------------- - # Abstract Methods - - @property - @abc.abstractmethod - def result_index(self) -> Index: - pass - - @property - @abc.abstractmethod - def result_columns(self) -> Index: - pass - - @property - @abc.abstractmethod - def series_generator(self) -> Iterator[Series]: - pass - - @abc.abstractmethod - def wrap_results_for_axis( - self, results: ResType, 
res_index: Index - ) -> DataFrame | Series: - pass - - # --------------------------------------------------------------- - - @property - def res_columns(self) -> Index: - return self.result_columns - - @property - def columns(self) -> Index: - return self.obj.columns - - @cache_readonly - def values(self): - return self.obj.values - - def apply(self) -> DataFrame | Series: - """compute the results""" - # dispatch to handle list-like or dict-like - if is_list_like(self.func): - return self.apply_list_or_dict_like() - - # all empty - if len(self.columns) == 0 and len(self.index) == 0: - return self.apply_empty_result() - - # string dispatch - if isinstance(self.func, str): - return self.apply_str() - - # ufunc - elif isinstance(self.func, np.ufunc): - with np.errstate(all="ignore"): - results = self.obj._mgr.apply("apply", func=self.func) - # _constructor will retain self.index and self.columns - return self.obj._constructor_from_mgr(results, axes=results.axes) - - # broadcasting - if self.result_type == "broadcast": - return self.apply_broadcast(self.obj) - - # one axis empty - elif not all(self.obj.shape): - return self.apply_empty_result() - - # raw - elif self.raw: - return self.apply_raw() - - return self.apply_standard() - - def agg(self): - obj = self.obj - axis = self.axis - - # TODO: Avoid having to change state - self.obj = self.obj if self.axis == 0 else self.obj.T - self.axis = 0 - - result = None - try: - result = super().agg() - finally: - self.obj = obj - self.axis = axis - - if axis == 1: - result = result.T if result is not None else result - - if result is None: - result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs) - - return result - - def apply_empty_result(self): - """ - we have an empty result; at least 1 axis is 0 - - we will try to apply the function to an empty - series in order to see if this is a reduction function - """ - assert callable(self.func) - - # we are not asked to reduce or infer reduction - # so just return a copy of the existing object - if self.result_type not in ["reduce", None]: - return self.obj.copy() - - # we may need to infer - should_reduce = self.result_type == "reduce" - - from pandas import Series - - if not should_reduce: - try: - if self.axis == 0: - r = self.func( - Series([], dtype=np.float64), *self.args, **self.kwargs - ) - else: - r = self.func( - Series(index=self.columns, dtype=np.float64), - *self.args, - **self.kwargs, - ) - except Exception: - pass - else: - should_reduce = not isinstance(r, Series) - - if should_reduce: - if len(self.agg_axis): - r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs) - else: - r = np.nan - - return self.obj._constructor_sliced(r, index=self.agg_axis) - else: - return self.obj.copy() - - def apply_raw(self): - """apply to the values as a numpy array""" - - def wrap_function(func): - """ - Wrap user supplied function to work around numpy issue. 
- - see https://github.com/numpy/numpy/issues/8352 - """ - - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - if isinstance(result, str): - result = np.array(result, dtype=object) - return result - - return wrapper - - result = np.apply_along_axis(wrap_function(self.func), self.axis, self.values) - - # TODO: mixed type case - if result.ndim == 2: - return self.obj._constructor(result, index=self.index, columns=self.columns) - else: - return self.obj._constructor_sliced(result, index=self.agg_axis) - - def apply_broadcast(self, target: DataFrame) -> DataFrame: - assert callable(self.func) - - result_values = np.empty_like(target.values) - - # axis which we want to compare compliance - result_compare = target.shape[0] - - for i, col in enumerate(target.columns): - res = self.func(target[col], *self.args, **self.kwargs) - ares = np.asarray(res).ndim - - # must be a scalar or 1d - if ares > 1: - raise ValueError("too many dims to broadcast") - if ares == 1: - # must match return dim - if result_compare != len(res): - raise ValueError("cannot broadcast result") - - result_values[:, i] = res - - # we *always* preserve the original index / columns - result = self.obj._constructor( - result_values, index=target.index, columns=target.columns - ) - return result - - def apply_standard(self): - results, res_index = self.apply_series_generator() - - # wrap results - return self.wrap_results(results, res_index) - - def apply_series_generator(self) -> tuple[ResType, Index]: - assert callable(self.func) - - series_gen = self.series_generator - res_index = self.result_index - - results = {} - - with option_context("mode.chained_assignment", None): - for i, v in enumerate(series_gen): - # ignore SettingWithCopy here in case the user mutates - results[i] = self.func(v, *self.args, **self.kwargs) - if isinstance(results[i], ABCSeries): - # If we have a view on v, we need to make a copy because - # series_generator will swap out the underlying data - results[i] = results[i].copy(deep=False) - - return results, res_index - - def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series: - from pandas import Series - - # see if we can infer the results - if len(results) > 0 and 0 in results and is_sequence(results[0]): - return self.wrap_results_for_axis(results, res_index) - - # dict of scalars - - # the default dtype of an empty Series is `object`, but this - # code can be hit by df.mean() where the result should have dtype - # float64 even if it's an empty Series. 
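The comment above refers to the empty-reduction case. A quick check of that behaviour with public API (observed on pandas 2.x, shown only as an illustration of why the float64 fallback exists):

```python
import pandas as pd

empty_default = pd.Series([])            # plain empty Series -> object dtype
empty_reduction = pd.DataFrame().mean()  # empty reduction handled by wrap_results

print(empty_default.dtype)    # object
print(empty_reduction.dtype)  # float64
```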
- constructor_sliced = self.obj._constructor_sliced - if len(results) == 0 and constructor_sliced is Series: - result = constructor_sliced(results, dtype=np.float64) - else: - result = constructor_sliced(results) - result.index = res_index - - return result - - def apply_str(self) -> DataFrame | Series: - # Caller is responsible for checking isinstance(self.func, str) - # TODO: GH#39993 - Avoid special-casing by replacing with lambda - if self.func == "size": - # Special-cased because DataFrame.size returns a single scalar - obj = self.obj - value = obj.shape[self.axis] - return obj._constructor_sliced(value, index=self.agg_axis) - return super().apply_str() - - -class FrameRowApply(FrameApply): - axis: AxisInt = 0 - - @property - def series_generator(self): - return (self.obj._ixs(i, axis=1) for i in range(len(self.columns))) - - @property - def result_index(self) -> Index: - return self.columns - - @property - def result_columns(self) -> Index: - return self.index - - def wrap_results_for_axis( - self, results: ResType, res_index: Index - ) -> DataFrame | Series: - """return the results for the rows""" - - if self.result_type == "reduce": - # e.g. test_apply_dict GH#8735 - res = self.obj._constructor_sliced(results) - res.index = res_index - return res - - elif self.result_type is None and all( - isinstance(x, dict) for x in results.values() - ): - # Our operation was a to_dict op e.g. - # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544 - res = self.obj._constructor_sliced(results) - res.index = res_index - return res - - try: - result = self.obj._constructor(data=results) - except ValueError as err: - if "All arrays must be of the same length" in str(err): - # e.g. result = [[2, 3], [1.5], ['foo', 'bar']] - # see test_agg_listlike_result GH#29587 - res = self.obj._constructor_sliced(results) - res.index = res_index - return res - else: - raise - - if not isinstance(results[0], ABCSeries): - if len(result.index) == len(self.res_columns): - result.index = self.res_columns - - if len(result.columns) == len(res_index): - result.columns = res_index - - return result - - -class FrameColumnApply(FrameApply): - axis: AxisInt = 1 - - def apply_broadcast(self, target: DataFrame) -> DataFrame: - result = super().apply_broadcast(target.T) - return result.T - - @property - def series_generator(self): - values = self.values - values = ensure_wrapped_if_datetimelike(values) - assert len(values) > 0 - - # We create one Series object, and will swap out the data inside - # of it. Kids: don't do this at home. 
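Because of this recycling, `apply_series_generator` above copies any Series result before the underlying data is swapped out. The reuse can be observed from the outside; the snippet below reflects the default numeric path on pandas 2.x and is not guaranteed for extension dtypes:

```python
import pandas as pd

row_ids = set()

def record(row):
    row_ids.add(id(row))  # remember which Series object was handed in
    return row.sum()

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
df.apply(record, axis=1)
print(len(row_ids))  # typically 1: the same Series object is recycled per row
```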
- ser = self.obj._ixs(0, axis=0) - mgr = ser._mgr - - if isinstance(ser.dtype, ExtensionDtype): - # values will be incorrect for this block - # TODO(EA2D): special case would be unnecessary with 2D EAs - obj = self.obj - for i in range(len(obj)): - yield obj._ixs(i, axis=0) - - else: - for arr, name in zip(values, self.index): - # GH#35462 re-pin mgr in case setitem changed it - ser._mgr = mgr - mgr.set_values(arr) - object.__setattr__(ser, "_name", name) - yield ser - - @property - def result_index(self) -> Index: - return self.index - - @property - def result_columns(self) -> Index: - return self.columns - - def wrap_results_for_axis( - self, results: ResType, res_index: Index - ) -> DataFrame | Series: - """return the results for the columns""" - result: DataFrame | Series - - # we have requested to expand - if self.result_type == "expand": - result = self.infer_to_same_shape(results, res_index) - - # we have a non-series and don't want inference - elif not isinstance(results[0], ABCSeries): - result = self.obj._constructor_sliced(results) - result.index = res_index - - # we may want to infer results - else: - result = self.infer_to_same_shape(results, res_index) - - return result - - def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame: - """infer the results to the same shape as the input object""" - result = self.obj._constructor(data=results) - result = result.T - - # set the index - result.index = res_index - - # infer dtypes - result = result.infer_objects(copy=False) - - return result - - -class SeriesApply(NDFrameApply): - obj: Series - axis: AxisInt = 0 - by_row: Literal[False, "compat", "_compat"] # only relevant for apply() - - def __init__( - self, - obj: Series, - func: AggFuncType, - *, - convert_dtype: bool | lib.NoDefault = lib.no_default, - by_row: Literal[False, "compat", "_compat"] = "compat", - args, - kwargs, - ) -> None: - if convert_dtype is lib.no_default: - convert_dtype = True - else: - warnings.warn( - "the convert_dtype parameter is deprecated and will be removed in a " - "future version. Do ``ser.astype(object).apply()`` " - "instead if you want ``convert_dtype=False``.", - FutureWarning, - stacklevel=find_stack_level(), - ) - self.convert_dtype = convert_dtype - - super().__init__( - obj, - func, - raw=False, - result_type=None, - by_row=by_row, - args=args, - kwargs=kwargs, - ) - - def apply(self) -> DataFrame | Series: - obj = self.obj - - if len(obj) == 0: - return self.apply_empty_result() - - # dispatch to handle list-like or dict-like - if is_list_like(self.func): - return self.apply_list_or_dict_like() - - if isinstance(self.func, str): - # if we are a string, try to dispatch - return self.apply_str() - - if self.by_row == "_compat": - return self.apply_compat() - - # self.func is Callable - return self.apply_standard() - - def agg(self): - result = super().agg() - if result is None: - obj = self.obj - func = self.func - # string, list-like, and dict-like are entirely handled in super - assert callable(func) - - # GH53325: The setup below is just to keep current behavior while emitting a - # deprecation message. In the future this will all be replaced with a simple - # `result = f(self.obj, *self.args, **self.kwargs)`. - try: - result = obj.apply(func, args=self.args, **self.kwargs) - except (ValueError, AttributeError, TypeError): - result = func(obj, *self.args, **self.kwargs) - else: - msg = ( - f"using {func} in {type(obj).__name__}.agg cannot aggregate and " - f"has been deprecated. 
Use {type(obj).__name__}.transform to " - f"keep behavior unchanged." - ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) - - return result - - def apply_empty_result(self) -> Series: - obj = self.obj - return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__( - obj, method="apply" - ) - - def apply_compat(self): - """compat apply method for funcs in listlikes and dictlikes. - - Used for each callable when giving listlikes and dictlikes of callables to - apply. Needed for compatibility with Pandas < v2.1. - - .. versionadded:: 2.1.0 - """ - obj = self.obj - func = self.func - - if callable(func): - f = com.get_cython_func(func) - if f and not self.args and not self.kwargs: - return obj.apply(func, by_row=False) - - try: - result = obj.apply(func, by_row="compat") - except (ValueError, AttributeError, TypeError): - result = obj.apply(func, by_row=False) - return result - - def apply_standard(self) -> DataFrame | Series: - # caller is responsible for ensuring that f is Callable - func = cast(Callable, self.func) - obj = self.obj - - if isinstance(func, np.ufunc): - with np.errstate(all="ignore"): - return func(obj, *self.args, **self.kwargs) - elif not self.by_row: - return func(obj, *self.args, **self.kwargs) - - if self.args or self.kwargs: - # _map_values does not support args/kwargs - def curried(x): - return func(x, *self.args, **self.kwargs) - - else: - curried = func - - # row-wise access - # apply doesn't have a `na_action` keyword and for backward compat reasons - # we need to give `na_action="ignore"` for categorical data. - # TODO: remove the `na_action="ignore"` when that default has been changed in - # Categorical (GH51645). - action = "ignore" if isinstance(obj.dtype, CategoricalDtype) else None - mapped = obj._map_values( - mapper=curried, na_action=action, convert=self.convert_dtype - ) - - if len(mapped) and isinstance(mapped[0], ABCSeries): - # GH#43986 Need to do list(mapped) in order to get treated as nested - # See also GH#25959 regarding EA support - return obj._constructor_expanddim(list(mapped), index=obj.index) - else: - return obj._constructor(mapped, index=obj.index).__finalize__( - obj, method="apply" - ) - - -class GroupByApply(Apply): - obj: GroupBy | Resampler | BaseWindow - - def __init__( - self, - obj: GroupBy[NDFrameT], - func: AggFuncType, - *, - args, - kwargs, - ) -> None: - kwargs = kwargs.copy() - self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0)) - super().__init__( - obj, - func, - raw=False, - result_type=None, - args=args, - kwargs=kwargs, - ) - - def apply(self): - raise NotImplementedError - - def transform(self): - raise NotImplementedError - - def agg_or_apply_list_like( - self, op_name: Literal["agg", "apply"] - ) -> DataFrame | Series: - obj = self.obj - kwargs = self.kwargs - if op_name == "apply": - kwargs = {**kwargs, "by_row": False} - - if getattr(obj, "axis", 0) == 1: - raise NotImplementedError("axis other than 0 is not supported") - - if obj._selected_obj.ndim == 1: - # For SeriesGroupBy this matches _obj_with_exclusions - selected_obj = obj._selected_obj - else: - selected_obj = obj._obj_with_exclusions - - # Only set as_index=True on groupby objects, not Window or Resample - # that inherit from this class. 
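`com.temp_setattr` used just below is a pandas-internal helper; the sketch here is a stand-in written from its call site (name, signature, and behaviour inferred from usage, so the real pandas implementation may differ):

```python
from contextlib import contextmanager

@contextmanager
def temp_setattr(obj, attr, value, condition=True):
    """Temporarily set ``obj.attr`` to ``value``; restore the old value on exit."""
    if condition:
        old = getattr(obj, attr)
        setattr(obj, attr, value)
    try:
        yield obj
    finally:
        if condition:
            setattr(obj, attr, old)
```

With `condition=hasattr(obj, "as_index")`, Window and Resample objects, which have no `as_index` attribute, pass through untouched, matching the comment above.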
- with com.temp_setattr( - obj, "as_index", True, condition=hasattr(obj, "as_index") - ): - keys, results = self.compute_list_like(op_name, selected_obj, kwargs) - result = self.wrap_results_list_like(keys, results) - return result - - def agg_or_apply_dict_like( - self, op_name: Literal["agg", "apply"] - ) -> DataFrame | Series: - from pandas.core.groupby.generic import ( - DataFrameGroupBy, - SeriesGroupBy, - ) - - assert op_name in ["agg", "apply"] - - obj = self.obj - kwargs = {} - if op_name == "apply": - by_row = "_compat" if self.by_row else False - kwargs.update({"by_row": by_row}) - - if getattr(obj, "axis", 0) == 1: - raise NotImplementedError("axis other than 0 is not supported") - - selected_obj = obj._selected_obj - selection = obj._selection - - is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)) - - # Numba Groupby engine/engine-kwargs passthrough - if is_groupby: - engine = self.kwargs.get("engine", None) - engine_kwargs = self.kwargs.get("engine_kwargs", None) - kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs}) - - with com.temp_setattr( - obj, "as_index", True, condition=hasattr(obj, "as_index") - ): - result_index, result_data = self.compute_dict_like( - op_name, selected_obj, selection, kwargs - ) - result = self.wrap_results_dict_like(selected_obj, result_index, result_data) - return result - - -class ResamplerWindowApply(GroupByApply): - axis: AxisInt = 0 - obj: Resampler | BaseWindow - - def __init__( - self, - obj: Resampler | BaseWindow, - func: AggFuncType, - *, - args, - kwargs, - ) -> None: - super(GroupByApply, self).__init__( - obj, - func, - raw=False, - result_type=None, - args=args, - kwargs=kwargs, - ) - - def apply(self): - raise NotImplementedError - - def transform(self): - raise NotImplementedError - - -def reconstruct_func( - func: AggFuncType | None, **kwargs -) -> tuple[bool, AggFuncType, list[str] | None, npt.NDArray[np.intp] | None]: - """ - This is the internal function to reconstruct func given if there is relabeling - or not and also normalize the keyword to get new order of columns. - - If named aggregation is applied, `func` will be None, and kwargs contains the - column and aggregation function information to be parsed; - If named aggregation is not applied, `func` is either string (e.g. 'min') or - Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name - and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]}) - - If relabeling is True, will return relabeling, reconstructed func, column - names, and the reconstructed order of columns. - If relabeling is False, the columns and order will be None. - - Parameters - ---------- - func: agg function (e.g. 'min' or Callable) or list of agg functions - (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}). 
- **kwargs: dict, kwargs used in is_multi_agg_with_relabel and - normalize_keyword_aggregation function for relabelling - - Returns - ------- - relabelling: bool, if there is relabelling or not - func: normalized and mangled func - columns: list of column names - order: array of columns indices - - Examples - -------- - >>> reconstruct_func(None, **{"foo": ("col", "min")}) - (True, defaultdict(, {'col': ['min']}), ('foo',), array([0])) - - >>> reconstruct_func("min") - (False, 'min', None, None) - """ - relabeling = func is None and is_multi_agg_with_relabel(**kwargs) - columns: list[str] | None = None - order: npt.NDArray[np.intp] | None = None - - if not relabeling: - if isinstance(func, list) and len(func) > len(set(func)): - # GH 28426 will raise error if duplicated function names are used and - # there is no reassigned name - raise SpecificationError( - "Function names must be unique if there is no new column names " - "assigned" - ) - if func is None: - # nicer error message - raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") - - if relabeling: - func, columns, order = normalize_keyword_aggregation(kwargs) - assert func is not None - - return relabeling, func, columns, order - - -def is_multi_agg_with_relabel(**kwargs) -> bool: - """ - Check whether kwargs passed to .agg look like multi-agg with relabeling. - - Parameters - ---------- - **kwargs : dict - - Returns - ------- - bool - - Examples - -------- - >>> is_multi_agg_with_relabel(a="max") - False - >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min")) - True - >>> is_multi_agg_with_relabel() - False - """ - return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and ( - len(kwargs) > 0 - ) - - -def normalize_keyword_aggregation( - kwargs: dict, -) -> tuple[dict, list[str], npt.NDArray[np.intp]]: - """ - Normalize user-provided "named aggregation" kwargs. - Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs - to the old Dict[str, List[scalar]]]. - - Parameters - ---------- - kwargs : dict - - Returns - ------- - aggspec : dict - The transformed kwargs. - columns : List[str] - The user-provided keys. - col_idx_order : List[int] - List of columns indices. - - Examples - -------- - >>> normalize_keyword_aggregation({"output": ("input", "sum")}) - (defaultdict(, {'input': ['sum']}), ('output',), array([0])) - """ - from pandas.core.indexes.base import Index - - # Normalize the aggregation functions as Mapping[column, List[func]], - # process normally, then fixup the names. 
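For orientation, this is the user-facing named-aggregation form that `reconstruct_func` and `normalize_keyword_aggregation` unpack into a column-to-functions mapping; the example uses only public pandas API and can be run as-is:

```python
import pandas as pd

df = pd.DataFrame({"kind": ["cat", "cat", "dog"], "height": [9.1, 9.5, 34.0]})

# keyword -> (column, aggfunc); normalized internally to {"height": ["min", "max"]}
out = df.groupby("kind").agg(min_height=("height", "min"),
                             max_height=("height", "max"))
print(out)
#       min_height  max_height
# kind
# cat          9.1         9.5
# dog         34.0        34.0
```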
- # TODO: aggspec type: typing.Dict[str, List[AggScalar]] - # May be hitting https://github.com/python/mypy/issues/5958 - # saying it doesn't have an attribute __name__ - aggspec: DefaultDict = defaultdict(list) - order = [] - columns, pairs = list(zip(*kwargs.items())) - - for column, aggfunc in pairs: - aggspec[column].append(aggfunc) - order.append((column, com.get_callable_name(aggfunc) or aggfunc)) - - # uniquify aggfunc name if duplicated in order list - uniquified_order = _make_unique_kwarg_list(order) - - # GH 25719, due to aggspec will change the order of assigned columns in aggregation - # uniquified_aggspec will store uniquified order list and will compare it with order - # based on index - aggspec_order = [ - (column, com.get_callable_name(aggfunc) or aggfunc) - for column, aggfuncs in aggspec.items() - for aggfunc in aggfuncs - ] - uniquified_aggspec = _make_unique_kwarg_list(aggspec_order) - - # get the new index of columns by comparison - col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order) - return aggspec, columns, col_idx_order - - -def _make_unique_kwarg_list( - seq: Sequence[tuple[Any, Any]] -) -> Sequence[tuple[Any, Any]]: - """ - Uniquify aggfunc name of the pairs in the order list - - Examples: - -------- - >>> kwarg_list = [('a', ''), ('a', ''), ('b', '')] - >>> _make_unique_kwarg_list(kwarg_list) - [('a', '_0'), ('a', '_1'), ('b', '')] - """ - return [ - (pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair - for i, pair in enumerate(seq) - ] - - -def relabel_result( - result: DataFrame | Series, - func: dict[str, list[Callable | str]], - columns: Iterable[Hashable], - order: Iterable[int], -) -> dict[Hashable, Series]: - """ - Internal function to reorder result if relabelling is True for - dataframe.agg, and return the reordered result in dict. - - Parameters: - ---------- - result: Result from aggregation - func: Dict of (column name, funcs) - columns: New columns name for relabelling - order: New order for relabelling - - Examples - -------- - >>> from pandas.core.apply import relabel_result - >>> result = pd.DataFrame( - ... {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}, - ... index=["max", "mean", "min"] - ... ) - >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]} - >>> columns = ("foo", "aab", "bar", "dat") - >>> order = [0, 1, 2, 3] - >>> result_in_dict = relabel_result(result, funcs, columns, order) - >>> pd.DataFrame(result_in_dict, index=columns) - A C B - foo 2.0 NaN NaN - aab NaN 6.0 NaN - bar NaN NaN 4.0 - dat NaN NaN 2.5 - """ - from pandas.core.indexes.base import Index - - reordered_indexes = [ - pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1]) - ] - reordered_result_in_dict: dict[Hashable, Series] = {} - idx = 0 - - reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1 - for col, fun in func.items(): - s = result[col].dropna() - - # In the `_aggregate`, the callable names are obtained and used in `result`, and - # these names are ordered alphabetically. e.g. - # C2 C1 - # 1 NaN - # amax NaN 4.0 - # max NaN 4.0 - # sum 18.0 6.0 - # Therefore, the order of functions for each column could be shuffled - # accordingly so need to get the callable name if it is not parsed names, and - # reorder the aggregated result for each column. - # e.g. 
if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is - # [sum, ], but in `result`, it will be [, sum], and we need to - # reorder so that aggregated values map to their functions regarding the order. - - # However there is only one column being used for aggregation, not need to - # reorder since the index is not sorted, and keep as is in `funcs`, e.g. - # A - # min 1.0 - # mean 1.5 - # mean 1.5 - if reorder_mask: - fun = [ - com.get_callable_name(f) if not isinstance(f, str) else f for f in fun - ] - col_idx_order = Index(s.index).get_indexer(fun) - s = s.iloc[col_idx_order] - - # assign the new user-provided "named aggregation" as index names, and reindex - # it based on the whole user-provided names. - s.index = reordered_indexes[idx : idx + len(fun)] - reordered_result_in_dict[col] = s.reindex(columns, copy=False) - idx = idx + len(fun) - return reordered_result_in_dict - - -def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series: - from pandas import DataFrame - - relabeling, func, columns, order = reconstruct_func(func, **kwargs) - - if relabeling: - # This is to keep the order to columns occurrence unchanged, and also - # keep the order of new columns occurrence unchanged - - # For the return values of reconstruct_func, if relabeling is - # False, columns and order will be None. - assert columns is not None - assert order is not None - - result_in_dict = relabel_result(result, func, columns, order) - result = DataFrame(result_in_dict, index=columns) - - return result - - -# TODO: Can't use, because mypy doesn't like us setting __name__ -# error: "partial[Any]" has no attribute "__name__" -# the type is: -# typing.Sequence[Callable[..., ScalarResult]] -# -> typing.Sequence[Callable[..., ScalarResult]]: - - -def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]: - """ - Possibly mangle a list of aggfuncs. - - Parameters - ---------- - aggfuncs : Sequence - - Returns - ------- - mangled: list-like - A new AggSpec sequence, where lambdas have been converted - to have unique names. - - Notes - ----- - If just one aggfunc is passed, the name will not be mangled. - """ - if len(aggfuncs) <= 1: - # don't mangle for .agg([lambda x: .]) - return aggfuncs - i = 0 - mangled_aggfuncs = [] - for aggfunc in aggfuncs: - if com.get_callable_name(aggfunc) == "": - aggfunc = partial(aggfunc) - aggfunc.__name__ = f"" - i += 1 - mangled_aggfuncs.append(aggfunc) - - return mangled_aggfuncs - - -def maybe_mangle_lambdas(agg_spec: Any) -> Any: - """ - Make new lambdas with unique names. - - Parameters - ---------- - agg_spec : Any - An argument to GroupBy.agg. - Non-dict-like `agg_spec` are pass through as is. - For dict-like `agg_spec` a new spec is returned - with name-mangled lambdas. - - Returns - ------- - mangled : Any - Same type as the input. 
- - Examples - -------- - >>> maybe_mangle_lambdas('sum') - 'sum' - >>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP - [, - .f(*args, **kwargs)>] - """ - is_dict = is_dict_like(agg_spec) - if not (is_dict or is_list_like(agg_spec)): - return agg_spec - mangled_aggspec = type(agg_spec)() # dict or OrderedDict - - if is_dict: - for key, aggfuncs in agg_spec.items(): - if is_list_like(aggfuncs) and not is_dict_like(aggfuncs): - mangled_aggfuncs = _managle_lambda_list(aggfuncs) - else: - mangled_aggfuncs = aggfuncs - - mangled_aggspec[key] = mangled_aggfuncs - else: - mangled_aggspec = _managle_lambda_list(agg_spec) - - return mangled_aggspec - - -def validate_func_kwargs( - kwargs: dict, -) -> tuple[list[str], list[str | Callable[..., Any]]]: - """ - Validates types of user-provided "named aggregation" kwargs. - `TypeError` is raised if aggfunc is not `str` or callable. - - Parameters - ---------- - kwargs : dict - - Returns - ------- - columns : List[str] - List of user-provided keys. - func : List[Union[str, callable[...,Any]]] - List of user-provided aggfuncs - - Examples - -------- - >>> validate_func_kwargs({'one': 'min', 'two': 'max'}) - (['one', 'two'], ['min', 'max']) - """ - tuple_given_message = "func is expected but received {} in **kwargs." - columns = list(kwargs) - func = [] - for col_func in kwargs.values(): - if not (isinstance(col_func, str) or callable(col_func)): - raise TypeError(tuple_given_message.format(type(col_func).__name__)) - func.append(col_func) - if not columns: - no_arg_message = "Must provide 'func' or named aggregation **kwargs." - raise TypeError(no_arg_message) - return columns, func - - -def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool: - return isinstance(colg, ABCDataFrame) or ( - isinstance(colg, ABCSeries) and op_name == "agg" - ) - - -def warn_alias_replacement( - obj: AggObjType, - func: Callable, - alias: str, -) -> None: - if alias.startswith("np."): - full_alias = alias - else: - full_alias = f"{type(obj).__name__}.{alias}" - alias = f'"{alias}"' - warnings.warn( - f"The provided callable {func} is currently using " - f"{full_alias}. In a future version of pandas, " - f"the provided callable will be used directly. To keep current " - f"behavior pass the string {alias} instead.", - category=FutureWarning, - stacklevel=find_stack_level(), - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_function.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_function.py deleted file mode 100644 index 4c7bd6e293ef4a350dfe3b6aa7baa1513ee0946d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_function.py +++ /dev/null @@ -1,57 +0,0 @@ -import numpy as np -import pytest - -from pandas.core.dtypes.common import is_integer_dtype - -import pandas as pd -import pandas._testing as tm - -arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES] -arrays += [ - pd.array([0.141, -0.268, 5.895, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES -] - - -@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays]) -def data(request): - """ - Fixture returning parametrized 'data' array with different integer and - floating point types - """ - return request.param - - -@pytest.fixture() -def numpy_dtype(data): - """ - Fixture returning numpy dtype from 'data' input array. 
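The fixtures above parametrize nullable ("masked") integer and float extension arrays, and the tests that follow check that .round() and .tolist() keep the extension dtype and pd.NA intact. A small illustration, using only public pandas API:

import pandas as pd

arr = pd.array([1.234, 5.678, None], dtype="Float64")  # masked extension array

print(arr.round(2))   # [1.23, 5.68, <NA>], dtype stays Float64
print(arr.tolist())   # [1.234, 5.678, <NA>]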
- """ - # For integer dtype, the numpy conversion must be done to float - if is_integer_dtype(data): - numpy_dtype = float - else: - numpy_dtype = data.dtype.type - return numpy_dtype - - -def test_round(data, numpy_dtype): - # No arguments - result = data.round() - expected = pd.array( - np.round(data.to_numpy(dtype=numpy_dtype, na_value=None)), dtype=data.dtype - ) - tm.assert_extension_array_equal(result, expected) - - # Decimals argument - result = data.round(decimals=2) - expected = pd.array( - np.round(data.to_numpy(dtype=numpy_dtype, na_value=None), decimals=2), - dtype=data.dtype, - ) - tm.assert_extension_array_equal(result, expected) - - -def test_tolist(data): - result = data.tolist() - expected = list(data) - tm.assert_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/test_chaining_and_caching.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/test_chaining_and_caching.py deleted file mode 100644 index f36fdf0d36ea94760baefb317729a7b6505490be..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/test_chaining_and_caching.py +++ /dev/null @@ -1,631 +0,0 @@ -from string import ascii_letters as letters - -import numpy as np -import pytest - -from pandas.errors import ( - SettingWithCopyError, - SettingWithCopyWarning, -) -import pandas.util._test_decorators as td - -import pandas as pd -from pandas import ( - DataFrame, - Series, - Timestamp, - date_range, - option_context, -) -import pandas._testing as tm - -msg = "A value is trying to be set on a copy of a slice from a DataFrame" - - -def random_text(nobs=100): - # Construct a DataFrame where each row is a random slice from 'letters' - idxs = np.random.default_rng(2).integers(len(letters), size=(nobs, 2)) - idxs.sort(axis=1) - strings = [letters[x[0] : x[1]] for x in idxs] - - return DataFrame(strings, columns=["letters"]) - - -class TestCaching: - def test_slice_consolidate_invalidate_item_cache(self, using_copy_on_write): - # this is chained assignment, but will 'work' - with option_context("chained_assignment", None): - # #3970 - df = DataFrame({"aa": np.arange(5), "bb": [2.2] * 5}) - - # Creates a second float block - df["cc"] = 0.0 - - # caches a reference to the 'bb' series - df["bb"] - - # repr machinery triggers consolidation - repr(df) - - # Assignment to wrong series - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["bb"].iloc[0] = 0.17 - else: - df["bb"].iloc[0] = 0.17 - df._clear_item_cache() - if not using_copy_on_write: - tm.assert_almost_equal(df["bb"][0], 0.17) - else: - # with ArrayManager, parent is not mutated with chained assignment - tm.assert_almost_equal(df["bb"][0], 2.2) - - @pytest.mark.parametrize("do_ref", [True, False]) - def test_setitem_cache_updating(self, do_ref): - # GH 5424 - cont = ["one", "two", "three", "four", "five", "six", "seven"] - - df = DataFrame({"a": cont, "b": cont[3:] + cont[:3], "c": np.arange(7)}) - - # ref the cache - if do_ref: - df.loc[0, "c"] - - # set it - df.loc[7, "c"] = 1 - - assert df.loc[0, "c"] == 0.0 - assert df.loc[7, "c"] == 1.0 - - def test_setitem_cache_updating_slices(self, using_copy_on_write): - # GH 7084 - # not updating cache on series setting with slices - expected = DataFrame( - {"A": [600, 600, 600]}, index=date_range("5/7/2014", "5/9/2014") - ) - out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014")) - df = DataFrame({"C": 
["A", "A", "A"], "D": [100, 200, 300]}) - - # loop through df to update out - six = Timestamp("5/7/2014") - eix = Timestamp("5/9/2014") - for ix, row in df.iterrows(): - out.loc[six:eix, row["C"]] = out.loc[six:eix, row["C"]] + row["D"] - - tm.assert_frame_equal(out, expected) - tm.assert_series_equal(out["A"], expected["A"]) - - # try via a chain indexing - # this actually works - out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014")) - out_original = out.copy() - for ix, row in df.iterrows(): - v = out[row["C"]][six:eix] + row["D"] - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - out[row["C"]][six:eix] = v - else: - out[row["C"]][six:eix] = v - - if not using_copy_on_write: - tm.assert_frame_equal(out, expected) - tm.assert_series_equal(out["A"], expected["A"]) - else: - tm.assert_frame_equal(out, out_original) - tm.assert_series_equal(out["A"], out_original["A"]) - - out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014")) - for ix, row in df.iterrows(): - out.loc[six:eix, row["C"]] += row["D"] - - tm.assert_frame_equal(out, expected) - tm.assert_series_equal(out["A"], expected["A"]) - - def test_altering_series_clears_parent_cache(self, using_copy_on_write): - # GH #33675 - df = DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["A", "B"]) - ser = df["A"] - - if using_copy_on_write: - assert "A" not in df._item_cache - else: - assert "A" in df._item_cache - - # Adding a new entry to ser swaps in a new array, so "A" needs to - # be removed from df._item_cache - ser["c"] = 5 - assert len(ser) == 3 - assert "A" not in df._item_cache - assert df["A"] is not ser - assert len(df["A"]) == 2 - - -class TestChaining: - def test_setitem_chained_setfault(self, using_copy_on_write): - # GH6026 - data = ["right", "left", "left", "left", "right", "left", "timeout"] - mdata = ["right", "left", "left", "left", "right", "left", "none"] - - df = DataFrame({"response": np.array(data)}) - mask = df.response == "timeout" - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df.response[mask] = "none" - tm.assert_frame_equal(df, DataFrame({"response": data})) - else: - df.response[mask] = "none" - tm.assert_frame_equal(df, DataFrame({"response": mdata})) - - recarray = np.rec.fromarrays([data], names=["response"]) - df = DataFrame(recarray) - mask = df.response == "timeout" - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df.response[mask] = "none" - tm.assert_frame_equal(df, DataFrame({"response": data})) - else: - df.response[mask] = "none" - tm.assert_frame_equal(df, DataFrame({"response": mdata})) - - df = DataFrame({"response": data, "response1": data}) - df_original = df.copy() - mask = df.response == "timeout" - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df.response[mask] = "none" - tm.assert_frame_equal(df, df_original) - else: - df.response[mask] = "none" - tm.assert_frame_equal(df, DataFrame({"response": mdata, "response1": data})) - - # GH 6056 - expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]}) - df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])}) - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["A"].iloc[0] = np.nan - expected = DataFrame({"A": ["foo", "bar", "bah", "foo", "bar"]}) - else: - df["A"].iloc[0] = np.nan - expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]}) - result = df.head() - tm.assert_frame_equal(result, expected) - - df = DataFrame({"A": np.array(["foo", "bar", "bah", 
"foo", "bar"])}) - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df.A.iloc[0] = np.nan - else: - df.A.iloc[0] = np.nan - result = df.head() - tm.assert_frame_equal(result, expected) - - @pytest.mark.arm_slow - def test_detect_chained_assignment(self, using_copy_on_write): - with option_context("chained_assignment", "raise"): - # work with the chain - expected = DataFrame([[-5, 1], [-6, 3]], columns=list("AB")) - df = DataFrame( - np.arange(4).reshape(2, 2), columns=list("AB"), dtype="int64" - ) - df_original = df.copy() - assert df._is_copy is None - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["A"][0] = -5 - with tm.raises_chained_assignment_error(): - df["A"][1] = -6 - tm.assert_frame_equal(df, df_original) - else: - df["A"][0] = -5 - df["A"][1] = -6 - tm.assert_frame_equal(df, expected) - - @pytest.mark.arm_slow - def test_detect_chained_assignment_raises( - self, using_array_manager, using_copy_on_write - ): - # test with the chaining - df = DataFrame( - { - "A": Series(range(2), dtype="int64"), - "B": np.array(np.arange(2, 4), dtype=np.float64), - } - ) - df_original = df.copy() - assert df._is_copy is None - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["A"][0] = -5 - with tm.raises_chained_assignment_error(): - df["A"][1] = -6 - tm.assert_frame_equal(df, df_original) - elif not using_array_manager: - with pytest.raises(SettingWithCopyError, match=msg): - df["A"][0] = -5 - - with pytest.raises(SettingWithCopyError, match=msg): - df["A"][1] = np.nan - - assert df["A"]._is_copy is None - else: - # INFO(ArrayManager) for ArrayManager it doesn't matter that it's - # a mixed dataframe - df["A"][0] = -5 - df["A"][1] = -6 - expected = DataFrame([[-5, 2], [-6, 3]], columns=list("AB")) - expected["B"] = expected["B"].astype("float64") - tm.assert_frame_equal(df, expected) - - @pytest.mark.arm_slow - def test_detect_chained_assignment_fails(self, using_copy_on_write): - # Using a copy (the chain), fails - df = DataFrame( - { - "A": Series(range(2), dtype="int64"), - "B": np.array(np.arange(2, 4), dtype=np.float64), - } - ) - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df.loc[0]["A"] = -5 - else: - with pytest.raises(SettingWithCopyError, match=msg): - df.loc[0]["A"] = -5 - - @pytest.mark.arm_slow - def test_detect_chained_assignment_doc_example(self, using_copy_on_write): - # Doc example - df = DataFrame( - { - "a": ["one", "one", "two", "three", "two", "one", "six"], - "c": Series(range(7), dtype="int64"), - } - ) - assert df._is_copy is None - - if using_copy_on_write: - indexer = df.a.str.startswith("o") - with tm.raises_chained_assignment_error(): - df[indexer]["c"] = 42 - else: - with pytest.raises(SettingWithCopyError, match=msg): - indexer = df.a.str.startswith("o") - df[indexer]["c"] = 42 - - @pytest.mark.arm_slow - def test_detect_chained_assignment_object_dtype( - self, using_array_manager, using_copy_on_write - ): - expected = DataFrame({"A": [111, "bbb", "ccc"], "B": [1, 2, 3]}) - df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]}) - df_original = df.copy() - - if not using_copy_on_write: - with pytest.raises(SettingWithCopyError, match=msg): - df.loc[0]["A"] = 111 - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["A"][0] = 111 - tm.assert_frame_equal(df, df_original) - elif not using_array_manager: - with pytest.raises(SettingWithCopyError, match=msg): - df["A"][0] = 111 - - df.loc[0, "A"] = 111 - tm.assert_frame_equal(df, 
expected) - else: - # INFO(ArrayManager) for ArrayManager it doesn't matter that it's - # a mixed dataframe - df["A"][0] = 111 - tm.assert_frame_equal(df, expected) - - @pytest.mark.arm_slow - def test_detect_chained_assignment_is_copy_pickle(self): - # gh-5475: Make sure that is_copy is picked up reconstruction - df = DataFrame({"A": [1, 2]}) - assert df._is_copy is None - - with tm.ensure_clean("__tmp__pickle") as path: - df.to_pickle(path) - df2 = pd.read_pickle(path) - df2["B"] = df2["A"] - df2["B"] = df2["A"] - - @pytest.mark.arm_slow - def test_detect_chained_assignment_setting_entire_column(self): - # gh-5597: a spurious raise as we are setting the entire column here - - df = random_text(100000) - - # Always a copy - x = df.iloc[[0, 1, 2]] - assert x._is_copy is not None - - x = df.iloc[[0, 1, 2, 4]] - assert x._is_copy is not None - - # Explicitly copy - indexer = df.letters.apply(lambda x: len(x) > 10) - df = df.loc[indexer].copy() - - assert df._is_copy is None - df["letters"] = df["letters"].apply(str.lower) - - @pytest.mark.arm_slow - def test_detect_chained_assignment_implicit_take(self): - # Implicitly take - df = random_text(100000) - indexer = df.letters.apply(lambda x: len(x) > 10) - df = df.loc[indexer] - - assert df._is_copy is not None - df["letters"] = df["letters"].apply(str.lower) - - @pytest.mark.arm_slow - def test_detect_chained_assignment_implicit_take2(self, using_copy_on_write): - if using_copy_on_write: - pytest.skip("_is_copy is not always set for CoW") - # Implicitly take 2 - df = random_text(100000) - indexer = df.letters.apply(lambda x: len(x) > 10) - - df = df.loc[indexer] - assert df._is_copy is not None - df.loc[:, "letters"] = df["letters"].apply(str.lower) - - # with the enforcement of #45333 in 2.0, the .loc[:, letters] setting - # is inplace, so df._is_copy remains non-None. 
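The chained-assignment tests in this file all exercise the same core pattern: writing through an intermediate object versus a single indexing call. A minimal sketch of the distinction being pinned down:

import pandas as pd

df = pd.DataFrame({"A": ["aaa", "bbb"], "B": [1, 2]})

# Chained assignment: df["A"] may be a copy, so the write can be lost
# (SettingWithCopyWarning/Error historically, a warned no-op under Copy-on-Write).
df["A"][0] = "zzz"

# The supported spelling writes through one indexer and always updates df.
df.loc[0, "A"] = "zzz"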
- assert df._is_copy is not None - - df["letters"] = df["letters"].apply(str.lower) - assert df._is_copy is None - - @pytest.mark.arm_slow - def test_detect_chained_assignment_str(self): - df = random_text(100000) - indexer = df.letters.apply(lambda x: len(x) > 10) - df.loc[indexer, "letters"] = df.loc[indexer, "letters"].apply(str.lower) - - @pytest.mark.arm_slow - def test_detect_chained_assignment_is_copy(self): - # an identical take, so no copy - df = DataFrame({"a": [1]}).dropna() - assert df._is_copy is None - df["a"] += 1 - - @pytest.mark.arm_slow - def test_detect_chained_assignment_sorting(self): - df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) - ser = df.iloc[:, 0].sort_values() - - tm.assert_series_equal(ser, df.iloc[:, 0].sort_values()) - tm.assert_series_equal(ser, df[0].sort_values()) - - @pytest.mark.arm_slow - def test_detect_chained_assignment_false_positives(self): - # see gh-6025: false positives - df = DataFrame({"column1": ["a", "a", "a"], "column2": [4, 8, 9]}) - str(df) - - df["column1"] = df["column1"] + "b" - str(df) - - df = df[df["column2"] != 8] - str(df) - - df["column1"] = df["column1"] + "c" - str(df) - - @pytest.mark.arm_slow - def test_detect_chained_assignment_undefined_column(self, using_copy_on_write): - # from SO: - # https://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc - df = DataFrame(np.arange(0, 9), columns=["count"]) - df["group"] = "b" - df_original = df.copy() - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df.iloc[0:5]["group"] = "a" - tm.assert_frame_equal(df, df_original) - else: - with pytest.raises(SettingWithCopyError, match=msg): - df.iloc[0:5]["group"] = "a" - - @pytest.mark.arm_slow - def test_detect_chained_assignment_changing_dtype( - self, using_array_manager, using_copy_on_write - ): - # Mixed type setting but same dtype & changing dtype - df = DataFrame( - { - "A": date_range("20130101", periods=5), - "B": np.random.default_rng(2).standard_normal(5), - "C": np.arange(5, dtype="int64"), - "D": ["a", "b", "c", "d", "e"], - } - ) - df_original = df.copy() - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df.loc[2]["D"] = "foo" - with tm.raises_chained_assignment_error(): - df.loc[2]["C"] = "foo" - with tm.raises_chained_assignment_error(extra_warnings=(FutureWarning,)): - df["C"][2] = "foo" - tm.assert_frame_equal(df, df_original) - - if not using_copy_on_write: - with pytest.raises(SettingWithCopyError, match=msg): - df.loc[2]["D"] = "foo" - - with pytest.raises(SettingWithCopyError, match=msg): - df.loc[2]["C"] = "foo" - - if not using_array_manager: - with pytest.raises(SettingWithCopyError, match=msg): - df["C"][2] = "foo" - else: - # INFO(ArrayManager) for ArrayManager it doesn't matter if it's - # changing the dtype or not - df["C"][2] = "foo" - assert df.loc[2, "C"] == "foo" - - def test_setting_with_copy_bug(self, using_copy_on_write): - # operating on a copy - df = DataFrame( - {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]} - ) - df_original = df.copy() - mask = pd.isna(df.c) - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df[["c"]][mask] = df[["b"]][mask] - tm.assert_frame_equal(df, df_original) - else: - with pytest.raises(SettingWithCopyError, match=msg): - df[["c"]][mask] = df[["b"]][mask] - - def test_setting_with_copy_bug_no_warning(self): - # invalid warning as we are returning a new object - # GH 8730 - df1 = DataFrame({"x": Series(["a", "b", 
"c"]), "y": Series(["d", "e", "f"])}) - df2 = df1[["x"]] - - # this should not raise - df2["y"] = ["g", "h", "i"] - - def test_detect_chained_assignment_warnings_errors(self, using_copy_on_write): - df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]}) - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df.loc[0]["A"] = 111 - return - - with option_context("chained_assignment", "warn"): - with tm.assert_produces_warning(SettingWithCopyWarning): - df.loc[0]["A"] = 111 - - with option_context("chained_assignment", "raise"): - with pytest.raises(SettingWithCopyError, match=msg): - df.loc[0]["A"] = 111 - - @pytest.mark.parametrize("rhs", [3, DataFrame({0: [1, 2, 3, 4]})]) - def test_detect_chained_assignment_warning_stacklevel( - self, rhs, using_copy_on_write - ): - # GH#42570 - df = DataFrame(np.arange(25).reshape(5, 5)) - df_original = df.copy() - chained = df.loc[:3] - with option_context("chained_assignment", "warn"): - if not using_copy_on_write: - with tm.assert_produces_warning(SettingWithCopyWarning) as t: - chained[2] = rhs - assert t[0].filename == __file__ - else: - # INFO(CoW) no warning, and original dataframe not changed - with tm.assert_produces_warning(None): - chained[2] = rhs - tm.assert_frame_equal(df, df_original) - - # TODO(ArrayManager) fast_xs with array-like scalars is not yet working - @td.skip_array_manager_not_yet_implemented - def test_chained_getitem_with_lists(self): - # GH6394 - # Regression in chained getitem indexing with embedded list-like from - # 0.12 - - df = DataFrame({"A": 5 * [np.zeros(3)], "B": 5 * [np.ones(3)]}) - expected = df["A"].iloc[2] - result = df.loc[2, "A"] - tm.assert_numpy_array_equal(result, expected) - result2 = df.iloc[2]["A"] - tm.assert_numpy_array_equal(result2, expected) - result3 = df["A"].loc[2] - tm.assert_numpy_array_equal(result3, expected) - result4 = df["A"].iloc[2] - tm.assert_numpy_array_equal(result4, expected) - - def test_cache_updating(self): - # GH 4939, make sure to update the cache on setitem - - df = tm.makeDataFrame() - df["A"] # cache series - df.loc["Hello Friend"] = df.iloc[0] - assert "Hello Friend" in df["A"].index - assert "Hello Friend" in df["B"].index - - def test_cache_updating2(self, using_copy_on_write): - # 10264 - df = DataFrame( - np.zeros((5, 5), dtype="int64"), - columns=["a", "b", "c", "d", "e"], - index=range(5), - ) - df["f"] = 0 - df_orig = df.copy() - if using_copy_on_write: - with pytest.raises(ValueError, match="read-only"): - df.f.values[3] = 1 - tm.assert_frame_equal(df, df_orig) - return - - df.f.values[3] = 1 - - df.f.values[3] = 2 - expected = DataFrame( - np.zeros((5, 6), dtype="int64"), - columns=["a", "b", "c", "d", "e", "f"], - index=range(5), - ) - expected.at[3, "f"] = 2 - tm.assert_frame_equal(df, expected) - expected = Series([0, 0, 0, 2, 0], name="f") - tm.assert_series_equal(df.f, expected) - - def test_iloc_setitem_chained_assignment(self, using_copy_on_write): - # GH#3970 - with option_context("chained_assignment", None): - df = DataFrame({"aa": range(5), "bb": [2.2] * 5}) - df["cc"] = 0.0 - - ck = [True] * len(df) - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["bb"].iloc[0] = 0.13 - else: - df["bb"].iloc[0] = 0.13 - - # GH#3970 this lookup used to break the chained setting to 0.15 - df.iloc[ck] - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["bb"].iloc[0] = 0.15 - else: - df["bb"].iloc[0] = 0.15 - - if not using_copy_on_write: - assert df["bb"].iloc[0] == 0.15 - else: - assert 
df["bb"].iloc[0] == 2.2 - - def test_getitem_loc_assignment_slice_state(self, using_copy_on_write): - # GH 13569 - df = DataFrame({"a": [10, 20, 30]}) - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["a"].loc[4] = 40 - else: - df["a"].loc[4] = 40 - tm.assert_frame_equal(df, DataFrame({"a": [10, 20, 30]})) - tm.assert_series_equal(df["a"], Series([10, 20, 30], name="a")) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/indexing/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/indexing/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_explode.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_explode.py deleted file mode 100644 index c8a9eb6f89fdef5fb16fe5f2a1d0490c02477ab8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_explode.py +++ /dev/null @@ -1,165 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm - - -def test_basic(): - s = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo") - result = s.explode() - expected = pd.Series( - [0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo" - ) - tm.assert_series_equal(result, expected) - - -def test_mixed_type(): - s = pd.Series( - [[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo" - ) - result = s.explode() - expected = pd.Series( - [0, 1, 2, np.nan, None, np.nan, "a", "b"], - index=[0, 0, 0, 1, 2, 3, 4, 4], - dtype=object, - name="foo", - ) - tm.assert_series_equal(result, expected) - - -def test_empty(): - s = pd.Series(dtype=object) - result = s.explode() - expected = s.copy() - tm.assert_series_equal(result, expected) - - -def test_nested_lists(): - s = pd.Series([[[1, 2, 3]], [1, 2], 1]) - result = s.explode() - expected = pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2]) - tm.assert_series_equal(result, expected) - - -def test_multi_index(): - s = pd.Series( - [[0, 1, 2], np.nan, [], (3, 4)], - name="foo", - index=pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", "bar"]), - ) - result = s.explode() - index = pd.MultiIndex.from_tuples( - [("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)], - names=["foo", "bar"], - ) - expected = pd.Series( - [0, 1, 2, np.nan, np.nan, 3, 4], index=index, dtype=object, name="foo" - ) - tm.assert_series_equal(result, expected) - - -def test_large(): - s = pd.Series([range(256)]).explode() - result = s.explode() - tm.assert_series_equal(result, s) - - -def test_invert_array(): - df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")}) - - listify = df.apply(lambda x: x.array, axis=1) - result = listify.explode() - tm.assert_series_equal(result, df["a"].rename()) - - -@pytest.mark.parametrize( - "s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))] -) -def test_non_object_dtype(s): - result = s.explode() - tm.assert_series_equal(result, s) - - -def test_typical_usecase(): - df = pd.DataFrame( - [{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}], - columns=["var1", "var2"], - ) - exploded = df.var1.str.split(",").explode() - result = df[["var2"]].join(exploded) 
- expected = pd.DataFrame( - {"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")}, - columns=["var2", "var1"], - index=[0, 0, 0, 1, 1, 1], - ) - tm.assert_frame_equal(result, expected) - - -def test_nested_EA(): - # a nested EA array - s = pd.Series( - [ - pd.date_range("20170101", periods=3, tz="UTC"), - pd.date_range("20170104", periods=3, tz="UTC"), - ] - ) - result = s.explode() - expected = pd.Series( - pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1] - ) - tm.assert_series_equal(result, expected) - - -def test_duplicate_index(): - # GH 28005 - s = pd.Series([[1, 2], [3, 4]], index=[0, 0]) - result = s.explode() - expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object) - tm.assert_series_equal(result, expected) - - -def test_ignore_index(): - # GH 34932 - s = pd.Series([[1, 2], [3, 4]]) - result = s.explode(ignore_index=True) - expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object) - tm.assert_series_equal(result, expected) - - -def test_explode_sets(): - # https://github.com/pandas-dev/pandas/issues/35614 - s = pd.Series([{"a", "b", "c"}], index=[1]) - result = s.explode().sort_values() - expected = pd.Series(["a", "b", "c"], index=[1, 1, 1]) - tm.assert_series_equal(result, expected) - - -def test_explode_scalars_can_ignore_index(): - # https://github.com/pandas-dev/pandas/issues/40487 - s = pd.Series([1, 2, 3], index=["a", "b", "c"]) - result = s.explode(ignore_index=True) - expected = pd.Series([1, 2, 3]) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize("ignore_index", [True, False]) -def test_explode_pyarrow_list_type(ignore_index): - # GH 53602 - pa = pytest.importorskip("pyarrow") - - data = [ - [None, None], - [1], - [], - [2, 3], - None, - ] - ser = pd.Series(data, dtype=pd.ArrowDtype(pa.list_(pa.int64()))) - result = ser.explode(ignore_index=ignore_index) - expected = pd.Series( - data=[None, None, 1, None, 2, 3, None], - index=None if ignore_index else [0, 0, 1, 2, 3, 3, 4], - dtype=pd.ArrowDtype(pa.int64()), - ) - tm.assert_series_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_cumulative.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_cumulative.py deleted file mode 100644 index e6f7b2a5e69e0a97e2f898c6a665372ba3ec2a6b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_cumulative.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -Tests for Series cumulative operations. 
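The explode tests above cover list-likes, empty and NaN entries, MultiIndex, duplicate indexes, sets and pyarrow list dtypes. The basic contract, sketched with the public API:

import numpy as np
import pandas as pd

s = pd.Series([[0, 1], np.nan, [], (2, 3)], index=list("abcd"))
print(s.explode())
# a      0
# a      1
# b    NaN
# c    NaN
# d      2
# d      3
# dtype: object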
- -See also --------- -tests.frame.test_cumulative -""" - -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm - -methods = { - "cumsum": np.cumsum, - "cumprod": np.cumprod, - "cummin": np.minimum.accumulate, - "cummax": np.maximum.accumulate, -} - - -class TestSeriesCumulativeOps: - @pytest.mark.parametrize("func", [np.cumsum, np.cumprod]) - def test_datetime_series(self, datetime_series, func): - tm.assert_numpy_array_equal( - func(datetime_series).values, - func(np.array(datetime_series)), - check_dtype=True, - ) - - # with missing values - ts = datetime_series.copy() - ts[::2] = np.nan - - result = func(ts)[1::2] - expected = func(np.array(ts.dropna())) - - tm.assert_numpy_array_equal(result.values, expected, check_dtype=False) - - @pytest.mark.parametrize("method", ["cummin", "cummax"]) - def test_cummin_cummax(self, datetime_series, method): - ufunc = methods[method] - - result = getattr(datetime_series, method)().values - expected = ufunc(np.array(datetime_series)) - - tm.assert_numpy_array_equal(result, expected) - ts = datetime_series.copy() - ts[::2] = np.nan - result = getattr(ts, method)()[1::2] - expected = ufunc(ts.dropna()) - - result.index = result.index._with_freq(None) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "ts", - [ - pd.Timedelta(0), - pd.Timestamp("1999-12-31"), - pd.Timestamp("1999-12-31").tz_localize("US/Pacific"), - ], - ) - @pytest.mark.parametrize( - "method, skipna, exp_tdi", - [ - ["cummax", True, ["NaT", "2 days", "NaT", "2 days", "NaT", "3 days"]], - ["cummin", True, ["NaT", "2 days", "NaT", "1 days", "NaT", "1 days"]], - [ - "cummax", - False, - ["NaT", "NaT", "NaT", "NaT", "NaT", "NaT"], - ], - [ - "cummin", - False, - ["NaT", "NaT", "NaT", "NaT", "NaT", "NaT"], - ], - ], - ) - def test_cummin_cummax_datetimelike(self, ts, method, skipna, exp_tdi): - # with ts==pd.Timedelta(0), we are testing td64; with naive Timestamp - # we are testing datetime64[ns]; with Timestamp[US/Pacific] - # we are testing dt64tz - tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "1 days", "NaT", "3 days"]) - ser = pd.Series(tdi + ts) - - exp_tdi = pd.to_timedelta(exp_tdi) - expected = pd.Series(exp_tdi + ts) - result = getattr(ser, method)(skipna=skipna) - tm.assert_series_equal(expected, result) - - @pytest.mark.parametrize( - "func, exp", - [ - ("cummin", pd.Period("2012-1-1", freq="D")), - ("cummax", pd.Period("2012-1-2", freq="D")), - ], - ) - def test_cummin_cummax_period(self, func, exp): - # GH#28385 - ser = pd.Series( - [pd.Period("2012-1-1", freq="D"), pd.NaT, pd.Period("2012-1-2", freq="D")] - ) - result = getattr(ser, func)(skipna=False) - expected = pd.Series([pd.Period("2012-1-1", freq="D"), pd.NaT, pd.NaT]) - tm.assert_series_equal(result, expected) - - result = getattr(ser, func)(skipna=True) - expected = pd.Series([pd.Period("2012-1-1", freq="D"), pd.NaT, exp]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "arg", - [ - [False, False, False, True, True, False, False], - [False, False, False, False, False, False, False], - ], - ) - @pytest.mark.parametrize( - "func", [lambda x: x, lambda x: ~x], ids=["identity", "inverse"] - ) - @pytest.mark.parametrize("method", methods.keys()) - def test_cummethods_bool(self, arg, func, method): - # GH#6270 - # checking Series method vs the ufunc applied to the values - - ser = func(pd.Series(arg)) - ufunc = methods[method] - - exp_vals = ufunc(ser.values) - expected = pd.Series(exp_vals) - - result = getattr(ser, method)() - - 
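These parametrizations compare Series cumulative methods against the corresponding NumPy accumulations, including how missing values interact with skipna. A short sketch of that behaviour:

import numpy as np
import pandas as pd

s = pd.Series([2.0, np.nan, 1.0, 3.0])

print(s.cummax().tolist())              # [2.0, nan, 2.0, 3.0]  NaN skipped
print(s.cummax(skipna=False).tolist())  # [2.0, nan, nan, nan]  NaN poisons the running max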
tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "method, expected", - [ - ["cumsum", pd.Series([0, 1, np.nan, 1], dtype=object)], - ["cumprod", pd.Series([False, 0, np.nan, 0])], - ["cummin", pd.Series([False, False, np.nan, False])], - ["cummax", pd.Series([False, True, np.nan, True])], - ], - ) - def test_cummethods_bool_in_object_dtype(self, method, expected): - ser = pd.Series([False, True, np.nan, False]) - result = getattr(ser, method)() - tm.assert_series_equal(result, expected) - - def test_cumprod_timedelta(self): - # GH#48111 - ser = pd.Series([pd.Timedelta(days=1), pd.Timedelta(days=3)]) - with pytest.raises(TypeError, match="cumprod not supported for Timedelta"): - ser.cumprod() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/html5lib/treebuilders/etree.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/html5lib/treebuilders/etree.py deleted file mode 100644 index ea92dc301fe3fcf2ec9839c39c7844ae9f5df614..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/html5lib/treebuilders/etree.py +++ /dev/null @@ -1,343 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -# pylint:disable=protected-access - -from pip._vendor.six import text_type - -import re - -from copy import copy - -from . import base -from .. import _ihatexml -from .. import constants -from ..constants import namespaces -from .._utils import moduleFactoryFactory - -tag_regexp = re.compile("{([^}]*)}(.*)") - - -def getETreeBuilder(ElementTreeImplementation, fullTree=False): - ElementTree = ElementTreeImplementation - ElementTreeCommentType = ElementTree.Comment("asd").tag - - class Element(base.Node): - def __init__(self, name, namespace=None): - self._name = name - self._namespace = namespace - self._element = ElementTree.Element(self._getETreeTag(name, - namespace)) - if namespace is None: - self.nameTuple = namespaces["html"], self._name - else: - self.nameTuple = self._namespace, self._name - self.parent = None - self._childNodes = [] - self._flags = [] - - def _getETreeTag(self, name, namespace): - if namespace is None: - etree_tag = name - else: - etree_tag = "{%s}%s" % (namespace, name) - return etree_tag - - def _setName(self, name): - self._name = name - self._element.tag = self._getETreeTag(self._name, self._namespace) - - def _getName(self): - return self._name - - name = property(_getName, _setName) - - def _setNamespace(self, namespace): - self._namespace = namespace - self._element.tag = self._getETreeTag(self._name, self._namespace) - - def _getNamespace(self): - return self._namespace - - namespace = property(_getNamespace, _setNamespace) - - def _getAttributes(self): - return self._element.attrib - - def _setAttributes(self, attributes): - el_attrib = self._element.attrib - el_attrib.clear() - if attributes: - # calling .items _always_ allocates, and the above truthy check is cheaper than the - # allocation on average - for key, value in attributes.items(): - if isinstance(key, tuple): - name = "{%s}%s" % (key[2], key[1]) - else: - name = key - el_attrib[name] = value - - attributes = property(_getAttributes, _setAttributes) - - def _getChildNodes(self): - return self._childNodes - - def _setChildNodes(self, value): - del self._element[:] - self._childNodes = [] - for element in value: - self.insertChild(element) - - childNodes = property(_getChildNodes, _setChildNodes) - - def hasContent(self): - """Return true if 
the node has children or text""" - return bool(self._element.text or len(self._element)) - - def appendChild(self, node): - self._childNodes.append(node) - self._element.append(node._element) - node.parent = self - - def insertBefore(self, node, refNode): - index = list(self._element).index(refNode._element) - self._element.insert(index, node._element) - node.parent = self - - def removeChild(self, node): - self._childNodes.remove(node) - self._element.remove(node._element) - node.parent = None - - def insertText(self, data, insertBefore=None): - if not(len(self._element)): - if not self._element.text: - self._element.text = "" - self._element.text += data - elif insertBefore is None: - # Insert the text as the tail of the last child element - if not self._element[-1].tail: - self._element[-1].tail = "" - self._element[-1].tail += data - else: - # Insert the text before the specified node - children = list(self._element) - index = children.index(insertBefore._element) - if index > 0: - if not self._element[index - 1].tail: - self._element[index - 1].tail = "" - self._element[index - 1].tail += data - else: - if not self._element.text: - self._element.text = "" - self._element.text += data - - def cloneNode(self): - element = type(self)(self.name, self.namespace) - if self._element.attrib: - element._element.attrib = copy(self._element.attrib) - return element - - def reparentChildren(self, newParent): - if newParent.childNodes: - newParent.childNodes[-1]._element.tail += self._element.text - else: - if not newParent._element.text: - newParent._element.text = "" - if self._element.text is not None: - newParent._element.text += self._element.text - self._element.text = "" - base.Node.reparentChildren(self, newParent) - - class Comment(Element): - def __init__(self, data): - # Use the superclass constructor to set all properties on the - # wrapper element - self._element = ElementTree.Comment(data) - self.parent = None - self._childNodes = [] - self._flags = [] - - def _getData(self): - return self._element.text - - def _setData(self, value): - self._element.text = value - - data = property(_getData, _setData) - - class DocumentType(Element): - def __init__(self, name, publicId, systemId): - Element.__init__(self, "") - self._element.text = name - self.publicId = publicId - self.systemId = systemId - - def _getPublicId(self): - return self._element.get("publicId", "") - - def _setPublicId(self, value): - if value is not None: - self._element.set("publicId", value) - - publicId = property(_getPublicId, _setPublicId) - - def _getSystemId(self): - return self._element.get("systemId", "") - - def _setSystemId(self, value): - if value is not None: - self._element.set("systemId", value) - - systemId = property(_getSystemId, _setSystemId) - - class Document(Element): - def __init__(self): - Element.__init__(self, "DOCUMENT_ROOT") - - class DocumentFragment(Element): - def __init__(self): - Element.__init__(self, "DOCUMENT_FRAGMENT") - - def testSerializer(element): - rv = [] - - def serializeElement(element, indent=0): - if not(hasattr(element, "tag")): - element = element.getroot() - if element.tag == "": - if element.get("publicId") or element.get("systemId"): - publicId = element.get("publicId") or "" - systemId = element.get("systemId") or "" - rv.append("""""" % - (element.text, publicId, systemId)) - else: - rv.append("" % (element.text,)) - elif element.tag == "DOCUMENT_ROOT": - rv.append("#document") - if element.text is not None: - rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) 
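The Element/Comment/DocumentType wrappers above adapt ElementTree nodes to html5lib's treebuilder Node interface; this module backs html5lib's default "etree" tree. Typical use goes through the public parser, roughly:

import html5lib

# Builds an xml.etree.ElementTree tree via this treebuilder.
doc = html5lib.parse("<p class='x'>hi</p>", treebuilder="etree",
                     namespaceHTMLElements=False)
print(doc.find("body/p").get("class"))  # 'x'
print(doc.find("body/p").text)          # 'hi'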
- if element.tail is not None: - raise TypeError("Document node cannot have tail") - if hasattr(element, "attrib") and len(element.attrib): - raise TypeError("Document node cannot have attributes") - elif element.tag == ElementTreeCommentType: - rv.append("|%s" % (' ' * indent, element.text)) - else: - assert isinstance(element.tag, text_type), \ - "Expected unicode, got %s, %s" % (type(element.tag), element.tag) - nsmatch = tag_regexp.match(element.tag) - - if nsmatch is None: - name = element.tag - else: - ns, name = nsmatch.groups() - prefix = constants.prefixes[ns] - name = "%s %s" % (prefix, name) - rv.append("|%s<%s>" % (' ' * indent, name)) - - if hasattr(element, "attrib"): - attributes = [] - for name, value in element.attrib.items(): - nsmatch = tag_regexp.match(name) - if nsmatch is not None: - ns, name = nsmatch.groups() - prefix = constants.prefixes[ns] - attr_string = "%s %s" % (prefix, name) - else: - attr_string = name - attributes.append((attr_string, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - if element.text: - rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) - indent += 2 - for child in element: - serializeElement(child, indent) - if element.tail: - rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) - serializeElement(element, 0) - - return "\n".join(rv) - - def tostring(element): # pylint:disable=unused-variable - """Serialize an element and its child nodes to a string""" - rv = [] - filter = _ihatexml.InfosetFilter() - - def serializeElement(element): - if isinstance(element, ElementTree.ElementTree): - element = element.getroot() - - if element.tag == "": - if element.get("publicId") or element.get("systemId"): - publicId = element.get("publicId") or "" - systemId = element.get("systemId") or "" - rv.append("""""" % - (element.text, publicId, systemId)) - else: - rv.append("" % (element.text,)) - elif element.tag == "DOCUMENT_ROOT": - if element.text is not None: - rv.append(element.text) - if element.tail is not None: - raise TypeError("Document node cannot have tail") - if hasattr(element, "attrib") and len(element.attrib): - raise TypeError("Document node cannot have attributes") - - for child in element: - serializeElement(child) - - elif element.tag == ElementTreeCommentType: - rv.append("" % (element.text,)) - else: - # This is assumed to be an ordinary element - if not element.attrib: - rv.append("<%s>" % (filter.fromXmlName(element.tag),)) - else: - attr = " ".join(["%s=\"%s\"" % ( - filter.fromXmlName(name), value) - for name, value in element.attrib.items()]) - rv.append("<%s %s>" % (element.tag, attr)) - if element.text: - rv.append(element.text) - - for child in element: - serializeElement(child) - - rv.append("" % (element.tag,)) - - if element.tail: - rv.append(element.tail) - - serializeElement(element) - - return "".join(rv) - - class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable - documentClass = Document - doctypeClass = DocumentType - elementClass = Element - commentClass = Comment - fragmentClass = DocumentFragment - implementation = ElementTreeImplementation - - def testSerializer(self, element): - return testSerializer(element) - - def getDocument(self): - if fullTree: - return self.document._element - else: - if self.defaultNamespace is not None: - return self.document._element.find( - "{%s}html" % self.defaultNamespace) - else: - return self.document._element.find("html") - - def getFragment(self): - return 
base.TreeBuilder.getFragment(self)._element - - return locals() - - -getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pygments/lexer.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pygments/lexer.py deleted file mode 100644 index b6d4b238845b15a3cfed958607ec41d35c9f425e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pygments/lexer.py +++ /dev/null @@ -1,879 +0,0 @@ -""" - pygments.lexer - ~~~~~~~~~~~~~~ - - Base lexer classes. - - :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -import sys -import time - -from pip._vendor.pygments.filter import apply_filters, Filter -from pip._vendor.pygments.filters import get_filter_by_name -from pip._vendor.pygments.token import Error, Text, Other, _TokenType -from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ - make_analysator, Future, guess_decode -from pip._vendor.pygments.regexopt import regex_opt - -__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', - 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', - 'default', 'words'] - - -_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), - (b'\xff\xfe\0\0', 'utf-32'), - (b'\0\0\xfe\xff', 'utf-32be'), - (b'\xff\xfe', 'utf-16'), - (b'\xfe\xff', 'utf-16be')] - -_default_analyse = staticmethod(lambda x: 0.0) - - -class LexerMeta(type): - """ - This metaclass automagically converts ``analyse_text`` methods into - static methods which always return float values. - """ - - def __new__(mcs, name, bases, d): - if 'analyse_text' in d: - d['analyse_text'] = make_analysator(d['analyse_text']) - return type.__new__(mcs, name, bases, d) - - -class Lexer(metaclass=LexerMeta): - """ - Lexer for a specific language. - - Basic options recognized: - ``stripnl`` - Strip leading and trailing newlines from the input (default: True). - ``stripall`` - Strip all leading and trailing whitespace from the input - (default: False). - ``ensurenl`` - Make sure that the input ends with a newline (default: True). This - is required for some lexers that consume input linewise. - - .. versionadded:: 1.3 - - ``tabsize`` - If given and greater than 0, expand tabs in the input (default: 0). - ``encoding`` - If given, must be an encoding name. This encoding will be used to - convert the input string to Unicode, if it is not already a Unicode - string (default: ``'guess'``, which uses a simple UTF-8 / Locale / - Latin1 detection. Can also be ``'chardet'`` to use the chardet - library, if it is installed. - ``inencoding`` - Overrides the ``encoding`` if given. 
- """ - - #: Name of the lexer - name = None - - #: Shortcuts for the lexer - aliases = [] - - #: File name globs - filenames = [] - - #: Secondary file name globs - alias_filenames = [] - - #: MIME types - mimetypes = [] - - #: Priority, should multiple lexers match and no content is provided - priority = 0 - - def __init__(self, **options): - self.options = options - self.stripnl = get_bool_opt(options, 'stripnl', True) - self.stripall = get_bool_opt(options, 'stripall', False) - self.ensurenl = get_bool_opt(options, 'ensurenl', True) - self.tabsize = get_int_opt(options, 'tabsize', 0) - self.encoding = options.get('encoding', 'guess') - self.encoding = options.get('inencoding') or self.encoding - self.filters = [] - for filter_ in get_list_opt(options, 'filters', ()): - self.add_filter(filter_) - - def __repr__(self): - if self.options: - return '' % (self.__class__.__name__, - self.options) - else: - return '' % self.__class__.__name__ - - def add_filter(self, filter_, **options): - """ - Add a new stream filter to this lexer. - """ - if not isinstance(filter_, Filter): - filter_ = get_filter_by_name(filter_, **options) - self.filters.append(filter_) - - def analyse_text(text): - """ - Has to return a float between ``0`` and ``1`` that indicates - if a lexer wants to highlight this text. Used by ``guess_lexer``. - If this method returns ``0`` it won't highlight it in any case, if - it returns ``1`` highlighting with this lexer is guaranteed. - - The `LexerMeta` metaclass automatically wraps this function so - that it works like a static method (no ``self`` or ``cls`` - parameter) and the return value is automatically converted to - `float`. If the return value is an object that is boolean `False` - it's the same as if the return values was ``0.0``. - """ - - def get_tokens(self, text, unfiltered=False): - """ - Return an iterable of (tokentype, value) pairs generated from - `text`. If `unfiltered` is set to `True`, the filtering mechanism - is bypassed even if filters are defined. - - Also preprocess the text, i.e. expand tabs and strip it if - wanted and applies registered filters. 
- """ - if not isinstance(text, str): - if self.encoding == 'guess': - text, _ = guess_decode(text) - elif self.encoding == 'chardet': - try: - from pip._vendor import chardet - except ImportError as e: - raise ImportError('To enable chardet encoding guessing, ' - 'please install the chardet library ' - 'from http://chardet.feedparser.org/') from e - # check for BOM first - decoded = None - for bom, encoding in _encoding_map: - if text.startswith(bom): - decoded = text[len(bom):].decode(encoding, 'replace') - break - # no BOM found, so use chardet - if decoded is None: - enc = chardet.detect(text[:1024]) # Guess using first 1KB - decoded = text.decode(enc.get('encoding') or 'utf-8', - 'replace') - text = decoded - else: - text = text.decode(self.encoding) - if text.startswith('\ufeff'): - text = text[len('\ufeff'):] - else: - if text.startswith('\ufeff'): - text = text[len('\ufeff'):] - - # text now *is* a unicode string - text = text.replace('\r\n', '\n') - text = text.replace('\r', '\n') - if self.stripall: - text = text.strip() - elif self.stripnl: - text = text.strip('\n') - if self.tabsize > 0: - text = text.expandtabs(self.tabsize) - if self.ensurenl and not text.endswith('\n'): - text += '\n' - - def streamer(): - for _, t, v in self.get_tokens_unprocessed(text): - yield t, v - stream = streamer() - if not unfiltered: - stream = apply_filters(stream, self.filters, self) - return stream - - def get_tokens_unprocessed(self, text): - """ - Return an iterable of (index, tokentype, value) pairs where "index" - is the starting position of the token within the input text. - - In subclasses, implement this method as a generator to - maximize effectiveness. - """ - raise NotImplementedError - - -class DelegatingLexer(Lexer): - """ - This lexer takes two lexer as arguments. A root lexer and - a language lexer. First everything is scanned using the language - lexer, afterwards all ``Other`` tokens are lexed using the root - lexer. - - The lexers from the ``template`` lexer package use this base lexer. - """ - - def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): - self.root_lexer = _root_lexer(**options) - self.language_lexer = _language_lexer(**options) - self.needle = _needle - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - buffered = '' - insertions = [] - lng_buffer = [] - for i, t, v in self.language_lexer.get_tokens_unprocessed(text): - if t is self.needle: - if lng_buffer: - insertions.append((len(buffered), lng_buffer)) - lng_buffer = [] - buffered += v - else: - lng_buffer.append((i, t, v)) - if lng_buffer: - insertions.append((len(buffered), lng_buffer)) - return do_insertions(insertions, - self.root_lexer.get_tokens_unprocessed(buffered)) - - -# ------------------------------------------------------------------------------ -# RegexLexer and ExtendedRegexLexer -# - - -class include(str): # pylint: disable=invalid-name - """ - Indicates that a state should include rules from another state. - """ - pass - - -class _inherit: - """ - Indicates the a state should inherit from its superclass. - """ - def __repr__(self): - return 'inherit' - -inherit = _inherit() # pylint: disable=invalid-name - - -class combined(tuple): # pylint: disable=invalid-name - """ - Indicates a state combined from multiple states. - """ - - def __new__(cls, *args): - return tuple.__new__(cls, args) - - def __init__(self, *args): - # tuple.__init__ doesn't do anything - pass - - -class _PseudoMatch: - """ - A pseudo match object constructed from a string. 
- """ - - def __init__(self, start, text): - self._text = text - self._start = start - - def start(self, arg=None): - return self._start - - def end(self, arg=None): - return self._start + len(self._text) - - def group(self, arg=None): - if arg: - raise IndexError('No such group') - return self._text - - def groups(self): - return (self._text,) - - def groupdict(self): - return {} - - -def bygroups(*args): - """ - Callback that yields multiple actions for each group in the match. - """ - def callback(lexer, match, ctx=None): - for i, action in enumerate(args): - if action is None: - continue - elif type(action) is _TokenType: - data = match.group(i + 1) - if data: - yield match.start(i + 1), action, data - else: - data = match.group(i + 1) - if data is not None: - if ctx: - ctx.pos = match.start(i + 1) - for item in action(lexer, - _PseudoMatch(match.start(i + 1), data), ctx): - if item: - yield item - if ctx: - ctx.pos = match.end() - return callback - - -class _This: - """ - Special singleton used for indicating the caller class. - Used by ``using``. - """ - -this = _This() - - -def using(_other, **kwargs): - """ - Callback that processes the match with a different lexer. - - The keyword arguments are forwarded to the lexer, except `state` which - is handled separately. - - `state` specifies the state that the new lexer will start in, and can - be an enumerable such as ('root', 'inline', 'string') or a simple - string which is assumed to be on top of the root state. - - Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. - """ - gt_kwargs = {} - if 'state' in kwargs: - s = kwargs.pop('state') - if isinstance(s, (list, tuple)): - gt_kwargs['stack'] = s - else: - gt_kwargs['stack'] = ('root', s) - - if _other is this: - def callback(lexer, match, ctx=None): - # if keyword arguments are given the callback - # function has to create a new lexer instance - if kwargs: - # XXX: cache that somehow - kwargs.update(lexer.options) - lx = lexer.__class__(**kwargs) - else: - lx = lexer - s = match.start() - for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): - yield i + s, t, v - if ctx: - ctx.pos = match.end() - else: - def callback(lexer, match, ctx=None): - # XXX: cache that somehow - kwargs.update(lexer.options) - lx = _other(**kwargs) - - s = match.start() - for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): - yield i + s, t, v - if ctx: - ctx.pos = match.end() - return callback - - -class default: - """ - Indicates a state or state action (e.g. #pop) to apply. - For example default('#pop') is equivalent to ('', Token, '#pop') - Note that state tuples may be used as well. - - .. versionadded:: 2.0 - """ - def __init__(self, state): - self.state = state - - -class words(Future): - """ - Indicates a list of literal words that is transformed into an optimized - regex that matches any of the words. - - .. versionadded:: 2.0 - """ - def __init__(self, words, prefix='', suffix=''): - self.words = words - self.prefix = prefix - self.suffix = suffix - - def get(self): - return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix) - - -class RegexLexerMeta(LexerMeta): - """ - Metaclass for RegexLexer, creates the self._tokens attribute from - self.tokens on the first instantiation. 
- """ - - def _process_regex(cls, regex, rflags, state): - """Preprocess the regular expression component of a token definition.""" - if isinstance(regex, Future): - regex = regex.get() - return re.compile(regex, rflags).match - - def _process_token(cls, token): - """Preprocess the token component of a token definition.""" - assert type(token) is _TokenType or callable(token), \ - 'token type must be simple type or callable, not %r' % (token,) - return token - - def _process_new_state(cls, new_state, unprocessed, processed): - """Preprocess the state transition action of a token definition.""" - if isinstance(new_state, str): - # an existing state - if new_state == '#pop': - return -1 - elif new_state in unprocessed: - return (new_state,) - elif new_state == '#push': - return new_state - elif new_state[:5] == '#pop:': - return -int(new_state[5:]) - else: - assert False, 'unknown new state %r' % new_state - elif isinstance(new_state, combined): - # combine a new state from existing ones - tmp_state = '_tmp_%d' % cls._tmpname - cls._tmpname += 1 - itokens = [] - for istate in new_state: - assert istate != new_state, 'circular state ref %r' % istate - itokens.extend(cls._process_state(unprocessed, - processed, istate)) - processed[tmp_state] = itokens - return (tmp_state,) - elif isinstance(new_state, tuple): - # push more than one state - for istate in new_state: - assert (istate in unprocessed or - istate in ('#pop', '#push')), \ - 'unknown new state ' + istate - return new_state - else: - assert False, 'unknown new state def %r' % new_state - - def _process_state(cls, unprocessed, processed, state): - """Preprocess a single state definition.""" - assert type(state) is str, "wrong state name %r" % state - assert state[0] != '#', "invalid state name %r" % state - if state in processed: - return processed[state] - tokens = processed[state] = [] - rflags = cls.flags - for tdef in unprocessed[state]: - if isinstance(tdef, include): - # it's a state reference - assert tdef != state, "circular state reference %r" % state - tokens.extend(cls._process_state(unprocessed, processed, - str(tdef))) - continue - if isinstance(tdef, _inherit): - # should be processed already, but may not in the case of: - # 1. the state has no counterpart in any parent - # 2. the state includes more than one 'inherit' - continue - if isinstance(tdef, default): - new_state = cls._process_new_state(tdef.state, unprocessed, processed) - tokens.append((re.compile('').match, None, new_state)) - continue - - assert type(tdef) is tuple, "wrong rule def %r" % tdef - - try: - rex = cls._process_regex(tdef[0], rflags, state) - except Exception as err: - raise ValueError("uncompilable regex %r in state %r of %r: %s" % - (tdef[0], state, cls, err)) from err - - token = cls._process_token(tdef[1]) - - if len(tdef) == 2: - new_state = None - else: - new_state = cls._process_new_state(tdef[2], - unprocessed, processed) - - tokens.append((rex, token, new_state)) - return tokens - - def process_tokendef(cls, name, tokendefs=None): - """Preprocess a dictionary of token definitions.""" - processed = cls._all_tokens[name] = {} - tokendefs = tokendefs or cls.tokens[name] - for state in list(tokendefs): - cls._process_state(tokendefs, processed, state) - return processed - - def get_tokendefs(cls): - """ - Merge tokens from superclasses in MRO order, returning a single tokendef - dictionary. - - Any state that is not defined by a subclass will be inherited - automatically. 
States that *are* defined by subclasses will, by - default, override that state in the superclass. If a subclass wishes to - inherit definitions from a superclass, it can use the special value - "inherit", which will cause the superclass' state definition to be - included at that point in the state. - """ - tokens = {} - inheritable = {} - for c in cls.__mro__: - toks = c.__dict__.get('tokens', {}) - - for state, items in toks.items(): - curitems = tokens.get(state) - if curitems is None: - # N.b. because this is assigned by reference, sufficiently - # deep hierarchies are processed incrementally (e.g. for - # A(B), B(C), C(RegexLexer), B will be premodified so X(B) - # will not see any inherits in B). - tokens[state] = items - try: - inherit_ndx = items.index(inherit) - except ValueError: - continue - inheritable[state] = inherit_ndx - continue - - inherit_ndx = inheritable.pop(state, None) - if inherit_ndx is None: - continue - - # Replace the "inherit" value with the items - curitems[inherit_ndx:inherit_ndx+1] = items - try: - # N.b. this is the index in items (that is, the superclass - # copy), so offset required when storing below. - new_inh_ndx = items.index(inherit) - except ValueError: - pass - else: - inheritable[state] = inherit_ndx + new_inh_ndx - - return tokens - - def __call__(cls, *args, **kwds): - """Instantiate cls after preprocessing its token definitions.""" - if '_tokens' not in cls.__dict__: - cls._all_tokens = {} - cls._tmpname = 0 - if hasattr(cls, 'token_variants') and cls.token_variants: - # don't process yet - pass - else: - cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) - - return type.__call__(cls, *args, **kwds) - - -class RegexLexer(Lexer, metaclass=RegexLexerMeta): - """ - Base for simple stateful regular expression-based lexers. - Simplifies the lexing process so that you need only - provide a list of states and regular expressions. - """ - - #: Flags for compiling the regular expressions. - #: Defaults to MULTILINE. - flags = re.MULTILINE - - #: At all time there is a stack of states. Initially, the stack contains - #: a single state 'root'. The top of the stack is called "the current state". - #: - #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` - #: - #: ``new_state`` can be omitted to signify no state transition. - #: If ``new_state`` is a string, it is pushed on the stack. This ensure - #: the new current state is ``new_state``. - #: If ``new_state`` is a tuple of strings, all of those strings are pushed - #: on the stack and the current state will be the last element of the list. - #: ``new_state`` can also be ``combined('state1', 'state2', ...)`` - #: to signify a new, anonymous state combined from the rules of two - #: or more existing ones. - #: Furthermore, it can be '#pop' to signify going back one step in - #: the state stack, or '#push' to push the current state on the stack - #: again. Note that if you push while in a combined state, the combined - #: state itself is pushed, and not only the state in which the rule is - #: defined. - #: - #: The tuple can also be replaced with ``include('state')``, in which - #: case the rules from the state named by the string are included in the - #: current one. - tokens = {} - - def get_tokens_unprocessed(self, text, stack=('root',)): - """ - Split ``text`` into (tokentype, text) pairs. 
- - ``stack`` is the inital stack (default: ``['root']``) - """ - pos = 0 - tokendefs = self._tokens - statestack = list(stack) - statetokens = tokendefs[statestack[-1]] - while 1: - for rexmatch, action, new_state in statetokens: - m = rexmatch(text, pos) - if m: - if action is not None: - if type(action) is _TokenType: - yield pos, action, m.group() - else: - yield from action(self, m) - pos = m.end() - if new_state is not None: - # state transition - if isinstance(new_state, tuple): - for state in new_state: - if state == '#pop': - if len(statestack) > 1: - statestack.pop() - elif state == '#push': - statestack.append(statestack[-1]) - else: - statestack.append(state) - elif isinstance(new_state, int): - # pop, but keep at least one state on the stack - # (random code leading to unexpected pops should - # not allow exceptions) - if abs(new_state) >= len(statestack): - del statestack[1:] - else: - del statestack[new_state:] - elif new_state == '#push': - statestack.append(statestack[-1]) - else: - assert False, "wrong state def: %r" % new_state - statetokens = tokendefs[statestack[-1]] - break - else: - # We are here only if all state tokens have been considered - # and there was not a match on any of them. - try: - if text[pos] == '\n': - # at EOL, reset state to "root" - statestack = ['root'] - statetokens = tokendefs['root'] - yield pos, Text, '\n' - pos += 1 - continue - yield pos, Error, text[pos] - pos += 1 - except IndexError: - break - - -class LexerContext: - """ - A helper object that holds lexer position data. - """ - - def __init__(self, text, pos, stack=None, end=None): - self.text = text - self.pos = pos - self.end = end or len(text) # end=0 not supported ;-) - self.stack = stack or ['root'] - - def __repr__(self): - return 'LexerContext(%r, %r, %r)' % ( - self.text, self.pos, self.stack) - - -class ExtendedRegexLexer(RegexLexer): - """ - A RegexLexer that uses a context object to store its state. - """ - - def get_tokens_unprocessed(self, text=None, context=None): - """ - Split ``text`` into (tokentype, text) pairs. - If ``context`` is given, use this lexer context instead. - """ - tokendefs = self._tokens - if not context: - ctx = LexerContext(text, 0) - statetokens = tokendefs['root'] - else: - ctx = context - statetokens = tokendefs[ctx.stack[-1]] - text = ctx.text - while 1: - for rexmatch, action, new_state in statetokens: - m = rexmatch(text, ctx.pos, ctx.end) - if m: - if action is not None: - if type(action) is _TokenType: - yield ctx.pos, action, m.group() - ctx.pos = m.end() - else: - yield from action(self, m, ctx) - if not new_state: - # altered the state stack? - statetokens = tokendefs[ctx.stack[-1]] - # CAUTION: callback must set ctx.pos! 
- if new_state is not None: - # state transition - if isinstance(new_state, tuple): - for state in new_state: - if state == '#pop': - if len(ctx.stack) > 1: - ctx.stack.pop() - elif state == '#push': - ctx.stack.append(ctx.stack[-1]) - else: - ctx.stack.append(state) - elif isinstance(new_state, int): - # see RegexLexer for why this check is made - if abs(new_state) >= len(ctx.stack): - del ctx.state[1:] - else: - del ctx.stack[new_state:] - elif new_state == '#push': - ctx.stack.append(ctx.stack[-1]) - else: - assert False, "wrong state def: %r" % new_state - statetokens = tokendefs[ctx.stack[-1]] - break - else: - try: - if ctx.pos >= ctx.end: - break - if text[ctx.pos] == '\n': - # at EOL, reset state to "root" - ctx.stack = ['root'] - statetokens = tokendefs['root'] - yield ctx.pos, Text, '\n' - ctx.pos += 1 - continue - yield ctx.pos, Error, text[ctx.pos] - ctx.pos += 1 - except IndexError: - break - - -def do_insertions(insertions, tokens): - """ - Helper for lexers which must combine the results of several - sublexers. - - ``insertions`` is a list of ``(index, itokens)`` pairs. - Each ``itokens`` iterable should be inserted at position - ``index`` into the token stream given by the ``tokens`` - argument. - - The result is a combined token stream. - - TODO: clean up the code here. - """ - insertions = iter(insertions) - try: - index, itokens = next(insertions) - except StopIteration: - # no insertions - yield from tokens - return - - realpos = None - insleft = True - - # iterate over the token stream where we want to insert - # the tokens from the insertion list. - for i, t, v in tokens: - # first iteration. store the postition of first item - if realpos is None: - realpos = i - oldi = 0 - while insleft and i + len(v) >= index: - tmpval = v[oldi:index - i] - if tmpval: - yield realpos, t, tmpval - realpos += len(tmpval) - for it_index, it_token, it_value in itokens: - yield realpos, it_token, it_value - realpos += len(it_value) - oldi = index - i - try: - index, itokens = next(insertions) - except StopIteration: - insleft = False - break # not strictly necessary - if oldi < len(v): - yield realpos, t, v[oldi:] - realpos += len(v) - oldi - - # leftover tokens - while insleft: - # no normal tokens, set realpos to zero - realpos = realpos or 0 - for p, t, v in itokens: - yield realpos, t, v - realpos += len(v) - try: - index, itokens = next(insertions) - except StopIteration: - insleft = False - break # not strictly necessary - - -class ProfilingRegexLexerMeta(RegexLexerMeta): - """Metaclass for ProfilingRegexLexer, collects regex timing info.""" - - def _process_regex(cls, regex, rflags, state): - if isinstance(regex, words): - rex = regex_opt(regex.words, prefix=regex.prefix, - suffix=regex.suffix) - else: - rex = regex - compiled = re.compile(rex, rflags) - - def match_func(text, pos, endpos=sys.maxsize): - info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0]) - t0 = time.time() - res = compiled.match(text, pos, endpos) - t1 = time.time() - info[0] += 1 - info[1] += t1 - t0 - return res - return match_func - - -class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta): - """Drop-in replacement for RegexLexer that does profiling of its regexes.""" - - _prof_data = [] - _prof_sort_index = 4 # defaults to time per call - - def get_tokens_unprocessed(self, text, stack=('root',)): - # this needs to be a stack, since using(this) will produce nested calls - self.__class__._prof_data.append({}) - yield from RegexLexer.get_tokens_unprocessed(self, text, stack) - rawdata 
= self.__class__._prof_data.pop() - data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65], - n, 1000 * t, 1000 * t / n) - for ((s, r), (n, t)) in rawdata.items()), - key=lambda x: x[self._prof_sort_index], - reverse=True) - sum_total = sum(x[3] for x in data) - - print() - print('Profiling result for %s lexing %d chars in %.3f ms' % - (self.__class__.__name__, len(text), sum_total)) - print('=' * 110) - print('%-20s %-64s ncalls tottime percall' % ('state', 'regex')) - print('-' * 110) - for d in data: - print('%-20s %-65s %5d %8.4f %8.4f' % d) - print('=' * 110) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tomli/_re.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tomli/_re.py deleted file mode 100644 index 3883fdd9c9069c3655321da77d16499506b49958..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tomli/_re.py +++ /dev/null @@ -1,83 +0,0 @@ -from datetime import date, datetime, time, timedelta, timezone, tzinfo -import re -from typing import TYPE_CHECKING, Any, Optional, Union - -if TYPE_CHECKING: - from re import Match - - from pip._vendor.tomli._parser import ParseFloat - -# E.g. -# - 00:32:00.999999 -# - 00:32:00 -_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?" - -RE_HEX = re.compile(r"[0-9A-Fa-f](?:_?[0-9A-Fa-f])*") -RE_BIN = re.compile(r"[01](?:_?[01])*") -RE_OCT = re.compile(r"[0-7](?:_?[0-7])*") -RE_NUMBER = re.compile( - r"[+-]?(?:0|[1-9](?:_?[0-9])*)" # integer - + r"(?:\.[0-9](?:_?[0-9])*)?" # optional fractional part - + r"(?:[eE][+-]?[0-9](?:_?[0-9])*)?" # optional exponent part -) -RE_LOCALTIME = re.compile(_TIME_RE_STR) -RE_DATETIME = re.compile( - r"([0-9]{4})-(0[1-9]|1[0-2])-(0[1-9]|1[0-9]|2[0-9]|3[01])" # date, e.g. 1988-10-27 - + r"(?:" - + r"[T ]" - + _TIME_RE_STR - + r"(?:(Z)|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?" # time offset - + r")?" -) - - -def match_to_datetime(match: "Match") -> Union[datetime, date]: - """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. - - Raises ValueError if the match does not correspond to a valid date - or datetime. - """ - ( - year_str, - month_str, - day_str, - hour_str, - minute_str, - sec_str, - micros_str, - zulu_time, - offset_dir_str, - offset_hour_str, - offset_minute_str, - ) = match.groups() - year, month, day = int(year_str), int(month_str), int(day_str) - if hour_str is None: - return date(year, month, day) - hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) - micros = int(micros_str[1:].ljust(6, "0")[:6]) if micros_str else 0 - if offset_dir_str: - offset_dir = 1 if offset_dir_str == "+" else -1 - tz: Optional[tzinfo] = timezone( - timedelta( - hours=offset_dir * int(offset_hour_str), - minutes=offset_dir * int(offset_minute_str), - ) - ) - elif zulu_time: - tz = timezone.utc - else: # local date-time - tz = None - return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) - - -def match_to_localtime(match: "Match") -> time: - hour_str, minute_str, sec_str, micros_str = match.groups() - micros = int(micros_str[1:].ljust(6, "0")[:6]) if micros_str else 0 - return time(int(hour_str), int(minute_str), int(sec_str), micros) - - -def match_to_number(match: "Match", parse_float: "ParseFloat") -> Any: - match_str = match.group() - if "." 
in match_str or "e" in match_str or "E" in match_str: - return parse_float(match_str) - return int(match_str) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/_types.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/_types.py deleted file mode 100644 index cc1847b5e69447bb934076be14b66766aedb22c1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/_types.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import TypeVar - - -WT = TypeVar("WT", bound="WrapperType") - -if TYPE_CHECKING: # pragma: no cover - # Define _CustomList and _CustomDict as a workaround for: - # https://github.com/python/mypy/issues/11427 - # - # According to this issue, the typeshed contains a "lie" - # (it adds MutableSequence to the ancestry of list and MutableMapping to - # the ancestry of dict) which completely messes with the type inference for - # Table, InlineTable, Array and Container. - # - # Importing from builtins is preferred over simple assignment, see issues: - # https://github.com/python/mypy/issues/8715 - # https://github.com/python/mypy/issues/10068 - from builtins import dict as _CustomDict # noqa: N812 - from builtins import float as _CustomFloat # noqa: N812 - from builtins import int as _CustomInt # noqa: N812 - from builtins import list as _CustomList # noqa: N812 - from typing import Callable - from typing import Concatenate - from typing import ParamSpec - from typing import Protocol - - P = ParamSpec("P") - - class WrapperType(Protocol): - def _new(self: WT, value: Any) -> WT: - ... - -else: - from collections.abc import MutableMapping - from collections.abc import MutableSequence - from numbers import Integral - from numbers import Real - - class _CustomList(MutableSequence, list): - """Adds MutableSequence mixin while pretending to be a builtin list""" - - class _CustomDict(MutableMapping, dict): - """Adds MutableMapping mixin while pretending to be a builtin dict""" - - class _CustomInt(Integral, int): - """Adds Integral mixin while pretending to be a builtin int""" - - class _CustomFloat(Real, float): - """Adds Real mixin while pretending to be a builtin float""" - - -def wrap_method( - original_method: Callable[Concatenate[WT, P], Any] -) -> Callable[Concatenate[WT, P], Any]: - def wrapper(self: WT, *args: P.args, **kwargs: P.kwargs) -> Any: - result = original_method(self, *args, **kwargs) - if result is NotImplemented: - return result - return self._new(result) - - return wrapper diff --git a/spaces/pycoming/bingo/src/components/ui/select.tsx b/spaces/pycoming/bingo/src/components/ui/select.tsx deleted file mode 100644 index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000 --- a/spaces/pycoming/bingo/src/components/ui/select.tsx +++ /dev/null @@ -1,123 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SelectPrimitive from '@radix-ui/react-select' - -import { cn } from '@/lib/utils' -import { - IconArrowDown, - IconCheck, - IconChevronUpDown -} from '@/components/ui/icons' - -const Select = SelectPrimitive.Root - -const SelectGroup = SelectPrimitive.Group - -const SelectValue = SelectPrimitive.Value - -const SelectTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - {children} - - - - -)) -SelectTrigger.displayName = 
SelectPrimitive.Trigger.displayName - -const SelectContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, position = 'popper', ...props }, ref) => ( - - - - {children} - - - -)) -SelectContent.displayName = SelectPrimitive.Content.displayName - -const SelectLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectLabel.displayName = SelectPrimitive.Label.displayName - -const SelectItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -SelectItem.displayName = SelectPrimitive.Item.displayName - -const SelectSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectSeparator.displayName = SelectPrimitive.Separator.displayName - -export { - Select, - SelectGroup, - SelectValue, - SelectTrigger, - SelectContent, - SelectLabel, - SelectItem, - SelectSeparator -} diff --git a/spaces/qinzhu/diy-girlfriend/monotonic_align/core.py b/spaces/qinzhu/diy-girlfriend/monotonic_align/core.py deleted file mode 100644 index 1f940605fe4fd0738fa0006149fcba14ef88223a..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/diy-girlfriend/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Bifrost 1.2b Private Build Setup Free [Extra Quality].md b/spaces/quidiaMuxgu/Expedit-SAM/Bifrost 1.2b Private Build Setup Free [Extra Quality].md deleted file mode 100644 index c56702d6398cd725dfaa5b791e06c37aa1c9e051..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Bifrost 1.2b Private Build Setup Free [Extra Quality].md +++ /dev/null @@ -1,32 +0,0 @@ -

            Bifrost 1.2b Private Build Setup Free


            Download ––– https://geags.com/2uCs2O



            -
            -I did not state this but the default keys are no longer supported and the game can break when starting with the wrong key bindings. So, you can ask me to re-make the setup and I will do it for free of charge. - -Description: - -Bifrost 1.2b Private Build Setup Free - -Because of errors in the new Command Blocks, you must have the setup file from the 1.1.1.7 (Forum Post) which is still posted here. Please know that this is for a single Private Build ONLY. I can not change the command blocks or update any of my posts. - -Extra quality (no Update) - -Caution! - -Before this setup file will work, you must replace the old Server.rul lines with the new ones below. This is to prevent the server from starting with the old settings. In addition, you can copy the old client.rul file and place it on your server. - -I am not responsible for any loss of data from installing this build. There is also a video of how this is done. - -For more updates on the new server.rul file and the required changes to your old server.rul, you can click the button below. - -Advertise in this thread - -I currently will not do any work on any issues relating to any patch or any build other than the one I created. For that, you can send me a PM or Email. This section is for topics that do not belong in this thread.There are, at present, several ways to add a custom ribbon (or tab) to Windows Explorer. You can make a new.xml file and move it into the Documents\Windows\explorer subfolder, add the XML and then associate it with Explorer. - -This is not always easy to achieve. In some cases the file is read-only, or you might have no access to a folder containing the subfolder where your ribbon resides. - -The Registry can be used to add a ribbon to Explorer. You can select the registry key to add a custom ribbon by going to Start -> right click on Computer and select Properties. Then navigate to the “Advanced” tab and then press the “Browse…” button. From there you can select the registry key you wish to access. - -So, let’s say you are in the My Computer key. Then press the right arrow button on the keyboard to go to the “Rec 4fefd39f24
            -
            -
            -

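For context on the monotonic_align/core.py file deleted a few hunks above: maximum_path_jit fills a batch of binary alignment matrices by dynamic programming over a grid of log-likelihoods, the monotonic alignment search used by VITS-style TTS models. The sketch below shows how such a kernel is typically driven; the array shapes follow the numba signature in the deleted file, while the import path, variable names and random inputs are assumptions for illustration, not part of the original repository.

import numpy as np
from monotonic_align.core import maximum_path_jit  # assumes the deleted module is importable

b, t_y, t_x = 1, 6, 4                                      # batch, decoder frames, text tokens
values = np.random.randn(b, t_y, t_x).astype(np.float32)   # per-cell log-likelihoods (mutated in place)
paths = np.zeros((b, t_y, t_x), dtype=np.int32)            # output: 0/1 monotonic alignment per item
t_ys = np.full(b, t_y, dtype=np.int32)                     # real decoder lengths per batch item
t_xs = np.full(b, t_x, dtype=np.int32)                     # real text lengths per batch item

# The forward pass accumulates the best monotonic score into `values`;
# the backward pass then marks the chosen path with 1s in `paths`.
maximum_path_jit(paths, values, t_ys, t_xs)
print(paths[0])  # each decoder frame maps to exactly one token, and the token index never decreases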
            diff --git a/spaces/radames/MusicGen-Continuation/tests/data/test_audio.py b/spaces/radames/MusicGen-Continuation/tests/data/test_audio.py deleted file mode 100644 index 40c0d5ed69eff92a766dc6d176e532f0df6c2b5e..0000000000000000000000000000000000000000 --- a/spaces/radames/MusicGen-Continuation/tests/data/test_audio.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product -import random - -import numpy as np -import torch -import torchaudio - -from audiocraft.data.audio import audio_info, audio_read, audio_write, _av_read - -from ..common_utils import TempDirMixin, get_white_noise, save_wav - - -class TestInfo(TempDirMixin): - - def test_info_mp3(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - wav = get_white_noise(ch, int(sample_rate * duration)) - path = self.get_temp_path('sample_wav.mp3') - save_wav(path, wav, sample_rate) - info = audio_info(path) - assert info.sample_rate == sample_rate - assert info.channels == ch - # we cannot trust torchaudio for num_frames, so we don't check - - def _test_info_format(self, ext: str): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path(f'sample_wav{ext}') - save_wav(path, wav, sample_rate) - info = audio_info(path) - assert info.sample_rate == sample_rate - assert info.channels == ch - assert np.isclose(info.duration, duration, atol=1e-5) - - def test_info_wav(self): - self._test_info_format('.wav') - - def test_info_flac(self): - self._test_info_format('.flac') - - def test_info_ogg(self): - self._test_info_format('.ogg') - - def test_info_m4a(self): - # TODO: generate m4a file programmatically - # self._test_info_format('.m4a') - pass - - -class TestRead(TempDirMixin): - - def test_read_full_wav(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99) - path = self.get_temp_path('sample_wav.wav') - save_wav(path, wav, sample_rate) - read_wav, read_sr = audio_read(path) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[1] == wav.shape[1] - assert torch.allclose(read_wav, wav, rtol=1e-03, atol=1e-04) - - def test_read_partial_wav(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - read_duration = torch.rand(1).item() - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - read_frames = int(sample_rate * read_duration) - wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99) - path = self.get_temp_path('sample_wav.wav') - save_wav(path, wav, sample_rate) - read_wav, read_sr = audio_read(path, 0, read_duration) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[1] == read_frames - assert torch.allclose(read_wav[..., 0:read_frames], wav[..., 0:read_frames], rtol=1e-03, atol=1e-04) - - def test_read_seek_time_wav(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - read_duration = 1. 
- for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99) - path = self.get_temp_path('sample_wav.wav') - save_wav(path, wav, sample_rate) - seek_time = torch.rand(1).item() - read_wav, read_sr = audio_read(path, seek_time, read_duration) - seek_frames = int(sample_rate * seek_time) - expected_frames = n_frames - seek_frames - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[1] == expected_frames - assert torch.allclose(read_wav, wav[..., seek_frames:], rtol=1e-03, atol=1e-04) - - def test_read_seek_time_wav_padded(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - read_duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - read_frames = int(sample_rate * read_duration) - wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99) - path = self.get_temp_path('sample_wav.wav') - save_wav(path, wav, sample_rate) - seek_time = torch.rand(1).item() - seek_frames = int(sample_rate * seek_time) - expected_frames = n_frames - seek_frames - read_wav, read_sr = audio_read(path, seek_time, read_duration, pad=True) - expected_pad_wav = torch.zeros(wav.shape[0], read_frames - expected_frames) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[1] == read_frames - assert torch.allclose(read_wav[..., :expected_frames], wav[..., seek_frames:], rtol=1e-03, atol=1e-04) - assert torch.allclose(read_wav[..., expected_frames:], expected_pad_wav) - - -class TestAvRead(TempDirMixin): - - def test_avread_seek_base(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 2. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path(f'reference_a_{sample_rate}_{ch}.wav') - save_wav(path, wav, sample_rate) - for _ in range(100): - # seek will always load a full duration segment in the file - seek_time = random.uniform(0.0, 1.0) - seek_duration = random.uniform(0.001, 1.0) - read_wav, read_sr = _av_read(path, seek_time, seek_duration) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[-1] == int(seek_duration * sample_rate) - - def test_avread_seek_partial(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path(f'reference_b_{sample_rate}_{ch}.wav') - save_wav(path, wav, sample_rate) - for _ in range(100): - # seek will always load a partial segment - seek_time = random.uniform(0.5, 1.) - seek_duration = 1. - expected_num_frames = n_frames - int(seek_time * sample_rate) - read_wav, read_sr = _av_read(path, seek_time, seek_duration) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[-1] == expected_num_frames - - def test_avread_seek_outofbound(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path(f'reference_c_{sample_rate}_{ch}.wav') - save_wav(path, wav, sample_rate) - seek_time = 1.5 - read_wav, read_sr = _av_read(path, seek_time, 1.) 
- assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[-1] == 0 - - def test_avread_seek_edge(self): - sample_rates = [8000, 16_000] - # some of these values will have - # int(((frames - 1) / sample_rate) * sample_rate) != (frames - 1) - n_frames = [1000, 1001, 1002] - channels = [1, 2] - for sample_rate, ch, frames in product(sample_rates, channels, n_frames): - duration = frames / sample_rate - wav = get_white_noise(ch, frames) - path = self.get_temp_path(f'reference_d_{sample_rate}_{ch}.wav') - save_wav(path, wav, sample_rate) - seek_time = (frames - 1) / sample_rate - seek_frames = int(seek_time * sample_rate) - read_wav, read_sr = _av_read(path, seek_time, duration) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[-1] == (frames - seek_frames) - - -class TestAudioWrite(TempDirMixin): - - def test_audio_write_wav(self): - torch.manual_seed(1234) - sample_rates = [8000, 16_000] - n_frames = [1000, 1001, 1002] - channels = [1, 2] - strategies = ["peak", "clip", "rms"] - formats = ["wav", "mp3"] - for sample_rate, ch, frames in product(sample_rates, channels, n_frames): - for format_, strategy in product(formats, strategies): - wav = get_white_noise(ch, frames) - path = self.get_temp_path(f'pred_{sample_rate}_{ch}') - audio_write(path, wav, sample_rate, format_, strategy=strategy) - read_wav, read_sr = torchaudio.load(f'{path}.{format_}') - if format_ == "wav": - assert read_wav.shape == wav.shape - - if format_ == "wav" and strategy in ["peak", "rms"]: - rescaled_read_wav = read_wav / read_wav.abs().max() * wav.abs().max() - # for a Gaussian, the typical max scale will be less than ~5x the std. - # The error when writing to disk will ~ 1/2**15, and when rescaling, 5x that. - # For RMS target, rescaling leaves more headroom by default, leading - # to a 20x rescaling typically - atol = (5 if strategy == "peak" else 20) / 2**15 - delta = (rescaled_read_wav - wav).abs().max() - assert torch.allclose(wav, rescaled_read_wav, rtol=0, atol=atol), (delta, atol) - formats = ["wav"] # faster unit tests diff --git a/spaces/radames/Real-Time-Latent-Consistency-Model/Dockerfile b/spaces/radames/Real-Time-Latent-Consistency-Model/Dockerfile deleted file mode 100644 index c164c4702f300e9ca3d3bb5f42fee781800f79cd..0000000000000000000000000000000000000000 --- a/spaces/radames/Real-Time-Latent-Consistency-Model/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04 - -ARG DEBIAN_FRONTEND=noninteractive - -ENV PYTHONUNBUFFERED=1 - -RUN apt-get update && apt-get install --no-install-recommends -y \ - build-essential \ - python3.9 \ - python3-pip \ - python3-dev \ - git \ - ffmpeg \ - google-perftools \ - && apt-get clean && rm -rf /var/lib/apt/lists/* - - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user -# Switch to the "user" user -USER user -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH \ - PYTHONPATH=$HOME/app \ - PYTHONUNBUFFERED=1 \ - SYSTEM=spaces - -RUN pip3 install --no-cache-dir --upgrade -r /code/requirements.txt - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . 
$HOME/app - -ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc.so.4 -# CMD ["uvicorn", "app-img2img:app", "--host", "0.0.0.0", "--port", "7860"] -# CMD ["uvicorn", "app-txt2img:app", "--host", "0.0.0.0", "--port", "7860"] -CMD ["uvicorn", "app-controlnet:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/rajababu15/Health_Tracker/README.md b/spaces/rajababu15/Health_Tracker/README.md deleted file mode 100644 index 65a27db12e6b88f853f8b5edb5f5b5f9fc4ea5c7..0000000000000000000000000000000000000000 --- a/spaces/rajababu15/Health_Tracker/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Health Tracker -emoji: 📚 -colorFrom: yellow -colorTo: gray -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ramiin2/AutoGPT/autogpt/agent/agent_manager.py b/spaces/ramiin2/AutoGPT/autogpt/agent/agent_manager.py deleted file mode 100644 index 898767a485e50b5e62625a7883edf1b30d5fddf9..0000000000000000000000000000000000000000 --- a/spaces/ramiin2/AutoGPT/autogpt/agent/agent_manager.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Agent manager for managing GPT agents""" -from __future__ import annotations - -from typing import Union - -from autogpt.config.config import Singleton -from autogpt.llm_utils import create_chat_completion - - -class AgentManager(metaclass=Singleton): - """Agent manager for managing GPT agents""" - - def __init__(self): - self.next_key = 0 - self.agents = {} # key, (task, full_message_history, model) - - # Create new GPT agent - # TODO: Centralise use of create_chat_completion() to globally enforce token limit - - def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]: - """Create a new agent and return its key - - Args: - task: The task to perform - prompt: The prompt to use - model: The model to use - - Returns: - The key of the new agent - """ - messages = [ - {"role": "user", "content": prompt}, - ] - - # Start GPT instance - agent_reply = create_chat_completion( - model=model, - messages=messages, - ) - - # Update full message history - messages.append({"role": "assistant", "content": agent_reply}) - - key = self.next_key - # This is done instead of len(agents) to make keys unique even if agents - # are deleted - self.next_key += 1 - - self.agents[key] = (task, messages, model) - - return key, agent_reply - - def message_agent(self, key: str | int, message: str) -> str: - """Send a message to an agent and return its response - - Args: - key: The key of the agent to message - message: The message to send to the agent - - Returns: - The agent's response - """ - task, messages, model = self.agents[int(key)] - - # Add user message to message history before sending to agent - messages.append({"role": "user", "content": message}) - - # Start GPT instance - agent_reply = create_chat_completion( - model=model, - messages=messages, - ) - - # Update full message history - messages.append({"role": "assistant", "content": agent_reply}) - - return agent_reply - - def list_agents(self) -> list[tuple[str | int, str]]: - """Return a list of all agents - - Returns: - A list of tuples of the form (key, task) - """ - - # Return a list of agent keys and their tasks - return [(key, task) for key, (task, _, _) in self.agents.items()] - - def delete_agent(self, key: Union[str, int]) -> bool: - """Delete an agent from the agent manager - - Args: - key: The key of the agent to delete - - Returns: - True 
if successful, False otherwise - """ - - try: - del self.agents[int(key)] - return True - except KeyError: - return False diff --git a/spaces/raphaelsty/games/README.md b/spaces/raphaelsty/games/README.md deleted file mode 100644 index 2e455161cbb3650f33a4348e174d4b13faf17897..0000000000000000000000000000000000000000 --- a/spaces/raphaelsty/games/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: End-to-end Neural Search -emoji: 👾 -colorFrom: gray -colorTo: purple -sdk: streamlit -app_file: app.py -pinned: true ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar.md deleted file mode 100644 index 30192d6eef8287caa4c4a9fa67a80d60a6884aff..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar.md +++ /dev/null @@ -1,123 +0,0 @@ - -
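For context on the AutoGPT agent_manager.py deleted just above: AgentManager is a singleton that spawns sub-agents through create_chat_completion, keys each one by an integer, and keeps the running message history per key. A minimal usage sketch follows; it assumes the autogpt package from that space is importable and an OpenAI API key is already configured, and the model name is only an example — none of those details are shown in the deleted file itself.

from autogpt.agent.agent_manager import AgentManager  # import path assumed from the deleted space layout

manager = AgentManager()          # Singleton metaclass: every call returns the same instance

# create_agent() sends the prompt once and returns (key, first_reply)
key, first_reply = manager.create_agent(
    task="summarise articles",
    prompt="You are a summarisation sub-agent.",
    model="gpt-3.5-turbo",        # example model name, not taken from the deleted file
)

reply = manager.message_agent(key, "Summarise: AutoGPT tracks sub-agents by integer key.")
print(manager.list_agents())      # [(key, task), ...] for every live agent
manager.delete_agent(key)         # returns False if the key is unknown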

            COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar: A Must-Have Mod for Your Game

            - -

            If you are a fan of the award-winning real-time strategy game Company of Heroes: Tales of Valor, you might be interested in downloading the COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar file. This is a mod that adds several features and enhancements to your game, such as unlimited resources, instant cooldown, fast building, god mode, and more. In this article, we will show you how to get and use this amazing trainer for your game.

            -

            COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar


            DOWNLOAD - https://urlgoal.com/2uCM9D



            - -

            What is COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar?

            - -

COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar is a file that contains a trainer for the game Company of Heroes: Tales of Valor. A trainer is a program that modifies the game's memory and allows you to activate various cheats and hacks. This trainer works with version 2.602 of the game, which is the latest patch available.

            - -

            The trainer offers six main features:

            - -
• Unlimited resources: You can have unlimited manpower, munitions, fuel, and command points.
• Add commander points: You can gain about 18 XP every time you press this option.
• Instant cooldown: You can use your special abilities without waiting for them to recharge.
• Fast building: You can construct buildings and train units instantly.
• God mode: You and your allies are invincible and can't be killed by enemies.
• Instant train units: You can train units without waiting for them to be ready.
            - -

            Note that when you turn on the trainer, the population cap will be zero, so you can have as many units as you want.

            - -

            How to Download and Install COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar?

            - -

            To download and install COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar, you need to follow these steps:

            - -
1. Go to this link and click on the "Download now" button.
2. Save the file to your computer and extract it using a program like WinRAR or 7-Zip.
3. Run the file named "Baathist_6_Trainer_for_CoH_ToV_2.602.EXE" as an administrator.
4. Start your game and press F1 to activate the trainer.
5. Use the hotkeys shown on the trainer's interface to enable or disable the features you want.
            - -

            You can also watch this video for a visual guide on how to use the trainer.

            -

            - -

            Why Should You Use COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar?

            - -

            COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar is a mod that can enhance your gaming experience and make it more fun and enjoyable. You can use it to experiment with different strategies, overcome difficult challenges, or just have some fun with unlimited power. You can also use it to play with different mods that add new units, maps, and modes to your game.

            - -

            The trainer is free to use and does not contain any viruses or malware. However, some antivirus programs may detect it as a false positive and block it from running. If that happens, you need to disable your antivirus or add an exception for the trainer's file.

            - -

            The trainer is also compatible with most of the popular mods for Company of Heroes: Tales of Valor, such as Blitzkrieg Mod, Modern Combat, Eastern Front, Zombie Mod, and many more. However, some mods may have their own trainers or cheats that may conflict with this one. In that case, you need to choose which one you want to use and disable the other one.

            - -


            -

            How to Uninstall COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar?

            - -

            If you want to uninstall COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar from your computer, you need to follow these steps:

            - -
1. Close your game and the trainer.
2. Delete the file named "Baathist_6_Trainer_for_CoH_ToV_2.602.EXE" from your computer.
3. Delete the file named "CoH_2.602.rar" from your computer.
4. Restart your computer to remove any traces of the trainer from your system.
            - -

            You can also watch this video for a visual guide on how to uninstall the trainer.

            - -

            What are the Benefits of Using COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar?

            - -

            COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar is a mod that can give you many benefits and advantages when playing Company of Heroes: Tales of Valor. Some of the benefits are:

            - -
• You can have more fun and excitement by using various cheats and hacks.
• You can experiment with different strategies and tactics without worrying about resources or cooldowns.
• You can overcome difficult missions and challenges with ease by using god mode or instant train units.
• You can enjoy the game's graphics and sound effects without any lag or slowdown by using fast building or instant buildings.
• You can play with different mods that add new content to your game without any compatibility issues by using this trainer.
            - -

            If you are looking for a way to enhance your gaming experience and make it more enjoyable, you should definitely use COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar. It is a mod that can make your game more fun and exciting.

            - -

            What are the Risks of Using COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar?

            - -

COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar is a mod that also has some risks and drawbacks. Some of the risks are:

            - -
• You may encounter some bugs or glitches when using the trainer, such as crashes, freezes, or errors.
• You may lose some of the challenge and satisfaction of playing the game by using cheats and hacks.
• You may get banned or penalized by the game's developers or publishers if you use the trainer online or in multiplayer mode.
• You may damage your computer or your game files if you download the trainer from an untrusted source or if you install it incorrectly.
• You may infect your computer with viruses or malware if you download the trainer from an untrusted source or if you run it without disabling your antivirus.
            - -

            If you want to use COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar, you should be aware of these risks and take precautions to avoid them. You should only download the trainer from a trusted source, such as Mod DB, and install it correctly on your computer. You should also disable your antivirus or add an exception for the trainer's file before running it. You should also avoid using the trainer online or in multiplayer mode, as it may cause problems for you or other players.

            -

            Where to Find More Mods for Company of Heroes: Tales of Valor?

            - -

            If you like COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar, you might also want to check out some other mods that add new content and features to your game. There are many mods available for Company of Heroes: Tales of Valor, such as:

            - -
• Blitzkrieg Mod: This mod aims to create a more realistic and immersive experience by adding new units, weapons, abilities, doctrines, sounds, and graphics.
• Modern Combat: This mod brings the game to the modern era by adding new factions, units, weapons, vehicles, maps, and modes based on the conflicts in Iraq and Afghanistan.
• Eastern Front: This mod adds the Soviet Red Army as a new faction, along with new units, maps, doctrines, abilities, and campaigns based on the Eastern Front of World War II.
• Zombie Mod: This mod adds a new mode where you have to survive against waves of zombies and mutants using your units and defenses.
• And many more!
            - -

            You can find these mods and more on Mod DB, a website that hosts thousands of mods for various games. You can browse, download, rate, comment, and upload mods on Mod DB. You can also join the community and interact with other modders and players.

            - -

            How to Contact the Creator of COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar?

            - -

If you have any questions, suggestions, feedback, or issues regarding COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar, you can contact the creator of the trainer, Resistant94. Resistant94 is a modder who has created several trainers and mods for various games. You can contact Resistant94 by:

            - -
• Sending a private message on Mod DB.
• Leaving a comment on the trainer's page on Mod DB.
• Emailing resistant94@gmail.com.
            - -

Resistant94 is always happy to hear from users who have used his trainer and appreciate his work. He is also open to suggestions and feedback on how to improve his trainer or create new ones. However, he may not be able to reply immediately or solve every problem you may encounter. He asks for your understanding and patience.

            -

            Conclusion

            - -

            COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar is a mod that can make your game more fun and exciting by giving you access to various cheats and hacks. You can download it from Mod DB and install it easily on your computer. You can then use it to play with unlimited resources, instant cooldown, fast building, god mode, instant train units, and more. You can also use it with other mods that add new content to your game.

            - -

            If you are looking for a way to spice up your game and have some fun with it, you should definitely try COMPANY OF HEROES TALES OF VALOR MEGA TRAINER 2.602.rar. It is a must-have mod for any fan of Company of Heroes: Tales of Valor.

            -
            -
            \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Descargar Roboguide Fanuc Con Crack 28 PATCHED.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Descargar Roboguide Fanuc Con Crack 28 PATCHED.md deleted file mode 100644 index 0ab516009e55886e6256b3c36c98d9fdb21b32d4..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Descargar Roboguide Fanuc Con Crack 28 PATCHED.md +++ /dev/null @@ -1,6 +0,0 @@ -

Download Roboguide Fanuc with crack 28


Download https://urlgoal.com/2uCM0M



            - - 3cee63e6c2
            -
            -
            -

            diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Kitab Sabilal Muhtadin Pdf !!TOP!!.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Kitab Sabilal Muhtadin Pdf !!TOP!!.md deleted file mode 100644 index 098f569497700b33409179fdd043ffbd6860819b..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Kitab Sabilal Muhtadin Pdf !!TOP!!.md +++ /dev/null @@ -1,15 +0,0 @@ -
            -

            Download Kitab Sabilal Muhtadin Pdf: A Collection of Prayers and Supplications for the Righteous

            -

            Kitab Sabilal Muhtadin (The Path of the Guided Ones) is a book that contains various prayers and supplications for the righteous, compiled by Sayyid Abdillah bin Alawi bin Hasan al-Athos, a descendant of Prophet Muhammad (peace be upon him). The book includes famous prayers such as Ratib al-Haddad, Ratib al-Athos, Wirid Syakran, Wirdu Latief and many others. These prayers are derived from the traditions of the people of Yemen, especially the habaib (noble scholars) and alawiyin (the Prophet's family).

            -

            Download Kitab Sabilal Muhtadin Pdf


            Download Zip ☆☆☆☆☆ https://urlgoal.com/2uCN4f



            -

            The book is named after another famous book with a similar title, Sabilal Muhtadin lit-Tafaqquh fi Amriddin (The Path of the Guided Ones for Understanding the Matters of Religion), written by Syaikh Muhammad Arsyad al-Banjary, a prominent scholar from Southeast Asia. However, the two books have different contents and purposes. The book by Sayyid Abdillah focuses on prayers and supplications, while the book by Syaikh Muhammad Arsyad covers the topics of Islamic jurisprudence (fiqh).

            -

            If you are interested in downloading Kitab Sabilal Muhtadin Pdf, you can find it online from various sources. One of them is the Internet Archive, which offers free access to millions of books, audio files, videos and other digital materials. You can download Kitab Sabilal Muhtadin Pdf from this link: https://archive.org/details/sabil-al-muhtadin-juzuk-1. The file size is 74.9 MB and it contains 417 pages. You can also read it online or borrow it for 14 days.

            -

            Another source is Santripedia, a website that provides various Islamic resources, such as books, articles, videos and podcasts. You can download Kitab Sabilal Muhtadin Pdf from this link: https://www.santripedia.com/download-kitab-sabilul-muhtadin/. The file size is 74.9 MB and it contains 417 pages as well. You can also read a brief introduction about the book and its author on this website.

            -

            -

            We hope that this article has helped you to find and download Kitab Sabilal Muhtadin Pdf. May Allah bless you and guide you to the right path.

            - -

            Kitab Sabilal Muhtadin Pdf is not only a book of prayers and supplications, but also a book of wisdom and guidance. It contains many verses from the Quran and the Hadith, as well as the sayings and teachings of the Prophet's companions and successors. The book also explains the meanings and benefits of the prayers and supplications, as well as the etiquette and manners of performing them. By reading and practicing this book, one can hope to attain the blessings and mercy of Allah, as well as the intercession and love of the Prophet (peace be upon him).

            -

            One of the most famous prayers in Kitab Sabilal Muhtadin Pdf is Ratib al-Haddad, which was composed by Imam Abdullah bin Alawi al-Haddad, a renowned scholar and saint from Yemen. Ratib al-Haddad is a collection of 33 prayers that are recited daily, preferably after the evening prayer (Maghrib) or before sleeping. Ratib al-Haddad is known for its spiritual effects and benefits, such as protection from harm, forgiveness of sins, fulfillment of needs, removal of difficulties, increase of sustenance and elevation of rank. Many people have testified to the miracles and wonders that they experienced by reciting Ratib al-Haddad with sincerity and devotion.

            -

            Another famous prayer in Kitab Sabilal Muhtadin Pdf is Ratib al-Athos, which was composed by Sayyid Abdillah bin Alawi bin Hasan al-Athos himself. Ratib al-Athos is a collection of 40 prayers that are recited weekly, preferably on Thursday night or Friday morning. Ratib al-Athos is similar to Ratib al-Haddad in its content and purpose, but it has some additional prayers that are specific to the Prophet's family (Ahlul Bayt). Ratib al-Athos is also known for its spiritual effects and benefits, such as strengthening the faith, purifying the heart, increasing the love for Allah and His Messenger (peace be upon him), granting peace and tranquility, attracting good fortune and repelling evil.

            -
            -
            \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Dr Fone Registration 10.3.2 Crack Free Full Code Latest 2020.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Dr Fone Registration 10.3.2 Crack Free Full Code Latest 2020.md deleted file mode 100644 index 2c96dde3058c958da11af27db31e9528d963f85d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Dr Fone Registration 10.3.2 Crack Free Full Code Latest 2020.md +++ /dev/null @@ -1,7 +0,0 @@ - -


Dr.Fone registration key 2020 crack is the best software available for recovering your data. The Wondershare Dr.Fone registration key may be the best choice to recover your lost data. It has all the features needed to recover all your files. The Dr.Fone registration key is easy to use, and its interface is very user-friendly. It is the best solution for recovering your lost data without any complicated steps.

            -

Dr.Fone keygen 10.3.2 crack is what you need to get your iPhone, iPad, or iPod touch into working condition again. The reason for using this software is to restore your iOS device to its original factory state. You can also use it to back up your device to other devices or to your computer. If you are not familiar with this software, you can download it and install the serial key on your computer. Once it is installed, you will be able to select the device you want to recover from the list. You can save your device's data to a specific folder or directory and then go into the scan. You will then have to wait a while for the recovery of your phone to finish. You will be able to choose the type of device to scan, and you can also choose the screen resolution to be used.

            -

            Dr Fone Registration 10.3.2 Crack Full Code Latest 2020


            Download File · https://urlgoal.com/2uCJid



            -


            Dr.Fone 10.3.2 Crack Full Version is one of the best ways to fix your iPhone, iPad, and iPod touch. There are many ways to recover the data on an iPhone or iPod touch, but this software stands out because it combines the recovery features of iTunes and iCloud in a single tool. With it, you can also recover data from a Mac computer and restore your iCloud account. For more features, you can buy Dr.Fone 10.2 Crack; if you do not want to pay subscription charges for iTunes, this software is the alternative. And if you want it without paying a penny, click the link given below: you will get it for free and can use it on up to ten devices.

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/FRAPS Full Version By flaconeonboi91 5 Setup Free EXCLUSIVE.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/FRAPS Full Version By flaconeonboi91 5 Setup Free EXCLUSIVE.md deleted file mode 100644 index ba49259c0f976346e825666ade0fe191b677c44d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/FRAPS Full Version By flaconeonboi91 5 Setup Free EXCLUSIVE.md +++ /dev/null @@ -1,6 +0,0 @@ -

            FRAPS Full version by {flaconeonboi91} 5 setup free


            Download ––– https://urlgoal.com/2uCKMR



            - - d5da3c52bf
            -
            -
            -

            diff --git a/spaces/riccorl/relik-entity-linking/relik/inference/serve/backend/utils.py b/spaces/riccorl/relik-entity-linking/relik/inference/serve/backend/utils.py deleted file mode 100644 index bdf869c1ece0e260355526ee5fcc2f00da7ef887..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/inference/serve/backend/utils.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -from dataclasses import dataclass -from typing import Union - - -@dataclass -class ServerParameterManager: - retriver_device: str = os.environ.get("RETRIEVER_DEVICE", "cpu") - reader_device: str = os.environ.get("READER_DEVICE", "cpu") - index_device: str = os.environ.get("INDEX_DEVICE", retriver_device) - precision: Union[str, int] = os.environ.get("PRECISION", "fp32") - index_precision: Union[str, int] = os.environ.get("INDEX_PRECISION", precision) - question_encoder: str = os.environ.get("QUESTION_ENCODER", None) - passage_encoder: str = os.environ.get("PASSAGE_ENCODER", None) - document_index: str = os.environ.get("DOCUMENT_INDEX", None) - reader_encoder: str = os.environ.get("READER_ENCODER", None) - top_k: int = int(os.environ.get("TOP_K", 100)) - use_faiss: bool = os.environ.get("USE_FAISS", False) - window_batch_size: int = int(os.environ.get("WINDOW_BATCH_SIZE", 32)) - window_size: int = int(os.environ.get("WINDOW_SIZE", 32)) - window_stride: int = int(os.environ.get("WINDOW_SIZE", 16)) - split_on_spaces: bool = os.environ.get("SPLIT_ON_SPACES", False) - - -class RayParameterManager: - def __init__(self) -> None: - self.num_gpus = int(os.environ.get("NUM_GPUS", 1)) - self.min_replicas = int(os.environ.get("MIN_REPLICAS", 1)) - self.max_replicas = int(os.environ.get("MAX_REPLICAS", 1)) diff --git a/spaces/rinong/StyleGAN-NADA/op/__init__.py b/spaces/rinong/StyleGAN-NADA/op/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/pisa_retinanet_head.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/pisa_retinanet_head.py deleted file mode 100644 index 8654ef453a849f038f68c78df64b4fdc4b26549b..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/pisa_retinanet_head.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import images_to_levels -from ..builder import HEADS -from ..losses import carl_loss, isr_p -from .retina_head import RetinaHead - - -@HEADS.register_module() -class PISARetinaHead(RetinaHead): - """PISA Retinanet Head. - - The head owns the same structure with Retinanet Head, but differs in two - aspects: - 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to - change the positive loss weights. - 2. Classification-aware regression loss is adopted as a third loss. - """ - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes of each image - with shape (num_obj, 4). 
- gt_labels (list[Tensor]): Ground truth labels of each image - with shape (num_obj, 4). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. - Default: None. - - Returns: - dict: Loss dict, comprise classification loss, regression loss and - carl loss. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - - num_imgs = len(img_metas) - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) - for cls_score in cls_scores - ] - flatten_cls_scores = torch.cat( - flatten_cls_scores, dim=1).reshape(-1, - flatten_cls_scores[0].size(-1)) - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) - for bbox_pred in bbox_preds - ] - flatten_bbox_preds = torch.cat( - flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) - flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) - flatten_label_weights = torch.cat( - label_weights_list, dim=1).reshape(-1) - flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) - flatten_bbox_targets = torch.cat( - bbox_targets_list, dim=1).reshape(-1, 4) - flatten_bbox_weights = torch.cat( - bbox_weights_list, dim=1).reshape(-1, 4) - - # Apply ISR-P - isr_cfg = self.train_cfg.get('isr', None) - if isr_cfg is not None: - all_targets = (flatten_labels, flatten_label_weights, - flatten_bbox_targets, flatten_bbox_weights) - with torch.no_grad(): - all_targets = isr_p( - flatten_cls_scores, - flatten_bbox_preds, - all_targets, - flatten_anchors, - sampling_results_list, - bbox_coder=self.bbox_coder, - loss_cls=self.loss_cls, - num_class=self.num_classes, - **self.train_cfg.isr) - (flatten_labels, flatten_label_weights, flatten_bbox_targets, - flatten_bbox_weights) = all_targets - - # For convenience we compute loss once instead separating by fpn level, - # so that we don't need to separate the weights by level again. 
- # The result should be the same - losses_cls = self.loss_cls( - flatten_cls_scores, - flatten_labels, - flatten_label_weights, - avg_factor=num_total_samples) - losses_bbox = self.loss_bbox( - flatten_bbox_preds, - flatten_bbox_targets, - flatten_bbox_weights, - avg_factor=num_total_samples) - loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - - # CARL Loss - carl_cfg = self.train_cfg.get('carl', None) - if carl_cfg is not None: - loss_carl = carl_loss( - flatten_cls_scores, - flatten_labels, - flatten_bbox_preds, - flatten_bbox_targets, - self.loss_bbox, - **self.train_cfg.carl, - avg_factor=num_total_pos, - sigmoid=True, - num_class=self.num_classes) - loss_dict.update(loss_carl) - - return loss_dict diff --git a/spaces/rorallitri/biomedical-language-models/logs/Autodesk AutoCAD Civil 3D V2012 WIN 32-64-ISO The Ultimate Solution for Your Projects.md b/spaces/rorallitri/biomedical-language-models/logs/Autodesk AutoCAD Civil 3D V2012 WIN 32-64-ISO The Ultimate Solution for Your Projects.md deleted file mode 100644 index 4b07f57e0d35790273ce3972a5ecb1ae917539c2..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Autodesk AutoCAD Civil 3D V2012 WIN 32-64-ISO The Ultimate Solution for Your Projects.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Autodesk AutoCAD Civil 3D V2012 WIN 32-64-ISO


            Download ✔✔✔ https://tinurll.com/2uzlFB



            - - aaccfb2cb3
            -
            -
            -

            diff --git a/spaces/rorallitri/biomedical-language-models/logs/How Navigation Carminat Communication Europe V.31.1 Renault 3 Can Enhance Your Travel Experience.md b/spaces/rorallitri/biomedical-language-models/logs/How Navigation Carminat Communication Europe V.31.1 Renault 3 Can Enhance Your Travel Experience.md deleted file mode 100644 index ff2b045322da8b79caeb675ebf51acfb9ac96edc..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/How Navigation Carminat Communication Europe V.31.1 Renault 3 Can Enhance Your Travel Experience.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Navigation Carminat Communication Europe V.31.1 Renault 3


            Download Ziphttps://tinurll.com/2uzoA4



            - - aaccfb2cb3
            -
            -
            -

            diff --git a/spaces/rorallitri/biomedical-language-models/logs/Libro Di Igiene Barbuti Pdf Download WORK.md b/spaces/rorallitri/biomedical-language-models/logs/Libro Di Igiene Barbuti Pdf Download WORK.md deleted file mode 100644 index eeead7397dace1d985808b4d37fd7c9981b61f71..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Libro Di Igiene Barbuti Pdf Download WORK.md +++ /dev/null @@ -1,12 +0,0 @@ -

            Libro Di Igiene Barbuti Pdf Download


            Download >>> https://tinurll.com/2uzoJy



            -
            -May 12, 2020 - disoni d868ddde6e chevbend January 30, 2022 at 8:52 said: to get this battle out of the house ... -The Sims 3: Wicked Woohoo mod - sex visual novel ... -3 Apr 2019 ... -Download mod for sims 4 woohoo wickedwhims in english, latest version, sims 4 wickedwhims animations, install and configure mod ... -Sims 4 WickedWhims Sex Animations - Sex Animations ... -11 Dec 2018 ... -Sims 4 Mod 8a78ff9644
            -
            -
            -

            diff --git a/spaces/rosenthal/chess/chessfenbot/helper_functions.py b/spaces/rosenthal/chess/chessfenbot/helper_functions.py deleted file mode 100644 index 691d80ec34cf20e5e757334c2229b4d39048c26c..0000000000000000000000000000000000000000 --- a/spaces/rosenthal/chess/chessfenbot/helper_functions.py +++ /dev/null @@ -1,172 +0,0 @@ -import numpy as np - -# Imports for visualization -import PIL.Image - -# DEBUG for ipython notebook visualizations. -# from IPython.display import clear_output, Image, display - -# def display_array(a, fmt='jpeg', rng=[0,1]): -# """Display an array as a picture.""" -# a = (a - rng[0])/float(rng[1] - rng[0]) # normalized float value -# a = np.uint8(np.clip(a*255, 0, 255)) -# f = StringIO() - -# PIL.Image.fromarray(np.asarray(a, dtype=np.uint8)).save(f, fmt) -# display(Image(data=f.getvalue())) - -# def display_weight(a, fmt='jpeg', rng=[0,1]): -# """Display an array as a color picture.""" -# a = (a - rng[0])/float(rng[1] - rng[0]) # normalized float value -# a = np.uint8(np.clip(a*255, 0, 255)) -# f = StringIO() - -# v = np.asarray(a, dtype=np.uint8) - -# # blue is high intensity, red is low -# # Negative -# r = 255-v.copy() -# r[r<127] = 0 -# r[r>=127] = 255 - -# # None -# g = np.zeros_like(v) - -# # Positive -# b = v.copy() -# b[b<127] = 0 -# b[b>=127] = 255 - -# #np.clip((v-127)/2,0,127)*2 - -# #-1 to 1 -# intensity = np.abs(2.*a-1) - -# rgb = np.uint8(np.dstack([r,g,b]*intensity)) - -# PIL.Image.fromarray(rgb).save(f, fmt) -# display(Image(data=f.getvalue(), width=100)) - -# def display_image(a, fmt='png'): -# """Display an image as a picture in-line.""" -# f = StringIO() - -# PIL.Image.fromarray(np.asarray(a, dtype=np.uint8)).save(f, fmt) -# display(Image(data=f.getvalue())) - -# FEN related -def getFENtileLabel(fen,letter,number): - """Given a fen string and a rank (number) and file (letter), return label vector""" - l2i = lambda l: ord(l)-ord('A') # letter to index - number = 8-number # FEN has order backwards - piece_letter = fen[number*8+number + l2i(letter)] - label = np.zeros(13, dtype=np.uint8) - label['1KQRBNPkqrbnp'.find(piece_letter)] = 1 # note the 1 instead of ' ' due to FEN notation - # We ignore shorter FENs with numbers > 1 because we generate the FENs ourselves - return label - -# We'll define the 12 pieces and 1 spacewith single characters -# KQRBNPkqrbnp -def getLabelForSquare(letter,number): - """Given letter and number (say 'B3'), return one-hot label vector - (12 pieces + 1 space == no piece, so 13-long vector)""" - l2i = lambda l: ord(l)-ord('A') # letter to index - piece2Label = lambda piece: ' KQRBNPkqrbnp'.find(piece) - # build mapping to index - # Starter position - starter_mapping = np.zeros([8,8], dtype=np.uint8) - starter_mapping[0, [l2i('A'), l2i('H')]] = piece2Label('R') - starter_mapping[0, [l2i('B'), l2i('G')]] = piece2Label('N') - starter_mapping[0, [l2i('C'), l2i('F')]] = piece2Label('B') - starter_mapping[0, l2i('D')] = piece2Label('Q') - starter_mapping[0, l2i('E')] = piece2Label('K') - starter_mapping[1, :] = piece2Label('P') - - starter_mapping[7, [l2i('A'), l2i('H')]] = piece2Label('r') - starter_mapping[7, [l2i('B'), l2i('G')]] = piece2Label('n') - starter_mapping[7, [l2i('C'), l2i('F')]] = piece2Label('b') - starter_mapping[7, l2i('D')] = piece2Label('q') - starter_mapping[7, l2i('E')] = piece2Label('k') - starter_mapping[6, :] = piece2Label('p') - # Note: if we display the array, the first row is white, - # normally bottom, but arrays show it as top - - # Generate one-hot label - label = np.zeros(13, 
dtype=np.uint8) - label[starter_mapping[number-1, l2i(letter), ]] = 1 - return label - -def name2Label(name): - """Convert label vector into name of piece""" - return ' KQRBNPkqrbnp'.find(name) - -def labelIndex2Name(label_index): - """Convert label index into name of piece""" - return ' KQRBNPkqrbnp'[label_index] - -def label2Name(label): - """Convert label vector into name of piece""" - return labelIndex2Name(label.argmax()) - -def shortenFEN(fen): - """Reduce FEN to shortest form (ex. '111p11Q' becomes '3p2Q')""" - return fen.replace('11111111','8').replace('1111111','7') \ - .replace('111111','6').replace('11111','5') \ - .replace('1111','4').replace('111','3').replace('11','2') - -def lengthenFEN(fen): - """Lengthen FEN to 71-character form (ex. '3p2Q' becomes '111p11Q')""" - return fen.replace('8','11111111').replace('7','1111111') \ - .replace('6','111111').replace('5','11111') \ - .replace('4','1111').replace('3','111').replace('2','11') - -def unflipFEN(fen): - if len(fen) < 71: - fen = lengthenFEN(FEN) - return '/'.join([ r[::-1] for r in fen.split('/') ][::-1]) - - -# For Training in IPython Notebooks -def loadFENtiles(image_filepaths): - """Load Tiles with FEN string in filename for labels. - return both images and labels""" - # Each tile is a 32x32 grayscale image, add extra axis for working with MNIST Data format - images = np.zeros([image_filepaths.size, 32, 32, 1], dtype=np.uint8) - labels = np.zeros([image_filepaths.size, 13], dtype=np.float64) - - for i, image_filepath in enumerate(image_filepaths): - if i % 1000 == 0: - #print("On #%d/%d : %s" % (i,image_filepaths.size, image_filepath)) - print(".",) - - # Image - images[i,:,:,0] = np.asarray(PIL.Image.open(image_filepath), dtype=np.uint8) - - # Label - fen = image_filepath[-78:-7] - _rank = image_filepath[-6] - _file = int(image_filepath[-5]) - labels[i,:] = getFENtileLabel(fen, _rank, _file) - print("Done") - return images, labels - -def loadLabels(image_filepaths): - """Load label vectors from list of image filepaths""" - # Each filepath contains which square we're looking at, - # since we're in starter position, we know which - # square has which piece, 12 distinct pieces - # (6 white and 6 black) and 1 as empty = 13 labels - training_data = np.zeros([image_filepaths.size, 13], dtype=np.float64) - for i, image_filepath in enumerate(image_filepaths): - training_data[i,:] = getLabelForSquare(image_filepath[-6],int(image_filepath[-5])) - return training_data - -def loadImages(image_filepaths): - # Each tile is a 32x32 grayscale image, add extra axis for working with MNIST Data format - training_data = np.zeros([image_filepaths.size, 32, 32, 1], dtype=np.uint8) - for i, image_filepath in enumerate(image_filepaths): - if i % 100 == 0: - print("On #%d/%d : %s" % (i,image_filepaths.size, image_filepath)) - img = PIL.Image.open(image_filepath) - training_data[i,:,:,0] = np.asarray(img, dtype=np.uint8) - return training_data diff --git a/spaces/sayakpaul/fivek-retouching-maxim/create_maxim_model.py b/spaces/sayakpaul/fivek-retouching-maxim/create_maxim_model.py deleted file mode 100644 index f6f8ef29093d5defdaa51e3f99ce25fcdc77b513..0000000000000000000000000000000000000000 --- a/spaces/sayakpaul/fivek-retouching-maxim/create_maxim_model.py +++ /dev/null @@ -1,37 +0,0 @@ -from tensorflow import keras - -from maxim import maxim -from maxim.configs import MAXIM_CONFIGS - - -def Model(variant=None, input_resolution=(256, 256), **kw) -> keras.Model: - """Factory function to easily create a Model variant like "S". 
- - Args: - variant: UNet model variants. Options: 'S-1' | 'S-2' | 'S-3' - | 'M-1' | 'M-2' | 'M-3' - input_resolution: Size of the input images. - **kw: Other UNet config dicts. - - Returns: - The MAXIM model. - """ - - if variant is not None: - config = MAXIM_CONFIGS[variant] - for k, v in config.items(): - kw.setdefault(k, v) - - if "variant" in kw: - _ = kw.pop("variant") - if "input_resolution" in kw: - _ = kw.pop("input_resolution") - model_name = kw.pop("name") - - maxim_model = maxim.MAXIM(**kw) - - inputs = keras.Input((*input_resolution, 3)) - outputs = maxim_model(inputs) - final_model = keras.Model(inputs, outputs, name=f"{model_name}_model") - - return final_model diff --git a/spaces/scedlatioru/img-to-music/example/Atnsoft Key Remapper Crack Seria.md b/spaces/scedlatioru/img-to-music/example/Atnsoft Key Remapper Crack Seria.md deleted file mode 100644 index 6f92cb47f1589d91cc5fa113b9a7f79b4ab935a3..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Atnsoft Key Remapper Crack Seria.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Atnsoft Key Remapper Crack Seria


            Download Filehttps://gohhs.com/2uEyTQ



            - - d5da3c52bf
            -
            -
            -

            diff --git a/spaces/scedlatioru/img-to-music/example/PATCHED MAGIX Music Maker 2017 Premium 24.1.5.119 Crack [SadeemPC].md b/spaces/scedlatioru/img-to-music/example/PATCHED MAGIX Music Maker 2017 Premium 24.1.5.119 Crack [SadeemPC].md deleted file mode 100644 index 9e5308f20e936f4a9a5609e94e1bac4ca87fd35d..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/PATCHED MAGIX Music Maker 2017 Premium 24.1.5.119 Crack [SadeemPC].md +++ /dev/null @@ -1,6 +0,0 @@ -

            PATCHED MAGIX Music Maker 2017 Premium 24.1.5.119 Crack [SadeemPC]


            Download ->>->>->> https://gohhs.com/2uEyPO



            -
            - d5da3c52bf
            -
            -
            -

            diff --git a/spaces/sczhou/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/__init__.py b/spaces/sczhou/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/sdhsdhk/bingosjj/src/components/ui/dialog.tsx b/spaces/sdhsdhk/bingosjj/src/components/ui/dialog.tsx deleted file mode 100644 index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/src/components/ui/dialog.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DialogPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Dialog = DialogPrimitive.Root - -const DialogTrigger = DialogPrimitive.Trigger - -const DialogPortal = ({ - className, - children, - ...props -}: DialogPrimitive.DialogPortalProps) => ( - -
            - {children} -
            -
            -) -DialogPortal.displayName = DialogPrimitive.Portal.displayName - -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName - -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName - -const DialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
            -) -DialogHeader.displayName = 'DialogHeader' - -const DialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
            -) -DialogFooter.displayName = 'DialogFooter' - -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName - -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName - -export { - Dialog, - DialogTrigger, - DialogContent, - DialogHeader, - DialogFooter, - DialogTitle, - DialogDescription -} diff --git a/spaces/segments-tobias/conex/espnet2/diar/espnet_model.py b/spaces/segments-tobias/conex/espnet2/diar/espnet_model.py deleted file mode 100644 index cf923c8b7dca931c3e42124da1240538ae050403..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/diar/espnet_model.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright 2021 Jiatong Shi -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -from contextlib import contextmanager -from distutils.version import LooseVersion -from itertools import permutations -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -import torch -from typeguard import check_argument_types - -from espnet.nets.pytorch_backend.nets_utils import to_device -from espnet2.asr.encoder.abs_encoder import AbsEncoder -from espnet2.asr.frontend.abs_frontend import AbsFrontend -from espnet2.diar.decoder.abs_decoder import AbsDecoder -from espnet2.layers.abs_normalize import AbsNormalize -from espnet2.torch_utils.device_funcs import force_gatherable -from espnet2.train.abs_espnet_model import AbsESPnetModel - - -if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"): - from torch.cuda.amp import autocast -else: - # Nothing to do if torch<1.6.0 - @contextmanager - def autocast(enabled=True): - yield - - -class ESPnetDiarizationModel(AbsESPnetModel): - """Speaker Diarization model""" - - def __init__( - self, - frontend: Optional[AbsFrontend], - normalize: Optional[AbsNormalize], - label_aggregator: torch.nn.Module, - encoder: AbsEncoder, - decoder: AbsDecoder, - loss_type: str = "pit", # only support pit loss for now - ): - assert check_argument_types() - - super().__init__() - - self.encoder = encoder - self.decoder = decoder - self.num_spk = decoder.num_spk - self.normalize = normalize - self.frontend = frontend - self.label_aggregator = label_aggregator - self.loss_type = loss_type - - def forward( - self, - speech: torch.Tensor, - speech_lengths: torch.Tensor = None, - spk_labels: torch.Tensor = None, - spk_labels_lengths: torch.Tensor = None, - ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]: - """Frontend + Encoder + Decoder + Calc loss - - Args: - speech: (Batch, samples) - speech_lengths: (Batch,) default None for chunk interator, - because the chunk-iterator does not - have the speech_lengths returned. - see in - espnet2/iterators/chunk_iter_factory.py - spk_labels: (Batch, ) - """ - assert speech.shape[0] == spk_labels.shape[0], (speech.shape, spk_labels.shape) - batch_size = speech.shape[0] - - # 1. Encoder - encoder_out, encoder_out_lens = self.encode(speech, speech_lengths) - - # 2. Decoder (baiscally a predction layer after encoder_out) - pred = self.decoder(encoder_out, encoder_out_lens) - - # 3. 
Aggregate time-domain labels - spk_labels, spk_labels_lengths = self.label_aggregator( - spk_labels, spk_labels_lengths - ) - - if self.loss_type == "pit": - loss, perm_idx, perm_list, label_perm = self.pit_loss( - pred, spk_labels, encoder_out_lens - ) - - ( - correct, - num_frames, - speech_scored, - speech_miss, - speech_falarm, - speaker_scored, - speaker_miss, - speaker_falarm, - speaker_error, - ) = self.calc_diarization_error(pred, label_perm, encoder_out_lens) - - if speech_scored > 0 and num_frames > 0: - sad_mr, sad_fr, mi, fa, cf, acc, der = ( - speech_miss / speech_scored, - speech_falarm / speech_scored, - speaker_miss / speaker_scored, - speaker_falarm / speaker_scored, - speaker_error / speaker_scored, - correct / num_frames, - (speaker_miss + speaker_falarm + speaker_error) / speaker_scored, - ) - else: - sad_mr, sad_fr, mi, fa, cf, acc, der = 0, 0, 0, 0, 0, 0, 0 - stats = dict( - loss=loss.detach(), - sad_mr=sad_mr, - sad_fr=sad_fr, - mi=mi, - fa=fa, - cf=cf, - acc=acc, - der=der, - ) - else: - raise NotImplementedError - - loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device) - return loss, stats, weight - - def collect_feats( - self, - speech: torch.Tensor, - speech_lengths: torch.Tensor, - spk_labels: torch.Tensor = None, - spk_labels_lengths: torch.Tensor = None, - ) -> Dict[str, torch.Tensor]: - feats, feats_lengths = self._extract_feats(speech, speech_lengths) - return {"feats": feats, "feats_lengths": feats_lengths} - - def encode( - self, speech: torch.Tensor, speech_lengths: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """Frontend + Encoder - - Args: - speech: (Batch, Length, ...) - speech_lengths: (Batch,) - """ - with autocast(False): - # 1. Extract feats - feats, feats_lengths = self._extract_feats(speech, speech_lengths) - - # 2. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN - if self.normalize is not None: - feats, feats_lengths = self.normalize(feats, feats_lengths) - - # 3. Forward encoder - # feats: (Batch, Length, Dim) - # -> encoder_out: (Batch, Length2, Dim) - encoder_out, encoder_out_lens, _ = self.encoder(feats, feats_lengths) - - assert encoder_out.size(0) == speech.size(0), ( - encoder_out.size(), - speech.size(0), - ) - assert encoder_out.size(1) <= encoder_out_lens.max(), ( - encoder_out.size(), - encoder_out_lens.max(), - ) - - return encoder_out, encoder_out_lens - - def _extract_feats( - self, speech: torch.Tensor, speech_lengths: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - batch_size = speech.shape[0] - speech_lengths = ( - speech_lengths - if speech_lengths is not None - else torch.ones(batch_size).int() * speech.shape[1] - ) - - assert speech_lengths.dim() == 1, speech_lengths.shape - - # for data-parallel - speech = speech[:, : speech_lengths.max()] - - if self.frontend is not None: - # Frontend - # e.g. 
STFT and Feature extract - # data_loader may send time-domain signal in this case - # speech (Batch, NSamples) -> feats: (Batch, NFrames, Dim) - feats, feats_lengths = self.frontend(speech, speech_lengths) - else: - # No frontend and no feature extract - feats, feats_lengths = speech, speech_lengths - return feats, feats_lengths - - def pit_loss_single_permute(self, pred, label, length): - bce_loss = torch.nn.BCEWithLogitsLoss(reduction="none") - mask = self.create_length_mask(length, label.size(1), label.size(2)) - loss = bce_loss(pred, label) - loss = loss * mask - loss = torch.sum(torch.mean(loss, dim=2), dim=1) - loss = torch.unsqueeze(loss, dim=1) - return loss - - def pit_loss(self, pred, label, lengths): - # Note (jiatong): Credit to https://github.com/hitachi-speech/EEND - num_output = label.size(2) - permute_list = [np.array(p) for p in permutations(range(num_output))] - loss_list = [] - for p in permute_list: - label_perm = label[:, :, p] - loss_perm = self.pit_loss_single_permute(pred, label_perm, lengths) - loss_list.append(loss_perm) - loss = torch.cat(loss_list, dim=1) - min_loss, min_idx = torch.min(loss, dim=1) - loss = torch.sum(min_loss) / torch.sum(lengths.float()) - batch_size = len(min_idx) - label_list = [] - for i in range(batch_size): - label_list.append(label[i, :, permute_list[min_idx[i]]].data.cpu().numpy()) - label_permute = torch.from_numpy(np.array(label_list)).float() - return loss, min_idx, permute_list, label_permute - - def create_length_mask(self, length, max_len, num_output): - batch_size = len(length) - mask = torch.zeros(batch_size, max_len, num_output) - for i in range(batch_size): - mask[i, : length[i], :] = 1 - mask = to_device(self, mask) - return mask - - @staticmethod - def calc_diarization_error(pred, label, length): - # Note (jiatong): Credit to https://github.com/hitachi-speech/EEND - - (batch_size, max_len, num_output) = label.size() - # mask the padding part - mask = np.zeros((batch_size, max_len, num_output)) - for i in range(batch_size): - mask[i, : length[i], :] = 1 - - # pred and label have the shape (batch_size, max_len, num_output) - label_np = label.data.cpu().numpy().astype(int) - pred_np = (pred.data.cpu().numpy() > 0).astype(int) - label_np = label_np * mask - pred_np = pred_np * mask - length = length.data.cpu().numpy() - - # compute speech activity detection error - n_ref = np.sum(label_np, axis=2) - n_sys = np.sum(pred_np, axis=2) - speech_scored = float(np.sum(n_ref > 0)) - speech_miss = float(np.sum(np.logical_and(n_ref > 0, n_sys == 0))) - speech_falarm = float(np.sum(np.logical_and(n_ref == 0, n_sys > 0))) - - # compute speaker diarization error - speaker_scored = float(np.sum(n_ref)) - speaker_miss = float(np.sum(np.maximum(n_ref - n_sys, 0))) - speaker_falarm = float(np.sum(np.maximum(n_sys - n_ref, 0))) - n_map = np.sum(np.logical_and(label_np == 1, pred_np == 1), axis=2) - speaker_error = float(np.sum(np.minimum(n_ref, n_sys) - n_map)) - correct = float(1.0 * np.sum((label_np == pred_np) * mask) / num_output) - num_frames = np.sum(length) - return ( - correct, - num_frames, - speech_scored, - speech_miss, - speech_falarm, - speaker_scored, - speaker_miss, - speaker_falarm, - speaker_error, - ) diff --git a/spaces/segments/panoptic-segment-anything/GroundingDINO/groundingdino/util/utils.py b/spaces/segments/panoptic-segment-anything/GroundingDINO/groundingdino/util/utils.py deleted file mode 100644 index e9f0318e306fa04bff0ada70486b41aaa69b07c8..0000000000000000000000000000000000000000 --- 
a/spaces/segments/panoptic-segment-anything/GroundingDINO/groundingdino/util/utils.py +++ /dev/null @@ -1,608 +0,0 @@ -import argparse -import json -import warnings -from collections import OrderedDict -from copy import deepcopy -from typing import Any, Dict, List - -import numpy as np -import torch -from transformers import AutoTokenizer - -from groundingdino.util.slconfig import SLConfig - - -def slprint(x, name="x"): - if isinstance(x, (torch.Tensor, np.ndarray)): - print(f"{name}.shape:", x.shape) - elif isinstance(x, (tuple, list)): - print("type x:", type(x)) - for i in range(min(10, len(x))): - slprint(x[i], f"{name}[{i}]") - elif isinstance(x, dict): - for k, v in x.items(): - slprint(v, f"{name}[{k}]") - else: - print(f"{name}.type:", type(x)) - - -def clean_state_dict(state_dict): - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k[:7] == "module.": - k = k[7:] # remove `module.` - new_state_dict[k] = v - return new_state_dict - - -def renorm( - img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) -> torch.FloatTensor: - # img: tensor(3,H,W) or tensor(B,3,H,W) - # return: same as img - assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() - if img.dim() == 3: - assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( - img.size(0), - str(img.size()), - ) - img_perm = img.permute(1, 2, 0) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(2, 0, 1) - else: # img.dim() == 4 - assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( - img.size(1), - str(img.size()), - ) - img_perm = img.permute(0, 2, 3, 1) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(0, 3, 1, 2) - - -class CocoClassMapper: - def __init__(self) -> None: - self.category_map_str = { - "1": 1, - "2": 2, - "3": 3, - "4": 4, - "5": 5, - "6": 6, - "7": 7, - "8": 8, - "9": 9, - "10": 10, - "11": 11, - "13": 12, - "14": 13, - "15": 14, - "16": 15, - "17": 16, - "18": 17, - "19": 18, - "20": 19, - "21": 20, - "22": 21, - "23": 22, - "24": 23, - "25": 24, - "27": 25, - "28": 26, - "31": 27, - "32": 28, - "33": 29, - "34": 30, - "35": 31, - "36": 32, - "37": 33, - "38": 34, - "39": 35, - "40": 36, - "41": 37, - "42": 38, - "43": 39, - "44": 40, - "46": 41, - "47": 42, - "48": 43, - "49": 44, - "50": 45, - "51": 46, - "52": 47, - "53": 48, - "54": 49, - "55": 50, - "56": 51, - "57": 52, - "58": 53, - "59": 54, - "60": 55, - "61": 56, - "62": 57, - "63": 58, - "64": 59, - "65": 60, - "67": 61, - "70": 62, - "72": 63, - "73": 64, - "74": 65, - "75": 66, - "76": 67, - "77": 68, - "78": 69, - "79": 70, - "80": 71, - "81": 72, - "82": 73, - "84": 74, - "85": 75, - "86": 76, - "87": 77, - "88": 78, - "89": 79, - "90": 80, - } - self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()} - self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()} - - def origin2compact(self, idx): - return self.origin2compact_mapper[int(idx)] - - def compact2origin(self, idx): - return self.compact2origin_mapper[int(idx)] - - -def to_device(item, device): - if isinstance(item, torch.Tensor): - return item.to(device) - elif isinstance(item, list): - return [to_device(i, device) for i in item] - elif isinstance(item, dict): - return {k: to_device(v, device) for k, v in item.items()} - else: - raise NotImplementedError( - "Call Shilong if you use other 
containers! type: {}".format(type(item)) - ) - - -# -def get_gaussian_mean(x, axis, other_axis, softmax=True): - """ - - Args: - x (float): Input images(BxCxHxW) - axis (int): The index for weighted mean - other_axis (int): The other index - - Returns: weighted index for axis, BxC - - """ - mat2line = torch.sum(x, axis=other_axis) - # mat2line = mat2line / mat2line.mean() * 10 - if softmax: - u = torch.softmax(mat2line, axis=2) - else: - u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6) - size = x.shape[axis] - ind = torch.linspace(0, 1, size).to(x.device) - batch = x.shape[0] - channel = x.shape[1] - index = ind.repeat([batch, channel, 1]) - mean_position = torch.sum(index * u, dim=2) - return mean_position - - -def get_expected_points_from_map(hm, softmax=True): - """get_gaussian_map_from_points - B,C,H,W -> B,N,2 float(0, 1) float(0, 1) - softargmax function - - Args: - hm (float): Input images(BxCxHxW) - - Returns: - weighted index for axis, BxCx2. float between 0 and 1. - - """ - # hm = 10*hm - B, C, H, W = hm.shape - y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax) # B,C - x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax) # B,C - # return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2) - return torch.stack([x_mean, y_mean], dim=2) - - -# Positional encoding (section 5.1) -# borrow from nerf -class Embedder: - def __init__(self, **kwargs): - self.kwargs = kwargs - self.create_embedding_fn() - - def create_embedding_fn(self): - embed_fns = [] - d = self.kwargs["input_dims"] - out_dim = 0 - if self.kwargs["include_input"]: - embed_fns.append(lambda x: x) - out_dim += d - - max_freq = self.kwargs["max_freq_log2"] - N_freqs = self.kwargs["num_freqs"] - - if self.kwargs["log_sampling"]: - freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs) - else: - freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs) - - for freq in freq_bands: - for p_fn in self.kwargs["periodic_fns"]: - embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) - out_dim += d - - self.embed_fns = embed_fns - self.out_dim = out_dim - - def embed(self, inputs): - return torch.cat([fn(inputs) for fn in self.embed_fns], -1) - - -def get_embedder(multires, i=0): - import torch.nn as nn - - if i == -1: - return nn.Identity(), 3 - - embed_kwargs = { - "include_input": True, - "input_dims": 3, - "max_freq_log2": multires - 1, - "num_freqs": multires, - "log_sampling": True, - "periodic_fns": [torch.sin, torch.cos], - } - - embedder_obj = Embedder(**embed_kwargs) - embed = lambda x, eo=embedder_obj: eo.embed(x) - return embed, embedder_obj.out_dim - - -class APOPMeter: - def __init__(self) -> None: - self.tp = 0 - self.fp = 0 - self.tn = 0 - self.fn = 0 - - def update(self, pred, gt): - """ - Input: - pred, gt: Tensor() - """ - assert pred.shape == gt.shape - self.tp += torch.logical_and(pred == 1, gt == 1).sum().item() - self.fp += torch.logical_and(pred == 1, gt == 0).sum().item() - self.tn += torch.logical_and(pred == 0, gt == 0).sum().item() - self.tn += torch.logical_and(pred == 1, gt == 0).sum().item() - - def update_cm(self, tp, fp, tn, fn): - self.tp += tp - self.fp += fp - self.tn += tn - self.tn += fn - - -def inverse_sigmoid(x, eps=1e-5): - x = x.clamp(min=0, max=1) - x1 = x.clamp(min=eps) - x2 = (1 - x).clamp(min=eps) - return torch.log(x1 / x2) - - -def get_raw_dict(args): - """ - return the dicf contained in args. 
- - e.g: - >>> with open(path, 'w') as f: - json.dump(get_raw_dict(args), f, indent=2) - """ - if isinstance(args, argparse.Namespace): - return vars(args) - elif isinstance(args, dict): - return args - elif isinstance(args, SLConfig): - return args._cfg_dict - else: - raise NotImplementedError("Unknown type {}".format(type(args))) - - -def stat_tensors(tensor): - assert tensor.dim() == 1 - tensor_sm = tensor.softmax(0) - entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum() - - return { - "max": tensor.max(), - "min": tensor.min(), - "mean": tensor.mean(), - "var": tensor.var(), - "std": tensor.var() ** 0.5, - "entropy": entropy, - } - - -class NiceRepr: - """Inherit from this class and define ``__nice__`` to "nicely" print your - objects. - - Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function - Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``. - If the inheriting class has a ``__len__``, method then the default - ``__nice__`` method will return its length. - - Example: - >>> class Foo(NiceRepr): - ... def __nice__(self): - ... return 'info' - >>> foo = Foo() - >>> assert str(foo) == '' - >>> assert repr(foo).startswith('>> class Bar(NiceRepr): - ... pass - >>> bar = Bar() - >>> import pytest - >>> with pytest.warns(None) as record: - >>> assert 'object at' in str(bar) - >>> assert 'object at' in repr(bar) - - Example: - >>> class Baz(NiceRepr): - ... def __len__(self): - ... return 5 - >>> baz = Baz() - >>> assert str(baz) == '' - """ - - def __nice__(self): - """str: a "nice" summary string describing this module""" - if hasattr(self, "__len__"): - # It is a common pattern for objects to use __len__ in __nice__ - # As a convenience we define a default __nice__ for these objects - return str(len(self)) - else: - # In all other cases force the subclass to overload __nice__ - raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}") - - def __repr__(self): - """str: the string of the module""" - try: - nice = self.__nice__() - classname = self.__class__.__name__ - return f"<{classname}({nice}) at {hex(id(self))}>" - except NotImplementedError as ex: - warnings.warn(str(ex), category=RuntimeWarning) - return object.__repr__(self) - - def __str__(self): - """str: the string of the module""" - try: - classname = self.__class__.__name__ - nice = self.__nice__() - return f"<{classname}({nice})>" - except NotImplementedError as ex: - warnings.warn(str(ex), category=RuntimeWarning) - return object.__repr__(self) - - -def ensure_rng(rng=None): - """Coerces input into a random number generator. - - If the input is None, then a global random state is returned. - - If the input is a numeric value, then that is used as a seed to construct a - random state. Otherwise the input is returned as-is. - - Adapted from [1]_. - - Args: - rng (int | numpy.random.RandomState | None): - if None, then defaults to the global rng. Otherwise this can be an - integer or a RandomState class - Returns: - (numpy.random.RandomState) : rng - - a numpy random number generator - - References: - .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 - """ - - if rng is None: - rng = np.random.mtrand._rand - elif isinstance(rng, int): - rng = np.random.RandomState(rng) - else: - rng = rng - return rng - - -def random_boxes(num=1, scale=1, rng=None): - """Simple version of ``kwimage.Boxes.random`` - - Returns: - Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
- - References: - https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 - - Example: - >>> num = 3 - >>> scale = 512 - >>> rng = 0 - >>> boxes = random_boxes(num, scale, rng) - >>> print(boxes) - tensor([[280.9925, 278.9802, 308.6148, 366.1769], - [216.9113, 330.6978, 224.0446, 456.5878], - [405.3632, 196.3221, 493.3953, 270.7942]]) - """ - rng = ensure_rng(rng) - - tlbr = rng.rand(num, 4).astype(np.float32) - - tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) - tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) - br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) - br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) - - tlbr[:, 0] = tl_x * scale - tlbr[:, 1] = tl_y * scale - tlbr[:, 2] = br_x * scale - tlbr[:, 3] = br_y * scale - - boxes = torch.from_numpy(tlbr) - return boxes - - -class ModelEma(torch.nn.Module): - def __init__(self, model, decay=0.9997, device=None): - super(ModelEma, self).__init__() - # make a copy of the model for accumulating moving average of weights - self.module = deepcopy(model) - self.module.eval() - - # import ipdb; ipdb.set_trace() - - self.decay = decay - self.device = device # perform ema on different device from model if set - if self.device is not None: - self.module.to(device=device) - - def _update(self, model, update_fn): - with torch.no_grad(): - for ema_v, model_v in zip( - self.module.state_dict().values(), model.state_dict().values() - ): - if self.device is not None: - model_v = model_v.to(device=self.device) - ema_v.copy_(update_fn(ema_v, model_v)) - - def update(self, model): - self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m) - - def set(self, model): - self._update(model, update_fn=lambda e, m: m) - - -class BestMetricSingle: - def __init__(self, init_res=0.0, better="large") -> None: - self.init_res = init_res - self.best_res = init_res - self.best_ep = -1 - - self.better = better - assert better in ["large", "small"] - - def isbetter(self, new_res, old_res): - if self.better == "large": - return new_res > old_res - if self.better == "small": - return new_res < old_res - - def update(self, new_res, ep): - if self.isbetter(new_res, self.best_res): - self.best_res = new_res - self.best_ep = ep - return True - return False - - def __str__(self) -> str: - return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep) - - def __repr__(self) -> str: - return self.__str__() - - def summary(self) -> dict: - return { - "best_res": self.best_res, - "best_ep": self.best_ep, - } - - -class BestMetricHolder: - def __init__(self, init_res=0.0, better="large", use_ema=False) -> None: - self.best_all = BestMetricSingle(init_res, better) - self.use_ema = use_ema - if use_ema: - self.best_ema = BestMetricSingle(init_res, better) - self.best_regular = BestMetricSingle(init_res, better) - - def update(self, new_res, epoch, is_ema=False): - """ - return if the results is the best. 
- """ - if not self.use_ema: - return self.best_all.update(new_res, epoch) - else: - if is_ema: - self.best_ema.update(new_res, epoch) - return self.best_all.update(new_res, epoch) - else: - self.best_regular.update(new_res, epoch) - return self.best_all.update(new_res, epoch) - - def summary(self): - if not self.use_ema: - return self.best_all.summary() - - res = {} - res.update({f"all_{k}": v for k, v in self.best_all.summary().items()}) - res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()}) - res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()}) - return res - - def __repr__(self) -> str: - return json.dumps(self.summary(), indent=2) - - def __str__(self) -> str: - return self.__repr__() - - -def targets_to(targets: List[Dict[str, Any]], device): - """Moves the target dicts to the given device.""" - excluded_keys = [ - "questionId", - "tokens_positive", - "strings_positive", - "tokens", - "dataset_name", - "sentence_id", - "original_img_id", - "nb_eval", - "task_id", - "original_id", - "token_span", - "caption", - "dataset_type", - ] - return [ - {k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets - ] - - -def get_phrases_from_posmap( - posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer -): - assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor" - if posmap.dim() == 1: - non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist() - token_ids = [tokenized["input_ids"][i] for i in non_zero_idx] - return tokenizer.decode(token_ids) - else: - raise NotImplementedError("posmap must be 1-dim") diff --git a/spaces/shabnam91/Sanskrit-TTS/indic_nlp_library/indicnlp/syllable/syllabifier.py b/spaces/shabnam91/Sanskrit-TTS/indic_nlp_library/indicnlp/syllable/syllabifier.py deleted file mode 100644 index 2a0cfb0be6ac9e9c2c9938b4a8b4b84b054d28c8..0000000000000000000000000000000000000000 --- a/spaces/shabnam91/Sanskrit-TTS/indic_nlp_library/indicnlp/syllable/syllabifier.py +++ /dev/null @@ -1,302 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-# - -import codecs, sys -from indicnlp.script import indic_scripts as si -import re - -chillu_char_map= { - '\u0d7a': '\u0d23', - '\u0d7b': '\u0d28', - '\u0d7c': '\u0d30', - '\u0d7d': '\u0d32', - '\u0d7e': '\u0d33', - '\u0d7f': '\u0d15', - } - -char_chillu_map= {} -for k,v in chillu_char_map.items(): - char_chillu_map[v]=k - -def normalize_malayalam(word): - - word_mask=re.sub(r'[0-9]','0',word) - - # instead of chillu characters, use consonant+halant - for chillu,char in chillu_char_map.items(): - word=word.replace(chillu,'{}\u0d4d'.format(char)) - word_mask=word_mask.replace(chillu,'41') - - word_mask=re.sub(r'[^0-9]','0',word_mask) - - return word, word_mask - -def denormalize_malayalam(word, word_mask): - - word=list(word) - word_mask=list(word_mask) - - ## pattern 4 - idx=0 - while idx>=0: - try: - idx=word_mask.index('4',idx) - word[idx:idx+2]=char_chillu_map[word[idx]] - word_mask[idx:idx+2]='0' - start=idx - except ValueError as e: - break - - return ''.join(word) - -def normalize_punjabi(word): - word_mask=re.sub(r'[0-9]','0',word) - - ## replace tippi with anusvaar - word=word.replace('\u0a70','\u0a02') - word_mask=word_mask.replace('\u0a70','2') - - ## replace addak+consonant with consonat+halant+consonant - word=re.sub(r'\u0a71(.)','\\1\u0a4d\\1',word) - word_mask=re.sub(r'\u0a71(.)','311',word_mask) - - word_mask=re.sub(r'[^0-9]','0',word_mask) - - return word, word_mask - -def denormalize_punjabi(word, word_mask): - - word=list(word) - word_mask=list(word_mask) - - ## pattern 2 - idx=0 - while idx>=0: - try: - idx=word_mask.index('2',idx) - word[idx]='\u0a70' - word_mask[idx]='0' - start=idx - except ValueError as e: - break - - ## pattern 3 - idx=0 - while idx>=0: - try: - idx=word_mask.index('3',idx) - word[idx:idx+3]='\u0a71{}'.format(word[idx]) - word_mask[idx:idx+3]='00' - start=idx - except ValueError as e: - break - - return ''.join(word) - -def char_backoff(syllables_list,vocab): - syllables_final=[] - - if vocab is None: - syllables_final=syllables_list - else: - for s in syllables_list: - if s in vocab: - syllables_final.append(s) - else: - for x in s: - syllables_final.append(x) - - return syllables_final - - -def orthographic_syllabify_improved(word,lang,vocab=None): - - word_mask=['0']*len(word) - - if lang=='ml': - word, word_mask = normalize_malayalam(word) - word=word - elif lang=='pa': - word, word_mask = normalize_punjabi(word) - - p_vectors=[si.get_phonetic_feature_vector(c,lang) for c in word] - - syllables=[] - syllables_mask=[] - - for i in range(len(word)): - v=p_vectors[i] - - syllables.append(word[i]) - syllables_mask.append(word_mask[i]) - - ### simplified syllabification - #if i+1= 0: - print('Warning') - - if lang=='ml': - syllables = denormalize_malayalam(syllables,syllables_mask) - elif lang=='pa': - syllables = denormalize_punjabi(syllables,syllables_mask) - - syllables_list = syllables.strip().split(' ') - return(char_backoff(syllables_list,vocab)) - -def orthographic_syllabify(word,lang,vocab=None): - - p_vectors=[si.get_phonetic_feature_vector(c,lang) for c in word] - - syllables=[] - - for i in range(len(word)): - v=p_vectors[i] - - syllables.append(word[i]) - - ### simplified syllabification - #if i+1 str: - """ - this method validates the youtube video link provided. 
- input : url (str) - outputs: transcript (string/dict) - """ - yt_regex = r"^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|\&v=|\?v=)([^#\&\?]*).*" - matches = re.findall(yt_regex, url) - - assert (len(matches[0][1]) == 11), "Invalid YouTube Link" - - video_id:str = matches[0][1] - - return video_id - - -def zip_transcript(transcript:list) -> dict: - start_times = [] - texts = [] - for item in transcript: - start_times.append(item['start']) - texts.append(item['text'].strip().replace('\n',' ')) - - return { - 'timestamps': start_times, - 'texts': texts - } - - - -def full_text(transcript: list) -> str: - texts = [] - for item in transcript: - texts.append(item['text']) - return ' '.join(texts).strip() - - -def fetch_transcript(url: str) -> list: - - video_id = validate_youtube_link(url) - - try: - transcript:list = YouTubeTranscriptApi.get_transcript(video_id=video_id) - - except YouTubeRequestFailed: - raise Exception('YouTube Request Failed, try again later.') - - return transcript - - - -if __name__ == '__main__': - sample = 'https://www.youtube.com/watch?v=t6V9i8fFADI' - sample2 = 'https://www.youtube.com/watch?v=1nLHIM2IPRY' - fake_sample = 'https://www.youtube.com/watch?v=asdf3' - transcript = fetch_transcript(url=sample) - - times, texts = zip_transcript(transcript) - texts = stride_sentences(texts) - print(texts[0]) - - # with open('sample_group.txt','w') as f: - # for group in groups: - # f.write(f"{group}\n\n") diff --git a/spaces/shriarul5273/Yolov7/utils/metrics.py b/spaces/shriarul5273/Yolov7/utils/metrics.py deleted file mode 100644 index 6d2f53647529ab0fc52f2e69fe2571794b024c94..0000000000000000000000000000000000000000 --- a/spaces/shriarul5273/Yolov7/utils/metrics.py +++ /dev/null @@ -1,227 +0,0 @@ -# Model validation metrics - -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -import torch - -from . import general - - -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def ap_per_class(tp, conf, pred_cls, target_cls, v5_metric=False, plot=False, save_dir='.', names=()): - """ Compute the average precision, given the recall and precision curves. - Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. - # Arguments - tp: True positives (nparray, nx1 or nx10). - conf: Objectness value from 0-1 (nparray). - pred_cls: Predicted object classes (nparray). - target_cls: True object classes (nparray). - plot: Plot precision-recall curve at mAP@0.5 - save_dir: Plot save directory - # Returns - The average precision as computed in py-faster-rcnn. 
- """ - - # Sort by objectness - i = np.argsort(-conf) - tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] - - # Find unique classes - unique_classes = np.unique(target_cls) - nc = unique_classes.shape[0] # number of classes, number of detections - - # Create Precision-Recall curve and compute AP for each class - px, py = np.linspace(0, 1, 1000), [] # for plotting - ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) - for ci, c in enumerate(unique_classes): - i = pred_cls == c - n_l = (target_cls == c).sum() # number of labels - n_p = i.sum() # number of predictions - - if n_p == 0 or n_l == 0: - continue - else: - # Accumulate FPs and TPs - fpc = (1 - tp[i]).cumsum(0) - tpc = tp[i].cumsum(0) - - # Recall - recall = tpc / (n_l + 1e-16) # recall curve - r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases - - # Precision - precision = tpc / (tpc + fpc) # precision curve - p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score - - # AP from recall-precision curve - for j in range(tp.shape[1]): - ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j], v5_metric=v5_metric) - if plot and j == 0: - py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 - - # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + 1e-16) - if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') - - i = f1.mean(0).argmax() # max F1 index - return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') - - -def compute_ap(recall, precision, v5_metric=False): - """ Compute the average precision, given the recall and precision curves - # Arguments - recall: The recall curve (list) - precision: The precision curve (list) - v5_metric: Assume maximum recall to be 1.0, as in YOLOv5, MMDetetion etc. - # Returns - Average precision, precision curve, recall curve - """ - - # Append sentinel values to beginning and end - if v5_metric: # New YOLOv5 metric, same as MMDetection and Detectron2 repositories - mrec = np.concatenate(([0.], recall, [1.0])) - else: # Old YOLOv5 metric, i.e. default YOLOv7 metric - mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) - mpre = np.concatenate(([1.], precision, [0.])) - - # Compute the precision envelope - mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) - - # Integrate area under curve - method = 'interp' # methods: 'continuous', 'interp' - if method == 'interp': - x = np.linspace(0, 1, 101) # 101-point interp (COCO) - ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate - else: # 'continuous' - i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve - - return ap, mpre, mrec - - -class ConfusionMatrix: - # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix - def __init__(self, nc, conf=0.25, iou_thres=0.45): - self.matrix = np.zeros((nc + 1, nc + 1)) - self.nc = nc # number of classes - self.conf = conf - self.iou_thres = iou_thres - - def process_batch(self, detections, labels): - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
- Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 - Returns: - None, updates confusion matrix accordingly - """ - detections = detections[detections[:, 4] > self.conf] - gt_classes = labels[:, 0].int() - detection_classes = detections[:, 5].int() - iou = general.box_iou(labels[:, 1:], detections[:, :4]) - - x = torch.where(iou > self.iou_thres) - if x[0].shape[0]: - matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() - if x[0].shape[0] > 1: - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 1], return_index=True)[1]] - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - else: - matches = np.zeros((0, 3)) - - n = matches.shape[0] > 0 - m0, m1, _ = matches.transpose().astype(np.int16) - for i, gc in enumerate(gt_classes): - j = m0 == i - if n and sum(j) == 1: - self.matrix[gc, detection_classes[m1[j]]] += 1 # correct - else: - self.matrix[self.nc, gc] += 1 # background FP - - if n: - for i, dc in enumerate(detection_classes): - if not any(m1 == i): - self.matrix[dc, self.nc] += 1 # background FN - - def matrix(self): - return self.matrix - - def plot(self, save_dir='', names=()): - try: - import seaborn as sn - - array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize - array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) - - fig = plt.figure(figsize=(12, 9), tight_layout=True) - sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size - labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels - sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) - fig.axes[0].set_xlabel('True') - fig.axes[0].set_ylabel('Predicted') - fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) - except Exception as e: - pass - - def print(self): - for i in range(self.nc + 1): - print(' '.join(map(str, self.matrix[i]))) - - -# Plots ---------------------------------------------------------------------------------------------------------------- - -def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): - # Precision-recall curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - py = np.stack(py, axis=1) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) - else: - ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) - - ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - fig.savefig(Path(save_dir), dpi=250) - - -def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): - # Metric-confidence curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py): - ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) - else: - ax.plot(px, py.T, linewidth=1, color='grey') # 
plot(confidence, metric) - - y = py.mean(0) - ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') - ax.set_xlabel(xlabel) - ax.set_ylabel(ylabel) - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - fig.savefig(Path(save_dir), dpi=250) diff --git a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/english.py b/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = 
mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Angry Birds Classic 6.3.0 Mod Apk Why You Should Try the Original Game with Mods.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Angry Birds Classic 6.3.0 Mod Apk Why You Should Try the Original Game with Mods.md deleted file mode 100644 index d66234cff1b5b67077cdf516987e7b739c8b1562..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Angry Birds Classic 6.3.0 Mod Apk Why You Should Try the Original Game with Mods.md +++ /dev/null @@ -1,142 +0,0 @@ -
            -

            Angry Birds Classic 6.3.0 Mod APK: A Fun and Addictive Puzzle Game

            -

            If you are looking for a casual puzzle game that is easy to play but hard to master, you might want to try Angry Birds Classic 6.3.0 Mod APK. This is a modified version of the original Angry Birds game, which was released in 2009 by Rovio Entertainment. The game has become one of the most successful and popular mobile games of all time, with over 3 billion downloads across all platforms.

            -

            In this game, you control a flock of colorful birds that are angry at the green pigs who stole their eggs. You use a slingshot to launch the birds at the pigs' structures, with the aim of destroying them and eliminating all the pigs on the screen. The game features challenging physics-based gameplay, comical style, and low price.

            -

            angry birds classic 6.3.0 mod apk


            Download File →→→ https://ssurll.com/2uNYb4



            -

            The mod version of the game, which is available for Android devices, allows you to gain a lot of money and in-game rewards, such as power-ups, spells, and extra birds. You can use these to enhance your gameplay and overcome difficult levels. The mod also unlocks all the episodes and levels in the game, so you can enjoy them without any restrictions.

            -

            Features of Angry Birds Classic 6.3.0 Mod APK

            -

            Angry Birds Classic 6.3.0 Mod APK has many features that make it fun and addictive to play. Some of them are:

            -
              -
            • Multiple episodes and levels: The game has over 240 levels across 16 episodes, each with a different theme and difficulty level. You can explore various locations, such as tropical islands, snowy mountains, golden beaches, and even outer space.
            • -
            • Different types of birds: The game has 10 types of birds, each with their own unique abilities and personalities. You can use them strategically to destroy different materials, such as wood, glass, stone, metal, TNT, etc. For example, you can use the yellow bird to speed up and break through wood, or use the black bird to explode and demolish concrete.
            • -
            • Special power-ups and spells: The game has various power-ups and spells that can help you in your quest to defeat the pigs. You can use them to boost your birds' abilities, such as making them bigger, stronger, faster, or more accurate. You can also use spells to cause natural disasters, such as lightning strikes, blizzards, hot chili peppers, or golden ducks.
            • -
            • Mighty Eagle: The game has a special feature called Mighty Eagle, which is a giant eagle that can be summoned once per hour to destroy everything on the screen. You can use it when you are stuck on a tricky level or when you want to get a high score. The Mighty Eagle also has its own challenges and achievements that you can complete for extra rewards.
            • -
            • Daily Challenge: The game has a daily challenge mode that gives you a new level every day to test your skills and earn coins. You can also compete with other players around the world and see who can get the highest score on each level.
            • -
            -

            Tips and Tricks for Playing Angry Birds Classic 6.3.0 Mod APK

            -

            Playing Angry Birds Classic 6.3.0 Mod APK can be fun and satisfying, but it can also be frustrating sometimes when you encounter hard levels or miss your shots. Here are some tips and tricks that can help you play better and enjoy more:

            -
              -
            • Aim well: The most important skill in this game is aiming your birds accurately at the pigs' structures. You can use the dotted line that shows the trajectory of your bird and adjust it accordingly. You can also zoom in and out to get a better view of the level. Try to aim for the weak points of the structures, such as joints, supports, or explosives.
            • -
            • Use the right bird: Each bird has its own strength and weakness, so you need to use them wisely depending on the situation. For example, you can use the blue bird to break glass, the white bird to drop eggs on hard-to-reach places, or the green bird to boomerang back and hit hidden targets. You can also switch the order of your birds by tapping on them before launching them.
            • -
            • Use the power-ups and spells sparingly: The power-ups and spells can be very helpful, but they are also limited and costly. You can earn them by playing the game, watching ads, or buying them with real money. You should use them only when you really need them, such as when you are stuck on a level or when you want to get three stars. Don't waste them on easy levels or when you have already failed a level.
            • -
            • Watch the videos and tutorials: The game has many videos and tutorials that can teach you how to play better and learn new tricks. You can watch them by tapping on the video icon on the main menu or on the level selection screen. You can also watch other players' videos and see how they solve the levels. You might get some inspiration and ideas from them.
            • -
            • Have fun: The most important tip is to have fun and enjoy the game. Don't get too frustrated or angry if you fail a level or miss a shot. Remember that it is just a game and it is meant to entertain you. You can always try again or move on to another level. You can also take a break and come back later when you are in a better mood.
            • -
            -

            History of Angry Birds Classic 6.3.0 Mod APK

            -

            Angry Birds Classic 6.3.0 Mod APK is based on the original Angry Birds game, which has a long and interesting history. Here are some facts and milestones about the game:

            -
              -
            • The origin: The game was inspired by a sketch of stylized wingless birds that was made by Jaakko Iisalo, a senior game designer at Rovio Entertainment. The sketch caught the attention of the company's CEO, Mikael Hed, who decided to make a game based on it. The game was initially called Angry Birds Halloween, but it was later changed to Angry Birds Classic to avoid confusion with another game called Angry Birds Seasons.
            • -
            • The release: The game was released for iOS devices on December 11, 2009, and it became an instant hit. It received positive reviews from critics and players, who praised its gameplay, graphics, sound, and humor. It also became one of the top-selling apps on the App Store, reaching over 12 million downloads by October 2010.
            • -
            • The expansion: The game was later ported to other platforms, such as Android, Windows Phone, Symbian, webOS, BlackBerry, Windows, Mac OS X, Linux, Facebook, Roku, etc. It also spawned many spin-offs and sequels, such as Angry Birds Rio, Angry Birds Space, Angry Birds Star Wars, Angry Birds Go!, Angry Birds Epic, etc. It also inspired many merchandise products, such as toys, books, comics, movies, TV shows, etc.
            • -
            • The success: The game has become one of the most successful and popular mobile games of all time, with over 3 billion downloads across all platforms as of July 2015. It has also won many awards and recognitions, such as Guinness World Records for being the most downloaded paid app in most countries and for having the largest mobile app update ever (Angry Birds Space). It has also been praised for its cultural impact and influence on other media and industries.
            • -
            -

            Reviews of Angry Birds Classic 6.3.0 Mod APK

            -

            Angry Birds Classic 6.3.0 Mod APK has received mostly positive reviews from critics and players alike. Here are some examples of what they say about the game and the mod:

            Critic/Player | Review | Rating
            IGN | "Angry Birds is one of the most addictive and clever games I've played in a long time. It's simple, fun, and challenging, and it has a lot of replay value. The mod version adds more features and options, making it even more enjoyable and rewarding." | 9.5/10
            Android Authority | "Angry Birds Classic is a classic for a reason. It's one of the best puzzle games ever made, and it's still fun and fresh after all these years. The mod version gives you more freedom and flexibility, allowing you to play the game as you like. You can unlock all the levels, get unlimited money and power-ups, and have a blast with the birds and the pigs." | 8.5/10
            Google Play user | "I love this game. It's so addictive and fun. I can play it for hours and never get bored. The graphics are great, the sound effects are hilarious, and the gameplay is challenging and satisfying. The mod version is awesome, too. It gives you more options and rewards, and it makes the game easier and more enjoyable. I highly recommend it to anyone who likes puzzle games." | 5/5
            Google Play user | "This game is good, but the mod version is better. It lets you access all the levels and episodes without having to pay or wait. It also gives you more money and power-ups, which can help you beat the hard levels. The only problem is that it can make the game too easy and boring sometimes. I wish there was a way to adjust the difficulty level or turn off some of the mod features." | 4/5
            -

            Conclusion

            -

            Angry Birds Classic 6.3.0 Mod APK is a modified version of the original Angry Birds game, which is one of the most successful and popular mobile games of all time. The game features physics-based puzzle gameplay, colorful graphics, humorous sound effects, and low price. The mod version adds more features and options, such as unlimited money, power-ups, spells, extra birds, and unlocked levels. The mod version can make the game more fun and rewarding, but it can also make it too easy and boring sometimes.

            -

            If you are looking for a casual puzzle game that is easy to play but hard to master, you might want to try Angry Birds Classic 6.3.0 Mod APK. You can download it from various websites or use a QR code scanner to get it directly on your Android device. You can also watch videos and tutorials to learn how to play better and discover new tricks. You can also compete with other players around the world and see who can get the highest score on each level.

            -

            Angry Birds Classic 6.3.0 Mod APK is a fun and addictive puzzle game that can entertain you for hours. You can enjoy the game with or without the mod features, depending on your preference and mood. You can also explore the history and achievements of the game, as well as the reviews and opinions of critics and players. You can also have fun with the different types of birds, power-ups, spells, and Mighty Eagle that can help you in your quest to defeat the pigs.

            -


            -

            FAQs

            -

            Here are some common questions and answers about Angry Birds Classic 6.3.0 Mod APK:

            -
              -
            1. Is Angry Birds Classic 6.3.0 Mod APK safe to download?
            2. -

              Yes, Angry Birds Classic 6.3.0 Mod APK is safe to download from reputable websites that have verified the file for viruses and malware. However, you should always be careful when downloading any file from unknown sources or third-party apps, as they might contain harmful or unwanted content.

              -
            3. Is Angry Birds Classic 6.3.0 Mod APK legal to use?
            4. -

              Yes, Angry Birds Classic 6.3.0 Mod APK is legal to use for personal and non-commercial purposes only. However, you should not use it to cheat or hack the game or violate its terms of service or privacy policy.

              -
            5. How do I install Angry Birds Classic 6.3.0 Mod APK on my Android device?
            6. -

              You can install Angry Birds Classic 6.3.0 Mod APK on your Android device by following these steps:

              -
                -
              • Download the APK file from a trusted website or use a QR code scanner to get it directly on your device.
              • -
              • Enable unknown sources on your device by going to Settings > Security > Unknown Sources.
              • -
              • Locate the downloaded file on your device using a file manager app or your browser.
              • -
              • Tap on the file and follow the instructions to install it.
              • -
              • Launch the game and enjoy the mod features.
              • -
              -
            7. How do I update Angry Birds Classic 6.3.0 Mod APK?
            8. -

              You can update Angry Birds Classic 6.3.0 Mod APK by downloading the latest version of the APK file from the same website or source that you got it from. You can also check for updates within the game by tapping on the settings icon and then on the update button. However, you should be aware that updating the game might remove some of the mod features or cause compatibility issues.

              -
            9. How do I uninstall Angry Birds Classic 6.3.0 Mod APK?
            10. -

              You can uninstall Angry Birds Classic 6.3.0 Mod APK by following these steps:

              -
                -
              • Go to Settings > Apps > Angry Birds Classic.
              • -
              • Tap on Uninstall and confirm your choice.
              • -
              • Delete the APK file from your device if you still have it.
              • -
              -

            401be4b1e0
            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Bridge Card Game - The Best Spades Game for Android.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Bridge Card Game - The Best Spades Game for Android.md deleted file mode 100644 index 7e9a4a6f2ef995ce90495f4df7ae47d7828fbebd..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Bridge Card Game - The Best Spades Game for Android.md +++ /dev/null @@ -1,106 +0,0 @@ -
            -

            Call Bridge Card Game APK Download: A Fun and Challenging Card Game for Everyone

            -

            If you are looking for a card game that is easy to learn but hard to master, you might want to try call bridge card game. Call bridge is a trick-taking card game for four players that originated in South Asia and is popular in countries like India, Nepal, and Bangladesh. It is similar to spades, but with some unique features that make it more exciting and strategic.

            -

            call bridge card game apk download


            Download Zip ->>->>->> https://ssurll.com/2uNQVK



            -

            How to Play Call Bridge Card Game

            -

            The basic rules of call bridge are as follows:

            -
              -
            • The game is played with a standard 52-card deck. The cards are ranked from high to low: A, K, Q, J, 10, 9, 8, 7, 6, 5, 4, 3, 2. Spades are always the trump suit.
            • -
            • The dealer shuffles the cards and deals 13 cards to each player, one at a time, counterclockwise.
            • -
            • Each player makes a bid, which is the number of tricks they think they can win in that round. The minimum bid is 2 and the maximum bid is 12. The player to the right of the dealer starts the bidding and it goes on counterclockwise.
            • -
            • The player who made the highest bid becomes the declarer and chooses the first card to play. The other players must follow suit if they can. If they cannot follow suit, they can play any card. The highest card of the suit led or the highest trump card wins the trick.
            • -
            • The winner of the trick leads the next trick. The round ends when all 13 tricks have been played.
            • -
            • The declarer scores points equal to their bid if they win at least that many tricks. If they fail to do so, they lose points equal to their bid. The other players score one point for each trick they win (see the code sketch after this list).
            • -
            • The game consists of five rounds. The player with the highest score at the end of the fifth round wins the game.
            • -
            -
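            The bidding, trick-taking, and scoring rules above map naturally onto a few lines of code. The sketch below is purely illustrative and is not taken from the call bridge app itself; the card representation, the player names, and the trick_winner/score_round helpers are all assumptions made for this example.

```python
# Illustrative sketch of the call bridge rules described above (not from the app).
# Cards are (player, rank, suit) tuples with string ranks; spades are always trump.
RANKS = {r: i for i, r in enumerate(
    ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"], start=2)}

def trick_winner(plays):
    """plays: list of (player, rank, suit) in play order. The highest spade wins;
    otherwise the highest card of the suit that was led wins."""
    led_suit = plays[0][2]
    trumps = [p for p in plays if p[2] == "spades"]
    pool = trumps if trumps else [p for p in plays if p[2] == led_suit]
    return max(pool, key=lambda p: RANKS[p[1]])[0]

def score_round(bids, tricks_won, declarer):
    """The declarer gains their bid if they make it and loses it otherwise;
    every other player scores one point per trick won, as stated above."""
    scores = {}
    for player in bids:
        if player == declarer:
            made = tricks_won[player] >= bids[player]
            scores[player] = bids[player] if made else -bids[player]
        else:
            scores[player] = tricks_won[player]
    return scores

# Example: East trumps a heart trick; North bid 5 but took only 4 tricks.
print(trick_winner([("North", "K", "hearts"), ("East", "2", "spades"),
                    ("South", "A", "hearts"), ("West", "9", "hearts")]))   # East
print(score_round({"North": 5, "East": 3, "South": 2, "West": 2},
                  {"North": 4, "East": 4, "South": 3, "West": 2}, "North"))
```

            Over the five rounds described above, these per-round scores would simply be accumulated for each player to decide the winner.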

            There are some variations and strategies that make call bridge more interesting and challenging. For example:

            -
              -
            • Some players use doubling and redoubling to increase or decrease the stakes of the game.
            • -
            • Some players use bonus bids of 8 or more tricks that score more points if successful but also carry more risk.
            • -
            • Some players use signals and conventions to communicate with their partner or deceive their opponents.
            • -
            • Some players use different scoring systems or rules for different situations.
            • -
            -

            How to Download Call Bridge Card Game APK

            -

            If you want to play call bridge on your Android device, you can download the call bridge card game apk from various sources online. Here are some steps to follow:

            -
              -
            1. Find a reliable website that offers the call bridge card game apk file. You can use a search engine or a review site to find one.
            2. -
            3. Click on the download link or button and save the file on your device.
            4. -
            5. Go to your device settings and enable installation from unknown sources if needed.
            6. -
            7. Locate the downloaded file and tap on it to install it (a command-line alternative is sketched after this list).
            8. -
            9. Launch the app and enjoy playing call bridge with your friends or online players.
            10. -
            -
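            If you prefer to sideload the file from a computer rather than tapping it on the phone, the same result can be scripted. This is only a sketch under two assumptions: the Android platform tools (adb) are installed and USB debugging is enabled on the device; the file name below is a placeholder, not an actual download.

```python
# Hypothetical sideload helper (assumes adb is on PATH and USB debugging is on).
import subprocess

def install_apk(apk_path: str) -> None:
    # Show connected devices first so a missing or unauthorized device is obvious.
    subprocess.run(["adb", "devices"], check=True)
    # `adb install -r` installs the package, replacing any existing copy.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    install_apk("call_bridge_card_game.apk")  # placeholder file name
```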

            Some of the features and benefits of downloading call bridge card game apk are:

            -
              -
            • You can play call bridge anytime and anywhere without an internet connection.
            • -
            • You can customize the game settings according to your preferences.
            • -
            • You can choose from different modes and levels of difficulty.
            • -
            • You can chat with other players and make new friends.
            • -
            • You can improve your skills and challenge yourself with different opponents.
            • -
            -

            Conclusion

            -

            Call bridge is a fun and challenging card game that can keep you entertained for hours. Whether you are a beginner or an expert, you can find a suitable level of challenge and enjoyment in this game. You can also download the call bridge card game apk on your Android device and play it offline or online with your friends or strangers. If you are looking for a card game that is easy to learn but hard to master, you might want to try call bridge card game.

            -


            -

            FAQs

            -

            Here are some common questions and answers about call bridge card game:

            -

            Q: What is the difference between call bridge and spades?

            -

            A: Call bridge and spades are both four-player trick-taking games in which spades are the permanent trump suit, but they differ in structure. Spades is normally played in two fixed partnerships, each side bids jointly, and made bids score ten points per trick, with special nil and blind nil bids. Call bridge is an individual game: every player makes their own call, the player with the highest call becomes the declarer and leads first, and scoring follows the rules described above.

            -

            Q: How can I improve my call bridge skills?

            -

            A: There are many ways to improve your call bridge skills, such as:

            -
              -
            • Practice playing with different opponents and learn from your mistakes.
            • -
            • Read books or articles about call bridge strategies and tips.
            • -
            • Watch videos or tutorials of call bridge experts and observe their moves.
            • -
            • Join online forums or communities of call bridge players and exchange ideas and feedback.
            • -
            -

            Q: Is call bridge a game of luck or skill?

            -

            A: Call bridge is a game that involves both luck and skill. Luck plays a role in the distribution of the cards and the bids of the opponents. Skill plays a role in the decision-making, communication, and execution of the tricks. A good call bridge player knows how to balance both luck and skill and adapt to different situations.

            -

            Q: How can I play call bridge with my friends online?

            -

            A: If you want to play call bridge with your friends online, you can use the call bridge card game apk that you downloaded on your device. You can create a private room and invite your friends to join it. You can also chat with them and have fun together.

            -

            Q: Where can I find more information about call bridge card game?

            -

            A: If you want to find more information about call bridge card game, you can use Bing search to find various sources online. You can also visit [this website] that provides a comprehensive guide on how to play call bridge card game.

            401be4b1e0
            -
            -
            \ No newline at end of file diff --git a/spaces/sixsixsix/BingAi/README.md b/spaces/sixsixsix/BingAi/README.md deleted file mode 100644 index 4bf2b46957199656b7d0b516ba3f9540947f18cd..0000000000000000000000000000000000000000 --- a/spaces/sixsixsix/BingAi/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: BingAi -emoji: 😻 -colorFrom: yellow -colorTo: green -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/skf15963/summary/fengshen/examples/tcbert/__init__.py b/spaces/skf15963/summary/fengshen/examples/tcbert/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/sklearn-docs/feature_agglomeration/README.md b/spaces/sklearn-docs/feature_agglomeration/README.md deleted file mode 100644 index cea9095a1be3ee1adf78089336de89b864073d97..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/feature_agglomeration/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Feature Agglomeration -emoji: 👀 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sklearn-docs/gaussian-quantile-adaboost/README.md b/spaces/sklearn-docs/gaussian-quantile-adaboost/README.md deleted file mode 100644 index 18e807b96ea4dc23fc016a5e0fd1fd4a91cb3c67..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/gaussian-quantile-adaboost/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Gaussian Quantile Adaboost -emoji: 🚀 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/utils/misc.py b/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/utils/misc.py deleted file mode 100644 index 3b444ff3b950e38f43a5451d1330ff1b65951a9e..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/utils/misc.py +++ /dev/null @@ -1,134 +0,0 @@ -import numpy as np -import os -import random -import time -import torch -from os import path as osp - -from .dist_util import master_only -from .logger import get_root_logger - - -def set_random_seed(seed): - """Set random seeds.""" - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - -def get_time_str(): - return time.strftime('%Y%m%d_%H%M%S', time.localtime()) - - -def mkdir_and_rename(path): - """mkdirs. If path exists, rename it with timestamp and create a new one. - - Args: - path (str): Folder path. - """ - if osp.exists(path): - new_name = path + '_archived_' + get_time_str() - print(f'Path already exists. 
Rename it to {new_name}', flush=True) - os.rename(path, new_name) - os.makedirs(path, exist_ok=True) - - -@master_only -def make_exp_dirs(opt): - """Make dirs for experiments.""" - path_opt = opt['path'].copy() - if opt['is_train']: - mkdir_and_rename(path_opt.pop('experiments_root')) - else: - mkdir_and_rename(path_opt.pop('results_root')) - for key, path in path_opt.items(): - if ('strict_load' not in key) and ('pretrain_network' not in key) and ('resume' not in key): - os.makedirs(path, exist_ok=True) - - -def scandir(dir_path, suffix=None, recursive=False, full_path=False): - """Scan a directory to find the interested files. - - Args: - dir_path (str): Path of the directory. - suffix (str | tuple(str), optional): File suffix that we are - interested in. Default: None. - recursive (bool, optional): If set to True, recursively scan the - directory. Default: False. - full_path (bool, optional): If set to True, include the dir_path. - Default: False. - - Returns: - A generator for all the interested files with relative pathes. - """ - - if (suffix is not None) and not isinstance(suffix, (str, tuple)): - raise TypeError('"suffix" must be a string or tuple of strings') - - root = dir_path - - def _scandir(dir_path, suffix, recursive): - for entry in os.scandir(dir_path): - if not entry.name.startswith('.') and entry.is_file(): - if full_path: - return_path = entry.path - else: - return_path = osp.relpath(entry.path, root) - - if suffix is None: - yield return_path - elif return_path.endswith(suffix): - yield return_path - else: - if recursive: - yield from _scandir(entry.path, suffix=suffix, recursive=recursive) - else: - continue - - return _scandir(dir_path, suffix=suffix, recursive=recursive) - - -def check_resume(opt, resume_iter): - """Check resume states and pretrain_network paths. - - Args: - opt (dict): Options. - resume_iter (int): Resume iteration. - """ - logger = get_root_logger() - if opt['path']['resume_state']: - # get all the networks - networks = [key for key in opt.keys() if key.startswith('network_')] - flag_pretrain = False - for network in networks: - if opt['path'].get(f'pretrain_{network}') is not None: - flag_pretrain = True - if flag_pretrain: - logger.warning('pretrain_network path will be ignored during resuming.') - # set pretrained model paths - for network in networks: - name = f'pretrain_{network}' - basename = network.replace('network_', '') - if opt['path'].get('ignore_resume_networks') is None or (basename - not in opt['path']['ignore_resume_networks']): - opt['path'][name] = osp.join(opt['path']['models'], f'net_{basename}_{resume_iter}.pth') - logger.info(f"Set {name} to {opt['path'][name]}") - - -def sizeof_fmt(size, suffix='B'): - """Get human readable file size. - - Args: - size (int): File size. - suffix (str): Suffix. Default: 'B'. - - Return: - str: Formated file siz. 
- """ - for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: - if abs(size) < 1024.0: - return f'{size:3.1f} {unit}{suffix}' - size /= 1024.0 - return f'{size:3.1f} Y{suffix}' diff --git a/spaces/sneedium/pixelplanetocr/callbacks.py b/spaces/sneedium/pixelplanetocr/callbacks.py deleted file mode 100644 index 82fb9e34da2a819ce849857c304bb3cd23973e81..0000000000000000000000000000000000000000 --- a/spaces/sneedium/pixelplanetocr/callbacks.py +++ /dev/null @@ -1,360 +0,0 @@ -import logging -import shutil -import time - -import editdistance as ed -import torchvision.utils as vutils -from fastai.callbacks.tensorboard import (LearnerTensorboardWriter, - SummaryWriter, TBWriteRequest, - asyncTBWriter) -from fastai.vision import * -from torch.nn.parallel import DistributedDataParallel -from torchvision import transforms - -import dataset -from utils import CharsetMapper, Timer, blend_mask - - -class IterationCallback(LearnerTensorboardWriter): - "A `TrackerCallback` that monitor in each iteration." - def __init__(self, learn:Learner, name:str='model', checpoint_keep_num=5, - show_iters:int=50, eval_iters:int=1000, save_iters:int=20000, - start_iters:int=0, stats_iters=20000): - #if self.learn.rank is not None: time.sleep(self.learn.rank) # keep all event files - super().__init__(learn, base_dir='.', name=learn.path, loss_iters=show_iters, - stats_iters=stats_iters, hist_iters=stats_iters) - self.name, self.bestname = Path(name).name, f'best-{Path(name).name}' - self.show_iters = show_iters - self.eval_iters = eval_iters - self.save_iters = save_iters - self.start_iters = start_iters - self.checpoint_keep_num = checpoint_keep_num - self.metrics_root = 'metrics/' # rewrite - self.timer = Timer() - self.host = self.learn.rank is None or self.learn.rank == 0 - - def _write_metrics(self, iteration:int, names:List[str], last_metrics:MetricsList)->None: - "Writes training metrics to Tensorboard." - for i, name in enumerate(names): - if last_metrics is None or len(last_metrics) < i+1: return - scalar_value = last_metrics[i] - self._write_scalar(name=name, scalar_value=scalar_value, iteration=iteration) - - def _write_sub_loss(self, iteration:int, last_losses:dict)->None: - "Writes sub loss to Tensorboard." - for name, loss in last_losses.items(): - scalar_value = to_np(loss) - tag = self.metrics_root + name - self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration) - - def _save(self, name): - if isinstance(self.learn.model, DistributedDataParallel): - tmp = self.learn.model - self.learn.model = self.learn.model.module - self.learn.save(name) - self.learn.model = tmp - else: self.learn.save(name) - - def _validate(self, dl=None, callbacks=None, metrics=None, keeped_items=False): - "Validate on `dl` with potential `callbacks` and `metrics`." 
- dl = ifnone(dl, self.learn.data.valid_dl) - metrics = ifnone(metrics, self.learn.metrics) - cb_handler = CallbackHandler(ifnone(callbacks, []), metrics) - cb_handler.on_train_begin(1, None, metrics); cb_handler.on_epoch_begin() - if keeped_items: cb_handler.state_dict.update(dict(keeped_items=[])) - val_metrics = validate(self.learn.model, dl, self.loss_func, cb_handler) - cb_handler.on_epoch_end(val_metrics) - if keeped_items: return cb_handler.state_dict['keeped_items'] - else: return cb_handler.state_dict['last_metrics'] - - def jump_to_epoch_iter(self, epoch:int, iteration:int)->None: - try: - self.learn.load(f'{self.name}_{epoch}_{iteration}', purge=False) - logging.info(f'Loaded {self.name}_{epoch}_{iteration}') - except: logging.info(f'Model {self.name}_{epoch}_{iteration} not found.') - - def on_train_begin(self, n_epochs, **kwargs): - # TODO: can not write graph here - # super().on_train_begin(**kwargs) - self.best = -float('inf') - self.timer.tic() - if self.host: - checkpoint_path = self.learn.path/'checkpoint.yaml' - if checkpoint_path.exists(): - os.remove(checkpoint_path) - open(checkpoint_path, 'w').close() - return {'skip_validate': True, 'iteration':self.start_iters} # disable default validate - - def on_batch_begin(self, **kwargs:Any)->None: - self.timer.toc_data() - super().on_batch_begin(**kwargs) - - def on_batch_end(self, iteration, epoch, last_loss, smooth_loss, train, **kwargs): - super().on_batch_end(last_loss, iteration, train, **kwargs) - if iteration == 0: return - - if iteration % self.loss_iters == 0: - last_losses = self.learn.loss_func.last_losses - self._write_sub_loss(iteration=iteration, last_losses=last_losses) - self.tbwriter.add_scalar(tag=self.metrics_root + 'lr', - scalar_value=self.opt.lr, global_step=iteration) - - if iteration % self.show_iters == 0: - log_str = f'epoch {epoch} iter {iteration}: loss = {last_loss:6.4f}, ' \ - f'smooth loss = {smooth_loss:6.4f}' - logging.info(log_str) - # log_str = f'data time = {self.timer.data_diff:.4f}s, runing time = {self.timer.running_diff:.4f}s' - # logging.info(log_str) - - if iteration % self.eval_iters == 0: - # TODO: or remove time to on_epoch_end - # 1. Record time - log_str = f'average data time = {self.timer.average_data_time():.4f}s, ' \ - f'average running time = {self.timer.average_running_time():.4f}s' - logging.info(log_str) - - # 2. Call validate - last_metrics = self._validate() - self.learn.model.train() - log_str = f'epoch {epoch} iter {iteration}: eval loss = {last_metrics[0]:6.4f}, ' \ - f'ccr = {last_metrics[1]:6.4f}, cwr = {last_metrics[2]:6.4f}, ' \ - f'ted = {last_metrics[3]:6.4f}, ned = {last_metrics[4]:6.4f}, ' \ - f'ted/w = {last_metrics[5]:6.4f}, ' - logging.info(log_str) - names = ['eval_loss', 'ccr', 'cwr', 'ted', 'ned', 'ted/w'] - self._write_metrics(iteration, names, last_metrics) - - # 3. 
Save best model - current = last_metrics[2] - if current is not None and current > self.best: - logging.info(f'Better model found at epoch {epoch}, '\ - f'iter {iteration} with accuracy value: {current:6.4f}.') - self.best = current - self._save(f'{self.bestname}') - - if iteration % self.save_iters == 0 and self.host: - logging.info(f'Save model {self.name}_{epoch}_{iteration}') - filename = f'{self.name}_{epoch}_{iteration}' - self._save(filename) - - checkpoint_path = self.learn.path/'checkpoint.yaml' - if not checkpoint_path.exists(): - open(checkpoint_path, 'w').close() - with open(checkpoint_path, 'r') as file: - checkpoints = yaml.load(file, Loader=yaml.FullLoader) or dict() - checkpoints['all_checkpoints'] = ( - checkpoints.get('all_checkpoints') or list()) - checkpoints['all_checkpoints'].insert(0, filename) - if len(checkpoints['all_checkpoints']) > self.checpoint_keep_num: - removed_checkpoint = checkpoints['all_checkpoints'].pop() - removed_checkpoint = self.learn.path/self.learn.model_dir/f'{removed_checkpoint}.pth' - os.remove(removed_checkpoint) - checkpoints['current_checkpoint'] = filename - with open(checkpoint_path, 'w') as file: - yaml.dump(checkpoints, file) - - - self.timer.toc_running() - - def on_train_end(self, **kwargs): - #self.learn.load(f'{self.bestname}', purge=False) - pass - - def on_epoch_end(self, last_metrics:MetricsList, iteration:int, **kwargs)->None: - self._write_embedding(iteration=iteration) - - -class TextAccuracy(Callback): - _names = ['ccr', 'cwr', 'ted', 'ned', 'ted/w'] - def __init__(self, charset_path, max_length, case_sensitive, model_eval): - self.charset_path = charset_path - self.max_length = max_length - self.case_sensitive = case_sensitive - self.charset = CharsetMapper(charset_path, self.max_length) - self.names = self._names - - self.model_eval = model_eval or 'alignment' - assert self.model_eval in ['vision', 'language', 'alignment'] - - def on_epoch_begin(self, **kwargs): - self.total_num_char = 0. - self.total_num_word = 0. - self.correct_num_char = 0. - self.correct_num_word = 0. - self.total_ed = 0. - self.total_ned = 0. 
- - def _get_output(self, last_output): - if isinstance(last_output, (tuple, list)): - for res in last_output: - if res['name'] == self.model_eval: output = res - else: output = last_output - return output - - def _update_output(self, last_output, items): - if isinstance(last_output, (tuple, list)): - for res in last_output: - if res['name'] == self.model_eval: res.update(items) - else: last_output.update(items) - return last_output - - def on_batch_end(self, last_output, last_target, **kwargs): - output = self._get_output(last_output) - logits, pt_lengths = output['logits'], output['pt_lengths'] - pt_text, pt_scores, pt_lengths_ = self.decode(logits) - assert (pt_lengths == pt_lengths_).all(), f'{pt_lengths} != {pt_lengths_} for {pt_text}' - last_output = self._update_output(last_output, {'pt_text':pt_text, 'pt_scores':pt_scores}) - - pt_text = [self.charset.trim(t) for t in pt_text] - label = last_target[0] - if label.dim() == 3: label = label.argmax(dim=-1) # one-hot label - gt_text = [self.charset.get_text(l, trim=True) for l in label] - - for i in range(len(gt_text)): - if not self.case_sensitive: - gt_text[i], pt_text[i] = gt_text[i].lower(), pt_text[i].lower() - distance = ed.eval(gt_text[i], pt_text[i]) - self.total_ed += distance - self.total_ned += float(distance) / max(len(gt_text[i]), 1) - - if gt_text[i] == pt_text[i]: - self.correct_num_word += 1 - self.total_num_word += 1 - - for j in range(min(len(gt_text[i]), len(pt_text[i]))): - if gt_text[i][j] == pt_text[i][j]: - self.correct_num_char += 1 - self.total_num_char += len(gt_text[i]) - - return {'last_output': last_output} - - def on_epoch_end(self, last_metrics, **kwargs): - mets = [self.correct_num_char / self.total_num_char, - self.correct_num_word / self.total_num_word, - self.total_ed, - self.total_ned, - self.total_ed / self.total_num_word] - return add_metrics(last_metrics, mets) - - def decode(self, logit): - """ Greed decode """ - # TODO: test running time and decode on GPU - out = F.softmax(logit, dim=2) - pt_text, pt_scores, pt_lengths = [], [], [] - for o in out: - text = self.charset.get_text(o.argmax(dim=1), padding=False, trim=False) - text = text.split(self.charset.null_char)[0] # end at end-token - pt_text.append(text) - pt_scores.append(o.max(dim=1)[0]) - pt_lengths.append(min(len(text) + 1, self.max_length)) # one for end-token - pt_scores = torch.stack(pt_scores) - pt_lengths = pt_scores.new_tensor(pt_lengths, dtype=torch.long) - return pt_text, pt_scores, pt_lengths - - -class TopKTextAccuracy(TextAccuracy): - _names = ['ccr', 'cwr'] - def __init__(self, k, charset_path, max_length, case_sensitive, model_eval): - self.k = k - self.charset_path = charset_path - self.max_length = max_length - self.case_sensitive = case_sensitive - self.charset = CharsetMapper(charset_path, self.max_length) - self.names = self._names - - def on_epoch_begin(self, **kwargs): - self.total_num_char = 0. - self.total_num_word = 0. - self.correct_num_char = 0. - self.correct_num_word = 0. 
- - def on_batch_end(self, last_output, last_target, **kwargs): - logits, pt_lengths = last_output['logits'], last_output['pt_lengths'] - gt_labels, gt_lengths = last_target[:] - - for logit, pt_length, label, length in zip(logits, pt_lengths, gt_labels, gt_lengths): - word_flag = True - for i in range(length): - char_logit = logit[i].topk(self.k)[1] - char_label = label[i].argmax(-1) - if char_label in char_logit: self.correct_num_char += 1 - else: word_flag = False - self.total_num_char += 1 - if pt_length == length and word_flag: - self.correct_num_word += 1 - self.total_num_word += 1 - - def on_epoch_end(self, last_metrics, **kwargs): - mets = [self.correct_num_char / self.total_num_char, - self.correct_num_word / self.total_num_word, - 0., 0., 0.] - return add_metrics(last_metrics, mets) - - -class DumpPrediction(LearnerCallback): - - def __init__(self, learn, dataset, charset_path, model_eval, image_only=False, debug=False): - super().__init__(learn=learn) - self.debug = debug - self.model_eval = model_eval or 'alignment' - self.image_only = image_only - assert self.model_eval in ['vision', 'language', 'alignment'] - - self.dataset, self.root = dataset, Path(self.learn.path)/f'{dataset}-{self.model_eval}' - self.attn_root = self.root/'attn' - self.charset = CharsetMapper(charset_path) - if self.root.exists(): shutil.rmtree(self.root) - self.root.mkdir(), self.attn_root.mkdir() - - self.pil = transforms.ToPILImage() - self.tensor = transforms.ToTensor() - size = self.learn.data.img_h, self.learn.data.img_w - self.resize = transforms.Resize(size=size, interpolation=0) - self.c = 0 - - def on_batch_end(self, last_input, last_output, last_target, **kwargs): - if isinstance(last_output, (tuple, list)): - for res in last_output: - if res['name'] == self.model_eval: pt_text = res['pt_text'] - if res['name'] == 'vision': attn_scores = res['attn_scores'].detach().cpu() - if res['name'] == self.model_eval: logits = res['logits'] - else: - pt_text = last_output['pt_text'] - attn_scores = last_output['attn_scores'].detach().cpu() - logits = last_output['logits'] - - images = last_input[0] if isinstance(last_input, (tuple, list)) else last_input - images = images.detach().cpu() - pt_text = [self.charset.trim(t) for t in pt_text] - gt_label = last_target[0] - if gt_label.dim() == 3: gt_label = gt_label.argmax(dim=-1) # one-hot label - gt_text = [self.charset.get_text(l, trim=True) for l in gt_label] - - prediction, false_prediction = [], [] - for gt, pt, image, attn, logit in zip(gt_text, pt_text, images, attn_scores, logits): - prediction.append(f'{gt}\t{pt}\n') - if gt != pt: - if self.debug: - scores = torch.softmax(logit, dim=-1)[:max(len(pt), len(gt)) + 1] - logging.info(f'{self.c} gt {gt}, pt {pt}, logit {logit.shape}, scores {scores.topk(5, dim=-1)}') - false_prediction.append(f'{gt}\t{pt}\n') - - image = self.learn.data.denorm(image) - if not self.image_only: - image_np = np.array(self.pil(image)) - attn_pil = [self.pil(a) for a in attn[:, None, :, :]] - attn = [self.tensor(self.resize(a)).repeat(3, 1, 1) for a in attn_pil] - attn_sum = np.array([np.array(a) for a in attn_pil[:len(pt)]]).sum(axis=0) - blended_sum = self.tensor(blend_mask(image_np, attn_sum)) - blended = [self.tensor(blend_mask(image_np, np.array(a))) for a in attn_pil] - save_image = torch.stack([image] + attn + [blended_sum] + blended) - save_image = save_image.view(2, -1, *save_image.shape[1:]) - save_image = save_image.permute(1, 0, 2, 3, 4).flatten(0, 1) - vutils.save_image(save_image, 
self.attn_root/f'{self.c}_{gt}_{pt}.jpg', - nrow=2, normalize=True, scale_each=True) - else: - self.pil(image).save(self.attn_root/f'{self.c}_{gt}_{pt}.jpg') - self.c += 1 - - with open(self.root/f'{self.model_eval}.txt', 'a') as f: f.writelines(prediction) - with open(self.root/f'{self.model_eval}-false.txt', 'a') as f: f.writelines(false_prediction) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/p_choose_strategy.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/p_choose_strategy.py deleted file mode 100644 index 724c6912a62d48fc61988cac1434a4f5c8754521..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/utils/p_choose_strategy.py +++ /dev/null @@ -1,126 +0,0 @@ -from typing import Optional, Dict -from torch import Tensor -import torch - - -def waitk_p_choose( - tgt_len: int, - src_len: int, - bsz: int, - waitk_lagging: int, - key_padding_mask: Optional[Tensor] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None -): - - max_src_len = src_len - if incremental_state is not None: - # Retrieve target length from incremental states - # For inference the length of query is always 1 - max_tgt_len = incremental_state["steps"]["tgt"] - assert max_tgt_len is not None - max_tgt_len = int(max_tgt_len) - else: - max_tgt_len = tgt_len - - if max_src_len < waitk_lagging: - if incremental_state is not None: - max_tgt_len = 1 - return torch.zeros( - bsz, max_tgt_len, max_src_len - ) - - # Assuming the p_choose looks like this for wait k=3 - # src_len = 6, max_tgt_len = 5 - # [0, 0, 1, 0, 0, 0, 0] - # [0, 0, 0, 1, 0, 0, 0] - # [0, 0, 0, 0, 1, 0, 0] - # [0, 0, 0, 0, 0, 1, 0] - # [0, 0, 0, 0, 0, 0, 1] - # linearize the p_choose matrix: - # [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0...] - # The indices of linearized matrix that equals 1 is - # 2 + 6 * 0 - # 3 + 6 * 1 - # ... 
- # n + src_len * n + k - 1 = n * (src_len + 1) + k - 1 - # n from 0 to max_tgt_len - 1 - # - # First, generate the indices (activate_indices_offset: bsz, max_tgt_len) - # Second, scatter a zeros tensor (bsz, max_tgt_len * src_len) - # with activate_indices_offset - # Third, resize the tensor to (bsz, max_tgt_len, src_len) - - activate_indices_offset = ( - ( - torch.arange(max_tgt_len) * (max_src_len + 1) - + waitk_lagging - 1 - ) - .unsqueeze(0) - .expand(bsz, max_tgt_len) - .long() - ) - - if key_padding_mask is not None: - if key_padding_mask[:, 0].any(): - # Left padding - activate_indices_offset += ( - key_padding_mask.sum(dim=1, keepdim=True) - ) - - # Need to clamp the indices that are too large - activate_indices_offset = ( - activate_indices_offset - .clamp( - 0, - min( - [ - max_tgt_len, - max_src_len - waitk_lagging + 1 - ] - ) * max_src_len - 1 - ) - ) - - p_choose = torch.zeros(bsz, max_tgt_len * max_src_len) - - p_choose = p_choose.scatter( - 1, - activate_indices_offset, - 1.0 - ).view(bsz, max_tgt_len, max_src_len) - - if key_padding_mask is not None: - p_choose = p_choose.to(key_padding_mask) - p_choose = p_choose.masked_fill(key_padding_mask.unsqueeze(1), 0) - - if incremental_state is not None: - p_choose = p_choose[:, -1:] - - return p_choose.float() - - -def learnable_p_choose( - energy, - noise_mean: float = 0.0, - noise_var: float = 0.0, - training: bool = True -): - """ - Calculating step wise prob for reading and writing - 1 to read, 0 to write - energy: bsz, tgt_len, src_len - """ - - noise = 0 - if training: - # add noise here to encourage discretness - noise = ( - torch.normal(noise_mean, noise_var, energy.size()) - .type_as(energy) - .to(energy.device) - ) - - p_choose = torch.sigmoid(energy + noise) - - # p_choose: bsz * self.num_heads, tgt_len, src_len - return p_choose diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/__init__.py deleted file mode 100644 index 6264236915a7269a4d920ee8213004374dd86a9a..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
diff --git a/spaces/stmnk/pygen/strings.py b/spaces/stmnk/pygen/strings.py deleted file mode 100644 index 4eb306a8d4ead37c01970d0c742628afb6baf6a2..0000000000000000000000000000000000000000 --- a/spaces/stmnk/pygen/strings.py +++ /dev/null @@ -1,151 +0,0 @@ -dfs_code = r""" -def dfs(visited, graph, node): #function for dfs - if node not in visited: - print (node) - visited.add(node) - for neighbour in graph[node]: - dfs(visited, graph, neighbour) -""" - -function_code = r""" -def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, - batch_size: int = 10_000, duplicate_documents: Optional[str] = None): - - if index and not self.client.indices.exists(index=index): - self._create_document_index(index) - - if index is None: - index = self.index - duplicate_documents = duplicate_documents or self.duplicate_documents - assert duplicate_documents in self.duplicate_documents_options, - f"duplicate_documents parameter must be {', '.join(self.duplicate_documents_options)}" - - field_map = self._create_document_field_map() - document_objects = [Document.from_dict(d, field_map=field_map) if isinstance(d, dict) else d for d in documents] - document_objects = self._handle_duplicate_documents(documents=document_objects, - index=index, - duplicate_documents=duplicate_documents) - documents_to_index = [] - for doc in document_objects: - _doc = { - "_op_type": "index" if duplicate_documents == 'overwrite' else "create", - "_index": index, - **doc.to_dict(field_map=self._create_document_field_map()) - } # type: Dict[str, Any] - - # cast embedding type as ES cannot deal with np.array - if _doc[self.embedding_field] is not None: - if type(_doc[self.embedding_field]) == np.ndarray: - _doc[self.embedding_field] = _doc[self.embedding_field].tolist() - - # rename id for elastic - _doc["_id"] = str(_doc.pop("id")) - - # don't index query score and empty fields - _ = _doc.pop("score", None) - _doc = {k:v for k,v in _doc.items() if v is not None} - - # In order to have a flat structure in elastic + similar behaviour to the other DocumentStores, - # we "unnest" all value within "meta" - if "meta" in _doc.keys(): - for k, v in _doc["meta"].items(): - _doc[k] = v - _doc.pop("meta") - documents_to_index.append(_doc) - - # Pass batch_size number of documents to bulk - if len(documents_to_index) % batch_size == 0: - bulk(self.client, documents_to_index, request_timeout=300, refresh=self.refresh_type) - documents_to_index = [] - - if documents_to_index: - bulk(self.client, documents_to_index, request_timeout=300, refresh=self.refresh_type) - -""" - -real_docstring = r""" - Indexes documents for later queries in Elasticsearch. - - Behaviour if a document with the same ID already exists in ElasticSearch: - a) (Default) Throw Elastic's standard error message for duplicate IDs. - b) If `self.update_existing_documents=True` for DocumentStore: Overwrite existing documents. - (This is only relevant if you pass your own ID when initializing a `Document`. - If don't set custom IDs for your Documents or just pass a list of dictionaries here, - they will automatically get UUIDs assigned. See the `Document` class for details) - - :param documents: a list of Python dictionaries or a list of Haystack Document objects. - For documents as dictionaries, the format is {"content": ""}. - Optionally: Include meta data via {"content": "", - "meta":{"name": ", "author": "somebody", ...}} - It can be used for filtering and is accessible in the responses of the Finder. 
- Advanced: If you are using your own Elasticsearch mapping, the key names in the dictionary - should be changed to what you have set for self.content_field and self.name_field. - :param index: Elasticsearch index where the documents should be indexed. If not supplied, self.index will be used. - :param batch_size: Number of documents that are passed to Elasticsearch's bulk function at a time. - :param duplicate_documents: Handle duplicates document based on parameter options. - Parameter options : ( 'skip','overwrite','fail') - skip: Ignore the duplicates documents - overwrite: Update any existing documents with the same ID when adding documents. - fail: an error is raised if the document ID of the document being added already - exists. - :raises DuplicateDocumentError: Exception trigger on duplicate document - :return: None -""" - -tree_code = r""" -class Tree: - def __init__(self): - self.val = None - self.left = None - self.right = None -""" - -insert_code = r""" -def insert(self, val): - if self.val: - if val < self.val: - if self.left is None: - self.left = Tree(val) - else: - self.left.insert(val) - elif val > self.val: - if self.right is None: - self.right = Tree(val) - else: - self.right.insert(val) - else: - self.val = val -""" - -display_code = r""" -def display_tree(self: Tree, prefix='value: '): - current_node = self.val - - if self.left: - self.left.display_tree() - - print(prefix, current_node) - - if self.right: - self.right.display_tree() - -""" - -article_string = r"""CodeXGLLUE task definition (and dataset): **Code summarization (CodeSearchNet)**: - -_A model is given the task to generate natural language comments for a programming language code input._ - -For further details, see the [CodeXGLUE](https://github.com/microsoft/CodeXGLUE) benchmark dataset and open challenge for code intelligence. -""" - -descr_string = 'The application takes as input the python code for a function, or a class, and generates a documentation string, or code comment, for it using codeT5 fine tuned for code2text generation. Code to text generation, or code summarization, is a CodeXGLUE generation, or sequence to sequence, downstream task. CodeXGLUE stands for General Language Understanding Evaluation benchmark *for code*, which includes diversified code intelligence downstream inference tasks and datasets.' - -def pygen_func(nl_code_intent): - pass # TODO: generate code PL from intent NL + search in corpus - # inputs = {'code_nl': code_nl} - # payload = json.dumps(inputs) - # prediction = req.request(CT5_METHOD, CT5_URL, data=payload) - # prediction = req.request(CT5_METHOD, CT5_URL, json=req_data) - # answer = json.loads(prediction.content.decode("utf-8")) - # return str(answer) - # CT5_URL = "https://api-inference.huggingface.co/models/nielsr/codet5-small-code-summarization-ruby" diff --git a/spaces/stomexserde/gpt4-ui/Examples/Aerosoft - Casablanca 2011 (FSX)Aerosoft - Casablanca 2011 (FSX)l.md b/spaces/stomexserde/gpt4-ui/Examples/Aerosoft - Casablanca 2011 (FSX)Aerosoft - Casablanca 2011 (FSX)l.md deleted file mode 100644 index 3afe69ad1f7dce8fbc7dc21192909e4a55bd71f7..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Aerosoft - Casablanca 2011 (FSX)Aerosoft - Casablanca 2011 (FSX)l.md +++ /dev/null @@ -1,76 +0,0 @@ - -

            Aerosoft - Casablanca 2011 (FSX): A Review

            -

            If you are a fan of flight simulation, you probably know that one of the best ways to enhance your experience is to use add-ons that improve the realism and variety of your virtual flights. There are many add-ons available for different aspects of flight simulation, such as aircraft, scenery, weather, traffic, etc. In this article, we will review one of the most popular scenery add-ons for Microsoft Flight Simulator X (FSX), which is Aerosoft - Casablanca 2011.

            -

Aerosoft - Casablanca 2011 (FSX)


Download Zip: https://urlgoal.com/2uIavD



            -

            Introduction

            -

            Aerosoft - Casablanca 2011 is a product that recreates the Mohammed V International Airport (GMMN) and its surroundings in FSX. It is developed by FSDG, a team of talented scenery designers who have created many other high-quality products for flight simulation. Aerosoft is a well-known publisher and distributor of flight simulation software and hardware.

            -

Why is Aerosoft - Casablanca 2011 a popular add-on for flight simulator enthusiasts? There are several reasons. First of all, Casablanca's Mohammed V International Airport is one of the biggest airports in North Africa, and the city itself is one of the most famous on the continent. The airport is a hub for Royal Air Maroc, Air Arabia Maroc, and Royal Air Maroc Express, as well as a destination for many other airlines from Europe, Africa, Asia, and America. It offers a variety of routes and challenges for pilots of different skill levels and preferences.

            -

            Secondly, Aerosoft - Casablanca 2011 is a product that offers a high level of quality and detail in both the airport and the scenery. It features realistic shadow and light rendition, optimized performance, animated ground traffic, new terminal and ground layout, compatible jetways, accurate buildings, landmarks, vegetation, etc. It also includes some aircraft models and liveries that are specific to Casablanca airport.

            -

            Thirdly, Aerosoft - Casablanca 2011 is a product that enhances the immersion and realism of flight simulation. It provides realistic weather conditions, dynamic traffic, radio navigation aids, navigation charts, etc. It also integrates well with other add-ons such as AES, GSX, UTX, FS Global, etc.

            -

            -

            Installation and Configuration

            -

            How to download and install Aerosoft - Casablanca 2011 (FSX)? The process is very simple and straightforward. You can buy the product from the Aerosoft website or from other online stores that sell flight simulation products. After you complete the payment, you will receive a download link and a serial number. You just need to download the installer file, run it, and follow the instructions. You will need to enter your serial number during the installation.

            -

            How to configure the settings and options for optimal performance and realism? After you install Aerosoft - Casablanca 2011 (FSX), you will find a configuration tool in the start menu or on your desktop. You can use this tool to adjust various settings and options, such as the level of detail, the amount of traffic, the type of jetways, etc. You can also enable or disable some features, such as dynamic shadows, 3D grass, animated vehicles, etc. You should experiment with different settings and find the best balance between performance and quality for your system.

            -

            How to access the manual and other documentation? Aerosoft - Casablanca 2011 (FSX) comes with a comprehensive manual that explains everything you need to know about the product. You can find the manual in PDF format in the Aerosoft folder in your FSX directory. You can also access it from the configuration tool or from the start menu. The manual covers topics such as installation, configuration, scenery features, airport information, navigation charts, etc. It also includes some screenshots and tips for flying in and out of Casablanca airport.

            -

            Scenery and Airport

            -

            How does Aerosoft - Casablanca 2011 (FSX) recreate the scenery and airport of Casablanca, Morocco? Aerosoft - Casablanca 2011 (FSX) is a product that aims to provide a realistic and detailed representation of the Mohammed V International Airport and its surroundings. It covers an area of about 400 square kilometers, including the city of Casablanca, the coastline, the mountains, and some nearby towns and villages.

            -

            What are the details and accuracy of the terrain, buildings, landmarks, and vegetation? Aerosoft - Casablanca 2011 (FSX) uses high-resolution aerial imagery and custom-made mesh to create a realistic terrain with accurate elevation data. It also features custom-made buildings and landmarks that match the real ones in terms of shape, size, color, and texture. Some of these landmarks include the Hassan II Mosque, the Royal Palace, the Twin Center, the Port of Casablanca, etc. The vegetation is also carefully placed and varied according to the region and season.

            -

            What are the effects of lighting, shadows, weather, and traffic on the scenery and airport? Aerosoft - Casablanca 2011 (FSX) uses advanced techniques to create realistic lighting and shadow effects on the scenery and airport. It supports dynamic shadows that change according to the time of day and weather conditions. It also features realistic night lighting that illuminates the buildings, roads, runways, taxiways, etc. The weather effects include rain, snow, fog, clouds, etc., that affect the visibility and atmosphere of the scenery. The traffic effects include animated ground vehicles that move around the airport and city streets.

            -

            Aircraft and Navigation

            -

            What are the aircraft models and liveries included in Aerosoft - Casablanca 2011 (FSX)? Aerosoft - Casablanca 2011 (FSX) includes some aircraft models and liveries that are specific to Casablanca airport. These are static aircraft that are parked at various gates or stands at the airport. They add some realism and variety to the airport environment. Some of these aircraft models are Boeing 737-800, Airbus A320-200, Airbus A330-200, Boeing 747-400, etc. Some of these liveries are Royal Air Maroc, Air Arabia Maroc, Air France, Emirates, etc.

            -

            How do they perform and handle in flight simulation? These aircraft models are not flyable in flight simulation. They are only for decoration purposes. They do not have any impact on the performance or handling of your own aircraft in flight simulation. They are only visible when you are near them or when you use external views or cameras.

            -

What are the navigation aids and systems available for flying to and from Casablanca airport? Aerosoft - Casablanca 2011 (FSX) provides realistic navigation aids and systems for flying to and from Casablanca airport. These include ILS (Instrument Landing System), VOR (VHF Omnidirectional Range), DME (Distance Measuring Equipment), NDB (Non-Directional Beacon), GPS (Global Positioning System), etc. These navigation aids and systems help you to fly accurately and safely to and from Casablanca airport. They are compatible with the default FSX ATC (Air Traffic Control) and AI (Artificial Intelligence) traffic. They are also updated to reflect the latest changes and procedures at the airport.

            -

            Conclusion

            -

            What are the pros and cons of Aerosoft - Casablanca 2011 (FSX)? Aerosoft - Casablanca 2011 (FSX) is a product that has many pros and few cons. Some of the pros are:

            -
              -
            • It is a high-quality and detailed scenery add-on that recreates the Mohammed V International Airport and its surroundings in FSX.
            • -
            • It offers a variety of routes and challenges for pilots of different skill levels and preferences.
            • -
            • It enhances the immersion and realism of flight simulation with realistic lighting, shadows, weather, traffic, etc.
            • -
            • It includes some aircraft models and liveries that are specific to Casablanca airport.
            • -
            • It provides realistic navigation aids and systems for flying to and from Casablanca airport.
            • -
            • It integrates well with other add-ons such as AES, GSX, UTX, FS Global, etc.
            • -
            • It comes with a comprehensive manual that explains everything you need to know about the product.
            • -
            -

            Some of the cons are:

            -
              -
            • It is a relatively expensive product compared to some other scenery add-ons on the market.
            • -
            • It requires a powerful system to run smoothly and without any issues.
            • -
            • It does not include any dynamic or interactive features such as animated jetways, moving people, etc.
            • -
            -

            How does Aerosoft - Casablanca 2011 (FSX) compare to other similar products on the market? Aerosoft - Casablanca 2011 (FSX) is one of the best scenery add-ons for FSX that covers the region of North Africa. It is comparable to other products such as FSDreamTeam - Cairo International Airport, FlyTampa - Dubai Rebooted, Simbreeze - Abu Dhabi International Airport, etc. However, each product has its own strengths and weaknesses, and it depends on your personal taste and preference which one you like more.

            -

            Who is this product suitable for and why? Aerosoft - Casablanca 2011 (FSX) is a product that is suitable for anyone who enjoys flight simulation and wants to explore a new and exciting destination. It is especially suitable for those who like to fly in different weather conditions, with different aircraft types, and with different levels of difficulty. It is also suitable for those who appreciate high-quality and realistic scenery that enhances the immersion and realism of flight simulation.

            -

            FAQs

            -

            Here are some frequently asked questions about Aerosoft - Casablanca 2011 (FSX):

            -

            What are the system requirements for Aerosoft - Casablanca 2011 (FSX)?

            -

            The system requirements for Aerosoft - Casablanca 2011 (FSX) are:

            -
              -
            • Microsoft Flight Simulator X (SP2, Acceleration Pack or Gold Edition)
            • -
            • Windows XP / Vista / 7 / 8 / 10
            • -
            • Dual Core Processor with 3 GHz or faster
            • -
            • 4 GB RAM or more
            • -
            • 3D graphics card with 1024 MB or more
            • -
            • Download-Size: 800 MB
            • -
            • Installation-Size: 1.8 GB
            • -
            -

            How much does Aerosoft - Casablanca 2011 (FSX) cost and where can I buy it?

            -

            Aerosoft - Casablanca 2011 (FSX) costs €24.95 (including VAT) or $29.99 (excluding VAT). You can buy it from the Aerosoft website or from other online stores that sell flight simulation products. You can also get a discount if you buy it as part of a bundle with other products from Aerosoft or FSDG.

            -

            How can I get support or report issues with Aerosoft - Casablanca 2011 (FSX)?

            -

            If you need support or want to report any issues with Aerosoft - Casablanca 2011 (FSX), you can contact the developers or the publishers through their websites or forums. You can also check the manual or the FAQ section for possible solutions or answers to your questions. You can also read reviews or watch videos from other users who have used the product before.


            -

            Is Aerosoft - Casablanca 2011 (FSX) compatible with other add-ons or versions of flight simulator?

            -

            Aerosoft - Casablanca 2011 (FSX) is compatible with most of the add-ons that enhance the realism and variety of flight simulation. These include AES, GSX, UTX, FS Global, etc. However, some add-ons may cause conflicts or issues with Aerosoft - Casablanca 2011 (FSX), especially if they modify the same area or airport. In that case, you may need to disable or uninstall the conflicting add-ons, or adjust the priority order in the scenery library.

            -

            Aerosoft - Casablanca 2011 (FSX) is designed for Microsoft Flight Simulator X (SP2, Acceleration Pack or Gold Edition). It is not compatible with other versions of flight simulator, such as FS2004, P3D, X-Plane, etc. However, there may be some unofficial patches or updates that make it compatible with other versions of flight simulator. You should check the forums or websites of the developers or publishers for more information.

            -

            What are some tips and tricks for enjoying Aerosoft - Casablanca 2011 (FSX)?

            -

            Here are some tips and tricks for enjoying Aerosoft - Casablanca 2011 (FSX):

            -
              -
            • Read the manual carefully and follow the installation and configuration instructions.
            • -
            • Use the configuration tool to adjust the settings and options according to your system and preference.
            • -
            • Use the navigation charts and aids to plan and execute your flights to and from Casablanca airport.
            • -
            • Explore the scenery and airport from different angles and views, using external cameras or tools such as EZDOK or TrackIR.
            • -
            • Try different weather conditions, time of day, seasons, etc., to see how they affect the scenery and airport.
            • -
            • Try different aircraft types and liveries, especially those that are included in Aerosoft - Casablanca 2011 (FSX) or that are common at Casablanca airport.
            • -
            • Join online communities or networks such as VATSIM or IVAO to fly with other pilots and controllers in a realistic environment.
            • -

            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Baa Baaa Black Sheep Full Movie [VERIFIED] Download 720p Movie.md b/spaces/stomexserde/gpt4-ui/Examples/Baa Baaa Black Sheep Full Movie [VERIFIED] Download 720p Movie.md deleted file mode 100644 index 658ff075b85316ea8fc64d542c771b4e8cf02641..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Baa Baaa Black Sheep Full Movie [VERIFIED] Download 720p Movie.md +++ /dev/null @@ -1,13 +0,0 @@ - -

            Baa Baaa Black Sheep: A Hilarious Action Comedy Movie

            -

            If you are looking for a movie that will make you laugh and keep you entertained, then you should check out Baa Baaa Black Sheep, a 2018 action comedy film starring Anupam Kher, Maniesh Paul, Annu Kapoor, Manjari Phadnis and Kay Kay Menon. The movie is directed by Vishwas Paandya and written by Sanjeev Puri and Vishwas Paandya.

            -

            The movie revolves around Baba (Maniesh Paul), who is a simple and innocent cashew nut dealer. On his 25th birthday, he gets a shocking revelation from his father Charudutt Sharma (Anupam Kher) that their family business is actually contract killing. Baba is the 13th generation of hitmen in their family and he has to take over the legacy. However, Baba is not cut out for this job and he faces many hilarious situations as he tries to balance his love life with Angelina Morris (Manjari Phadnis), his friendship with Ashish Shivalkar (Sahil Vaid) and his encounters with ACP Shivraj Naik (Kay Kay Menon) and Brian Morris (Annu Kapoor), who are after him.

            -

            Baa Baaa Black Sheep Full Movie Download 720p Movie


Download Zip: https://urlgoal.com/2uI8M4



            -

            Baa Baaa Black Sheep is a fun-filled movie that will make you laugh with its witty dialogues, quirky characters and hilarious situations. The movie also has some action sequences and twists that will keep you hooked. The movie has a runtime of 103 minutes and it is available in Hindi language. You can watch Baa Baaa Black Sheep online on ZEE5, where you can also download the movie in 720p quality. So, what are you waiting for? Grab your popcorn and enjoy this comedy caper with your family and friends.

            - -

            The movie has a talented cast of actors who have proved their mettle in various genres. However, in this movie, they are wasted in roles that do not suit them or challenge them. Anupam Kher and Annu Kapoor try to infuse some life into their characters with their comic timing and expressions, but they are let down by the poor script and direction. Maniesh Paul, who is known for his hosting skills and comic sense, fails to impress as the lead actor. He looks confused and out of place throughout the movie. Manjari Phadnis and Natasha Suri have nothing much to do except look pretty and be part of some songs. Kay Kay Menon, who is one of the finest actors in the industry, is reduced to a caricature of a cop who keeps shouting and chasing people.

            -

            The movie also suffers from a lack of logic and coherence. The plot is full of loopholes and inconsistencies that make you wonder what the makers were thinking. The movie tries to be a spoof of the action comedy genre, but it ends up being a mockery of itself. The jokes are stale and forced, the songs are unnecessary and forgettable, the action scenes are amateurish and unrealistic, and the climax is predictable and lame.

            -

            Baa Baaa Black Sheep is a movie that you can easily skip without missing anything. It is a disappointing attempt at making a comedy that neither entertains nor engages. It is a waste of time and talent for everyone involved.

            -

            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Coolorus 2.5.14.md b/spaces/stomexserde/gpt4-ui/Examples/Coolorus 2.5.14.md deleted file mode 100644 index f5cf83b946fe59037268ad8689ed2691a80f7052..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Coolorus 2.5.14.md +++ /dev/null @@ -1,30 +0,0 @@ -

            Coolorus 2.5.14: A Color Wheel Plugin for Photoshop

            -

            Coolorus is a color wheel plugin for Adobe Photoshop, inspired by Corel Painter color picker. It is designed for creative people who want to improve their painting workflow and choose better colors. Coolorus 2.5.14 is compatible with Adobe Photoshop CC 2014.2.2 and above on Windows and Mac (M1/M2 Rosetta 2 required).

            -

            Coolorus offers many features that make color selection easier and more fun, such as:

            -

            Coolorus 2.5.14


            Download ✫✫✫ https://urlgoal.com/2uIbBV



            -
              -
            • Color Schemes: You can choose from various color harmonies, such as complementary, triadic, tetradic, etc.
            • -
            • Gamut Lock: You can limit your color palette to a certain range of hues, values and saturations.
            • -
            • Triangle HSV Representation: You can adjust the hue, value and saturation of your color using a triangular slider.
            • -
            • Color Sliders: You can switch between different color spaces, such as RGB, HSV, LAB and more.
            • -
            • Color Mixer: You can save, load and blend your picked colors using a mixer panel.
            • -
            • Affects Shapes & Text Layers: You can change the fill color of text and solid shapes layers using Coolorus.
            • -
            • Simple Mode: You can simplify almost every element of Coolorus by pressing +/- on your keyboard or using the configuration mode.
            • -
            -

            Coolorus is a powerful and user-friendly tool that can help you create stunning artworks with amazing colors. You can buy it for $16.99 (60 days money-back guarantee) or upgrade your existing license for $9.99 from their official website[^1^]. You can also download a free trial version and watch some video tutorials to learn more about Coolorus.


            Coolorus has received many positive reviews from professional and amateur artists who use it for their digital paintings. Some of the benefits they have mentioned are:

            -
              -
            • Coolorus helps them create more harmonious and appealing color schemes with less trial and error.
            • -
            • Coolorus saves them time and clicks by allowing them to access the color wheel from any tool or panel in Photoshop.
            • -
            • Coolorus gives them more control and flexibility over their color choices by offering different color modes, sliders and mixers.
            • -
            • Coolorus improves their learning and understanding of color theory and relationships by showing them various color harmonies and gamut masks.
            • -
            -

            Coolorus is compatible with most graphic tablets, such as Wacom, and supports pressure sensitivity and pen tilt. It also works well with other Photoshop plugins, such as Lazy Nezumi Pro, Hej Stylus! and BrushBox. Coolorus has a simple and intuitive interface that can be customized to suit your preferences and workflow. You can resize, rotate, dock and undock the Coolorus panel as you wish.

            -

            -

            If you are looking for a way to enhance your digital painting experience and skills, Coolorus is a must-have plugin for Photoshop. It will make your coloring process more enjoyable, efficient and creative. You can try it for free for 14 days and see for yourself how Coolorus can transform your artworks with amazing colors.

            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Crack Trikker V1.5.21 277.md b/spaces/stomexserde/gpt4-ui/Examples/Crack Trikker V1.5.21 277.md deleted file mode 100644 index 0b9df3498fad0820646f6a24db7acf6e80fe97f3..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Crack Trikker V1.5.21 277.md +++ /dev/null @@ -1,26 +0,0 @@ -
            -

            Trikker: A Drawing Program for Electricians

            -

            Trikker is a drawing program for electricians that allows you to create professional single-line diagrams and situation diagrams of electrical installations in no time. It is simple and yet complete. You can easily draw the floor plan of your house or building, add electrical symbols, connect them with wires, and generate the diagrams automatically. Trikker also helps you with the calculation of the cable sections, the verification of the installation according to the standards, and the printing of labels and lists.

            -

            Trikker is developed by Bluebits, a Belgian company that specializes in software for electricians. Trikker is available in Dutch, French, English, and German. You can download a free trial version from their website[^2^] or buy a license for 199 euros (VAT included). Trikker is compatible with Windows XP, Vista, 7, 8, and 10.

            -

            Crack trikker v1.5.21 277


            Download File 🗹 https://urlgoal.com/2uI9iP



            -

            If you are looking for a fast and easy way to create electrical diagrams, Trikker is the program for you. It will save you time and money, and make your work more professional and efficient.

            Trikker has many features that make it a powerful and user-friendly drawing program for electricians. Some of these features are:

            -
              -
            • Automatic generation of single-line and situation diagrams. You only need to draw the floor plan and place the electrical symbols. Trikker will take care of the rest.
            • -
            • Easy drawing of floor plans. You can use the built-in drawing tools or import an existing floor plan from a PDF or image file.
            • -
            • Large library of electrical symbols. You can choose from over 1000 symbols for switches, sockets, lights, sensors, appliances, etc. You can also create your own symbols and add them to the library.
            • -
            • Smart wiring system. You can connect the symbols with wires by simply clicking on them. Trikker will automatically assign the correct wire colors and numbers according to the standards.
            • -
            • Cable calculation and verification. Trikker will calculate the cable sections and verify the installation according to the standards (AREI/RGIE, NEN 1010, HD 60364). You can also adjust the parameters and settings according to your preferences.
            • -
            • Printing and exporting. You can print your diagrams on any paper size or export them as PDF, DXF, or JPG files. You can also print labels and lists for your installation.
            • -
            -

            Trikker is a reliable and affordable drawing program for electricians that will help you create high-quality electrical diagrams in a matter of minutes. Try it out today and see for yourself!

            Trikker is not only a drawing program for electricians, but also a useful tool for other professionals and enthusiasts. For example, you can use Trikker to:

            -
              -
            • Design and plan your own electrical installation for your house or building.
            • -
            • Create and share electrical diagrams with your clients, colleagues, or contractors.
            • -
            • Teach and learn about electrical engineering and installation standards.
            • -
            • Simulate and test different scenarios and configurations for your installation.
            • -
            -

            Trikker is a versatile and flexible drawing program for electricians that can adapt to your needs and preferences. You can customize the appearance and behavior of Trikker according to your liking. You can also update Trikker regularly to get the latest features and improvements.

            -

            -

            If you want to learn more about Trikker, you can visit their website or contact their support team. They will be happy to answer your questions and provide you with technical assistance. You can also join their online community and share your feedback and suggestions with other Trikker users.

            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Dark Romance Heart Of The Beast Collectors Edition Crack Activation [EXCLUSIVE].md b/spaces/stomexserde/gpt4-ui/Examples/Dark Romance Heart Of The Beast Collectors Edition Crack Activation [EXCLUSIVE].md deleted file mode 100644 index 384b5987c4f86907098116deef6108c00fc1edf7..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Dark Romance Heart Of The Beast Collectors Edition Crack Activation [EXCLUSIVE].md +++ /dev/null @@ -1,14 +0,0 @@ - -

            How to Download and Play Dark Romance: Heart of the Beast Collector's Edition for Free

            -

            If you are a fan of hidden object games and fairy tales, you might be interested in Dark Romance: Heart of the Beast Collector's Edition, a game that combines adventure, romance, and magic. In this game, you have to find the Rose of Life, a mythical flower that can cure your father's illness and save an enchanted kingdom from a wicked witch. But you are not the only one who wants the Rose. The witch has stolen it from a prince and cursed his realm. You have to team up with the prince and work together to stop the witch and lift the curse.

            -

            Dark Romance: Heart of the Beast Collector's Edition is a game developed by Domini Games and published by Big Fish Games. It was released on April 6, 2018, and it is available for Windows PC. The game features beautiful graphics, captivating music, and voice-overs. It also has a lot of extras, such as a bonus chapter, wallpapers, concept art, achievements, and collectibles.

            -

            Dark Romance: Heart Of The Beast Collector's Edition Crack Activation


Download File: https://urlgoal.com/2uI9P5



            -

            But what if you want to play Dark Romance: Heart of the Beast Collector's Edition for free? Is there a way to download and play it without paying anything? The answer is yes, but you have to be careful. There are many websites that claim to offer free downloads of this game, but some of them are not trustworthy. They might contain viruses, malware, or spyware that can harm your computer or steal your personal information. They might also require you to complete surveys, sign up for subscriptions, or enter your credit card details.

            -

            One of the websites that claims to offer a free download of Dark Romance: Heart of the Beast Collector's Edition is [website name]. This website says that you can download the game by clicking on a button that says "Download Now". However, when you click on it, you are redirected to another website that asks you to enter your email address and create an account. Then, you are asked to activate your account by entering a code that is sent to your email. After that, you are asked to download and install a software called [software name] that supposedly allows you to play the game.

            -

            -

            However, this software is actually a crack activation tool that tries to bypass the game's security system and make it run without a license key. This is illegal and risky. Not only are you violating the game's terms of service and copyright laws, but you are also exposing your computer to potential threats. The crack activation tool might contain viruses or malware that can damage your system or steal your data. It might also not work properly or cause errors or crashes.

            -

            Therefore, we do not recommend downloading Dark Romance: Heart of the Beast Collector's Edition from [website name] or any other website that offers crack activation tools. The best way to play this game is to buy it from a legitimate source, such as Steam or Big Fish Games. These platforms offer safe and secure downloads of the game and guarantee its quality and performance. They also provide customer support and updates for the game.

            -

            If you want to play Dark Romance: Heart of the Beast Collector's Edition for free, you can try the demo version of the game first. The demo version allows you to play the first hour of the game and see if you like it. You can download the demo version from Steam or Big Fish Games for free. If you enjoy the game and want to continue playing it, you can buy the full version from these platforms at a reasonable price.

            -

            Dark Romance: Heart of the Beast Collector's Edition is a game worth playing if you love hidden object games and fairy tales. It has an engaging story, charming characters, and challenging puzzles. It also has a lot of bonus content that adds more value to the game. However, if you want to play it for free, be careful where you download it from. Avoid websites that offer crack activation tools and opt for legitimate sources instead. This way, you can enjoy the game without risking your computer or breaking the law.

            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Facebook3.2.1 Java App.md b/spaces/stomexserde/gpt4-ui/Examples/Facebook3.2.1 Java App.md deleted file mode 100644 index 930f15f0d4a0cff3ac384bc150a26126cce990d9..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Facebook3.2.1 Java App.md +++ /dev/null @@ -1,30 +0,0 @@ - -

            How to Download and Use Facebook3.2.1 Java App on Your Phone

            -

            Facebook is one of the most popular social media platforms in the world, with billions of users who share, like, comment, and chat with their friends and family. If you want to access Facebook on your Java-enabled phone, you might be interested in Facebook3.2.1 Java App, a free application that lets you enjoy Facebook on your mobile device.

            -

            Facebook3.2.1 Java App is a lightweight and fast version of Facebook that works on almost any Java phone. It allows you to browse your news feed, update your status, upload photos, send messages, and more. You can also use it to access Phoneky chat, a feature that lets you chat with other Phoneky users who have downloaded the app.

            -

            Facebook3.2.1 Java App


Download File: https://urlgoal.com/2uIbDY



            -

            To download and use Facebook3.2.1 Java App on your phone, follow these simple steps:

            -
              -
            1. Go to https://phoneky.com/java-software/?id=a3a33577 on your phone's browser.
            2. -
            3. Click on the "Download" button and choose the option that suits your phone's screen size.
            4. -
            5. Wait for the download to finish and then open the file.
            6. -
            7. Follow the instructions to install the app on your phone.
            8. -
            9. Launch the app and log in with your Facebook credentials.
            10. -
            11. Enjoy Facebook on your phone!
            12. -
            -

            Facebook3.2.1 Java App is a great way to stay connected with your friends and family on Facebook without using too much data or battery. It also gives you access to Phoneky chat, where you can meet new people and have fun conversations. Download it today and see for yourself!

            -

            - -

            If you want to learn more about Facebook3.2.1 Java App and its features, you can visit the official Phoneky website, where you can find more information, reviews, and ratings from other users. You can also check out other Java apps and games that Phoneky offers for free download.

            -

            Some of the other Java apps that you might like are:

            -
              -
            • UCWEB 6.3: A fast and powerful web browser that supports multiple tabs, downloads, and online video streaming.
            • -
            • Opera Mini 4.4 Firefox: A popular web browser that compresses web pages to save data and speed up loading.
            • -
            • BOLT Browser 1.04: A web browser that supports Flash and HTML5 content, as well as social media integration.
            • -
            • Nimbuzz Lite 0.0.5: A messaging and calling app that connects you with your friends on various platforms, such as Facebook, Yahoo, MSN, and more.
            • -
            • Qeep 1.92: A social networking app that lets you chat, flirt, play games, and share photos with millions of users around the world.
            • -
            -

            You can find these apps and more on https://phoneky.com/java-software/. Phoneky is your one-stop destination for all your Java app needs. Whether you are looking for entertainment, productivity, or utility apps, Phoneky has something for everyone.

            -

            Facebook3.2.1 Java App is one of the best Java apps that Phoneky offers. It lets you enjoy Facebook on your phone without any hassle or compromise. Download it now and join the Phoneky community!

            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Fujitsu Irmc License Key Recreasoft Personnal.md b/spaces/stomexserde/gpt4-ui/Examples/Fujitsu Irmc License Key Recreasoft Personnal.md deleted file mode 100644 index a7bbd4387ec065fb59dea7f1a60ce5f2963c0631..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Fujitsu Irmc License Key Recreasoft Personnal.md +++ /dev/null @@ -1,132 +0,0 @@ - -

            Fujitsu Irmc License Key: What Is It and How to Get It?

            -

If you are looking for a comprehensive and efficient way to manage your recreational space, you might want to consider using Fujitsu Irmc with Recreasoft Personnal Software. In this article, we will explain what Fujitsu Irmc is, what its features and benefits are, what a license key is and why you need one, what Recreasoft Personnal Software is and how it relates to Fujitsu Irmc, how to get a Fujitsu Irmc license key, and how to use Fujitsu Irmc with Recreasoft Personnal Software; we will also answer some frequently asked questions. By the end of this article, you will have a clear idea of how Fujitsu Irmc and Recreasoft Personnal Software can help you improve your business performance and customer satisfaction.

            -

            Fujitsu Irmc License Key recreasoft personnal


            Download ····· https://urlgoal.com/2uI8Tc



            -

            Introduction

            -

            Fujitsu Irmc stands for Integrated Remote Management Controller. It is a system that enables extensive monitoring and management of Fujitsu PRIMERGY servers regardless of their system status. It is implemented in a chip on the motherboard and integrates essential system management functions with extensive remote management functionality. It has its own operating system, web server, user management, and alert management. It communicates via a LAN connection, which can be shared with the server or used exclusively for system management. It gives administrators or service technicians access to the server for extensive control, even at decentralized locations. In the event of server problems, routine tasks and maintenance can be carried out efficiently.
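            Because Fujitsu Irmc answers on the network independently of the host operating system, routine checks can also be scripted against it instead of using the web interface. The short sketch below is only an illustration, assuming your Irmc firmware exposes the standard DMTF Redfish REST API (recent firmware versions generally do); the address and credentials are placeholders, and the script is not part of Recreasoft Personnal Software.

```python
# Minimal sketch: query a Fujitsu iRMC out-of-band over HTTPS via the
# standard DMTF Redfish API. Assumptions: the firmware exposes Redfish,
# IRMC_HOST is reachable, and IRMC_USER/IRMC_PASS are valid iRMC accounts.
import requests
import urllib3
from requests.auth import HTTPBasicAuth

urllib3.disable_warnings()   # iRMC typically ships with a self-signed certificate

IRMC_HOST = "192.0.2.10"     # placeholder iRMC address
IRMC_USER = "admin"          # placeholder credentials
IRMC_PASS = "changeme"

auth = HTTPBasicAuth(IRMC_USER, IRMC_PASS)
base = f"https://{IRMC_HOST}/redfish/v1"

# Service root: confirms the management controller itself is up and answering.
root = requests.get(base, auth=auth, verify=False, timeout=10)
root.raise_for_status()
print("Redfish version:", root.json().get("RedfishVersion"))

# Systems collection: one member per managed server; report its power state.
systems = requests.get(f"{base}/Systems", auth=auth, verify=False, timeout=10)
systems.raise_for_status()
for member in systems.json().get("Members", []):
    system = requests.get(f"https://{IRMC_HOST}{member['@odata.id']}",
                          auth=auth, verify=False, timeout=10).json()
    print(member["@odata.id"], "-> PowerState:", system.get("PowerState"))
```

            Such a request is answered by the management controller itself, not by the host, which is why it works even when the server's operating system is down.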

            -

            Some of the main features and benefits of Fujitsu Irmc are:

            -
              -
            • Universal system management solution for all Fujitsu PRIMERGY servers
            • -
            • Around-the-clock control, independent of the server status
            • -
            • Extensive power management, including pre-defined profiles and a scheduled mode to switch between profiles automatically
            • -
            • Monitoring of server-internal HDDs, PCIe SSDs, and RAID configurations also in agentless out-of-band operation
            • -
            • Support of local service display
            • -
            • Customer Self Service (CSS) concept
            • -
            • Secure data connections based on HTTPS/SSH
            • -
            • CIM support
            • -
            • Efficient graphical console redirection (AVR)
            • -
            • Video capturing
            • -
            • Virtual media
            • -
            -

            A license key is a code that activates certain functions or features of a software or hardware product. A license key is usually required for paid or premium products or services that require a license agreement between the vendor and the customer. A license key is usually a combination of letters and numbers that is entered during the installation or activation process of the product or service. A license key can be valid for a specific period of time, for a specific number of users, for a specific version of the product or service, or for other criteria. A license key can also be revoked or deactivated by the vendor if the customer violates the terms of the license agreement.
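            To make the idea concrete, here is a purely hypothetical Python sketch of how a vendor-side check of such a key might look. The key format, field names, and rules are invented for illustration only and do not reflect Fujitsu's actual licensing scheme.

```python
# Hypothetical illustration of the license-key concept described above;
# it does NOT represent Fujitsu's real key format or validation logic.
from dataclasses import dataclass
from datetime import date

@dataclass
class LicenseKey:
    code: str          # e.g. "ABCD-1234-EFGH-5678" (made-up format)
    expires: date      # validity period
    max_users: int     # number of permitted users
    revoked: bool = False

def is_valid(key: LicenseKey, active_users: int, today: date) -> bool:
    # Usable only while the key is not revoked, not expired,
    # and the user limit is respected.
    return (not key.revoked
            and today <= key.expires
            and active_users <= key.max_users)

key = LicenseKey("ABCD-1234-EFGH-5678", date(2026, 12, 31), max_users=5)
print(is_valid(key, active_users=3, today=date(2025, 6, 1)))  # True
```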

            -

Recreasoft Personnal Software is a program developed specifically to meet the needs of managing and controlling your recreational space efficiently and safely. It is designed for individuals or businesses that operate toy libraries, shopping mall recreation areas, kids' spaces, playgrounds, hotels, day cares, workshops, school days, parties, events, summer camps, and more. It gives you a panoramic view of all sectors of your company, such as recreation, parties, product sales, finance, marketing, and invoicing. It also helps you improve your revenue, time management, employee management, billing and invoicing, event management, registration management, check-in/check-out, and more.

            -

            Recreasoft Personnal Software can be used with Fujitsu Irmc to monitor and control your recreational space remotely and securely. By connecting the software to Fujitsu Irmc, you can access your server from anywhere and perform various tasks such as viewing reports, managing reservations, sending invoices, updating inventory, creating promotions, etc. You can also use the features of Fujitsu Irmc to power on/off your server, reboot it, update it, configure it, troubleshoot it, etc.

            -

            In order to use Fujitsu Irmc with Recreasoft Personnal Software, you need to have a Fujitsu Irmc license key. A Fujitsu Irmc license key is a code that activates the advanced features of Fujitsu Irmc such as graphical console redirection (AVR), video capturing, virtual media, etc. These features are essential for using Fujitsu Irmc with Recreasoft Personnal Software effectively and efficiently. Without a Fujitsu Irmc license key, you will only be able to use the basic features of Fujitsu Irmc such as power management, monitoring, alerting, etc.

            -

            How to Get a Fujitsu Irmc License Key

            -

            There are two ways to get a Fujitsu Irmc license key: online licensing portal and remote management controller user's guide.

            -

            Online Licensing Portal

            -

            The online licensing portal is the easiest and fastest way to get a Fujitsu Irmc license key. You can access the portal from any web browser and enter your TAN (Transaction Authorization Number) that you received when you purchased your Fujitsu PRIMERGY server. The TAN is a 16-digit code that is printed on a sticker on the server or on the delivery note. You can also find your TAN in the ServerView Suite DVD that came with your server.

            -

            To get a Fujitsu Irmc license key from the online licensing portal, follow these steps:

            -

            -
              -
            1. Go to https://irmc.fujitsu.com/
            2. -
            3. Select your language and click "Next"
            4. -
            5. Enter your TAN and click "Next"
            6. -
            7. Select the type of license key you want (Standard or Advanced) and click "Next"
            8. -
            9. Enter your email address and click "Next"
            10. -
            11. Confirm your details and click "Finish"
            12. -
            13. You will receive an email with your license key within a few minutes
            14. -
            -

            Remote Management Controller User's Guide

            -

            The remote management controller user's guide is another way to get a Fujitsu Irmc license key. You can find the user's guide in the ServerView Suite DVD that came with your server or download it from https://support.ts.fujitsu.com/. The user's guide contains detailed instructions on how to activate your license key using the web interface of Fujitsu Irmc.

            -

            To get a Fujitsu Irmc license key from the remote management controller user's guide, follow these steps:

            -
              -
            1. Insert the ServerView Suite DVD into your computer or download the user's guide from https://support.ts.fujitsu.com/
            2. -
            3. Open the user's guide and go to the section "Activating License Keys"
            4. -
            5. Follow the instructions on how to access the web interface of Fujitsu Irmc and enter your TAN
            6. -
            7. Select the type of license key you want (Standard or Advanced) and click "Activate"
            8. -
            9. You will see a confirmation message and your license key will be displayed on the screen
            10. -
            11. You can also receive your license key by email if you enter your email address and click "Send"
            12. -
            -

            How to Use Fujitsu Irmc with Recreasoft Personnal Software

Now that you have your Fujitsu Irmc license key, you can use it with Recreasoft Personnal Software to monitor and control your recreational space. In this section, we will explain what Recreasoft Personnal Software is, how to install and configure it, and how to use it with Fujitsu Irmc.

What is Recreasoft Personnal Software?

Recreasoft Personnal Software is software that allows you to manage and control your recreational space efficiently and safely. It is designed for individuals or businesses that operate toy libraries, shopping mall recreation areas, kids' spaces, playgrounds, hotels, day cares, workshops, school days, parties, events, summer camps, and more. It gives you a panoramic view of every sector of your company, such as recreation, parties, product sales, finance, marketing, and invoicing. It also helps you improve your revenue, time management, employee management, billing and invoicing, event management, registration management, check-in/check-out, and more.

Some of the main features and benefits of Recreasoft Personnal Software are:

• Easy to use and intuitive interface
• Customizable and adaptable to your needs and preferences
• Compatible with Windows, Mac OS X, Linux, Android, iOS, and web browsers
• Secure and reliable data storage and backup
• Multi-language and multi-currency support
• Online and offline mode
• Real-time synchronization and updates
• Comprehensive reports and statistics
• Email and SMS notifications
• Customer loyalty program and feedback system
• Online booking and payment system
• Barcode scanner and printer integration
• Fujitsu Irmc integration

            Recreasoft Personnal Software can be used with Fujitsu Irmc to monitor and control your recreational space remotely and securely. By connecting the software to Fujitsu Irmc, you can access your server from anywhere and perform various tasks such as viewing reports, managing reservations, sending invoices, updating inventory, creating promotions, etc. You can also use the features of Fujitsu Irmc to power on/off your server, reboot it, update it, configure it, troubleshoot it, etc.

How to Install and Configure Recreasoft Personnal Software

To install and configure Recreasoft Personnal Software on your computer or device, you need to meet the following system requirements:

Operating System: Windows 7 or higher, Mac OS X 10.10 or higher, Linux Ubuntu 16.04 or higher, Android 4.4 or higher, iOS 9.0 or higher, or a web browser (Chrome recommended)
Memory: 2 GB RAM or higher
Disk Space: 500 MB or higher
Processor: Dual core or higher
Internet Connection: Broadband or higher (for online mode)

To install and configure Recreasoft Personnal Software on your computer or device, follow these steps:

1. Go to https://www.recreasoft.com/ and click on "Download" or "Get App"
2. Select the version that matches your operating system or device and click on "Download" or "Install"
3. Follow the instructions on the screen to complete the installation process
4. Launch the software and enter your email address and password to create an account, or log in if you already have one
5. Select the language and currency that you want to use for the software
6. Enter the information about your company such as name, address, phone number, email, website, logo, etc.
7. Enter the information about your recreational space such as name, location, capacity, opening hours, services, prices, etc.
8. Connect the software to Fujitsu Irmc by entering the IP address, username, and password of your server (see the sketch after this list)
9. Enter your Fujitsu Irmc license key to activate the advanced features of Fujitsu Irmc
10. Configure the settings and preferences of the software and Fujitsu Irmc according to your needs and requirements
11. Save your changes and exit the software
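The article does not say how this pairing works internally, but recent iRMC generations expose a standards-based Redfish REST interface, so you can sanity-check the IP address and credentials from step 8 before typing them into the software. The following is only a rough sketch: the host address, user name, and password are placeholders, and the generic /redfish/v1/ paths may differ between iRMC firmware versions.

```python
import requests
import urllib3
from requests.auth import HTTPBasicAuth

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Placeholder values - replace them with the address and credentials of your own iRMC.
IRMC_HOST = "192.168.0.120"
IRMC_USER = "admin"
IRMC_PASSWORD = "changeme"


def irmc_reachable(host: str, user: str, password: str) -> bool:
    """Return True when the Redfish service root and the Systems collection both answer with HTTP 200."""
    auth = HTTPBasicAuth(user, password)
    for path in ("/redfish/v1/", "/redfish/v1/Systems"):
        # verify=False because iRMC boards usually ship with a self-signed certificate.
        resp = requests.get(f"https://{host}{path}", auth=auth, verify=False, timeout=10)
        if resp.status_code != 200:
            return False
    return True


if __name__ == "__main__":
    if irmc_reachable(IRMC_HOST, IRMC_USER, IRMC_PASSWORD):
        print("iRMC reachable - the same address and credentials should work in the management software.")
    else:
        print("Connection failed - check the IP address, user name, and password.")
```

If both requests return HTTP 200, the same values should be accepted by any management front end that talks to the iRMC.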

            How to Monitor and Control Your Recreational Space with Recreasoft Personnal Software and Fujitsu Irmc

Once you have installed and configured Recreasoft Personnal Software and Fujitsu Irmc on your computer or device, you can use them to monitor and control your recreational space remotely and securely. You can access the software and Fujitsu Irmc from any web browser or app and perform various tasks such as:

• Viewing reports and statistics on your revenue, expenses, customers, employees, inventory, etc.
• Managing reservations and bookings for your recreational space and events
• Sending invoices and receipts to your customers by email or SMS
• Updating inventory and stock levels of your products and services
• Creating promotions and discounts for your customers and loyalty program members
• Powering on/off your server, rebooting it, updating it, configuring it, troubleshooting it, etc. (see the sketch after this list)
• Monitoring the status and performance of your server, such as temperature, fan speed, power consumption, etc.
• Using graphical console redirection (AVR) to view the screen of your server remotely
• Using video capturing to record the screen of your server remotely
• Using virtual media to mount ISO images or USB devices to your server remotely
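The power and monitoring items in this list go through the same management interface. As another illustrative sketch under the same assumptions (placeholder address and credentials, generic Redfish paths, and a system path that in practice you would discover from the /redfish/v1/Systems collection rather than hard-code), reading the power state and requesting a clean restart might look like this:

```python
import requests
import urllib3
from requests.auth import HTTPBasicAuth

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

BASE = "https://192.168.0.120"              # placeholder iRMC address
AUTH = HTTPBasicAuth("admin", "changeme")   # placeholder credentials
SYSTEM = "/redfish/v1/Systems/0"            # assumed path - discover the real one from /redfish/v1/Systems


def power_state() -> str:
    """Read the PowerState property ("On", "Off", ...) of the managed server."""
    resp = requests.get(BASE + SYSTEM, auth=AUTH, verify=False, timeout=10)
    resp.raise_for_status()
    return resp.json().get("PowerState", "Unknown")


def graceful_restart() -> None:
    """Ask the controller to restart the host operating system cleanly."""
    action = BASE + SYSTEM + "/Actions/ComputerSystem.Reset"
    resp = requests.post(action, json={"ResetType": "GracefulRestart"},
                         auth=AUTH, verify=False, timeout=10)
    resp.raise_for_status()


if __name__ == "__main__":
    print("Current power state:", power_state())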

To monitor and control your recreational space with Recreasoft Personnal Software and Fujitsu Irmc, follow these steps:

1. Launch the software or app on your computer or device or go to https://www.recreasoft.com/
2. Log in with your email address and password
3. Select the recreational space that you want to monitor and control from the list of available spaces
4. Select the function or feature that you want to use from the menu bar or the dashboard
5. Follow the instructions on the screen to complete the task or action
6. Log out when you are done

            Conclusion

In this article, we have explained what Fujitsu Irmc is, what its features and benefits are, what a license key is and why you need it, what Recreasoft Personnal Software is and how it relates to Fujitsu Irmc, how to get a Fujitsu Irmc license key, and how to use Fujitsu Irmc with Recreasoft Personnal Software, and we have answered some frequently asked questions. We hope that this article has helped you understand how Fujitsu Irmc and Recreasoft Personnal Software can help improve your business performance and customer satisfaction.

If you are interested in trying out Fujitsu Irmc and Recreasoft Personnal Software for yourself, you can download them from their respective websites or contact their customer support for more information. You can also request a free trial or a demo to see how they work in action. You will be amazed by how easy and convenient it is to monitor and control your recreational space with these two powerful tools.

Don't wait any longer. Get your Fujitsu Irmc license key today and start using it with Recreasoft Personnal Software. You will not regret it!

FAQs

Here are some of the most common questions and answers about Fujitsu Irmc and Recreasoft Personnal Software:

Q: How much does a Fujitsu Irmc license key cost?

A: A Fujitsu Irmc license key costs $99 for a standard license key or $199 for an advanced license key. You can purchase a license key from https://irmc.fujitsu.com/ or from an authorized reseller.

Q: How long does a Fujitsu Irmc license key last?

A: A Fujitsu Irmc license key lasts for the lifetime of the server that it is activated on. You can transfer a license key to another server if you replace or upgrade your server. You can also deactivate a license key if you no longer need it or want to use it on another server.

Q: How many Fujitsu Irmc license keys do I need?

A: You need one Fujitsu Irmc license key per server that you want to use with Recreasoft Personnal Software. You can use the same license key for multiple servers if they are in the same network and have the same TAN.

Q: How do I update my Fujitsu Irmc firmware?

A: You can update your Fujitsu Irmc firmware by downloading the latest version from https://support.ts.fujitsu.com/ and uploading it to your server via the web interface of Fujitsu Irmc. You can also use the ServerView Update Manager to update your Fujitsu Irmc firmware automatically.
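Before and after an update it helps to confirm which firmware versions the controller actually reports. As a hedged illustration only — placeholder address and credentials again, and the standard Redfish update-service path rather than anything confirmed for a particular iRMC model — you could list the reported firmware inventory like this:

```python
import requests
import urllib3
from requests.auth import HTTPBasicAuth

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

BASE = "https://192.168.0.120"              # placeholder iRMC address
AUTH = HTTPBasicAuth("admin", "changeme")   # placeholder credentials


def list_firmware() -> None:
    """Print every entry of the Redfish firmware inventory together with its reported version."""
    inv = requests.get(BASE + "/redfish/v1/UpdateService/FirmwareInventory",
                       auth=AUTH, verify=False, timeout=10)
    inv.raise_for_status()
    for member in inv.json().get("Members", []):
        item = requests.get(BASE + member["@odata.id"], auth=AUTH, verify=False, timeout=10).json()
        print(item.get("Name", "unknown"), "-", item.get("Version", "n/a"))


if __name__ == "__main__":
    list_firmware()
```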

Q: How do I contact Recreasoft Personnal Software customer support?

A: You can contact Recreasoft Personnal Software customer support by email at support@recreasoft.com or by phone at +33 (0)1 76 54 00 54. You can also visit their website at https://www.recreasoft.com/ and fill out the contact form or chat with an agent online.

            b2dd77e56b
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Html5 Jquery Audio Player Pro Nulled And Void.md b/spaces/stomexserde/gpt4-ui/Examples/Html5 Jquery Audio Player Pro Nulled And Void.md deleted file mode 100644 index d1ebf88323930313e0ddccdc6e22d5b5c35bc16d..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Html5 Jquery Audio Player Pro Nulled And Void.md +++ /dev/null @@ -1,35 +0,0 @@ - -

            How to Use HTML5 jQuery Audio Player Pro Nulled and Void for Your Website


            If you are looking for a responsive and customizable audio player for your website, you might have come across HTML5 jQuery Audio Player Pro. This plugin claims to offer a top-notch audio player that works on all major browsers and mobile devices, with any color scheme and design. It also boasts a unique wave bar feature that shows the highs and lows of the audio track and allows users to skip to any time.


            However, you might also have noticed that this plugin is not free. It costs $29 for a regular license and $99 for an extended license on CodeCanyon. If you are on a tight budget, you might be tempted to look for a nulled and void version of this plugin, which means a cracked or hacked version that bypasses the license verification.


            html5 jquery audio player pro nulled and void


            Download File ››› https://urlgoal.com/2uIbI5




            But is this a good idea? What are the risks and drawbacks of using HTML5 jQuery Audio Player Pro nulled and void? And are there any better alternatives? In this article, we will answer these questions and help you make an informed decision.


            The Dangers of Using HTML5 jQuery Audio Player Pro Nulled and Void


            Using nulled and void plugins may seem like a quick and easy way to save money, but it comes with many risks and disadvantages. Here are some of them:

• Security issues: Nulled and void plugins may contain malicious code that can harm your website or your visitors. For example, they may inject malware, viruses, spam links, or backdoors that can compromise your site's security and performance. They may also steal your personal or financial information or redirect your traffic to other sites.
• Lack of updates: Nulled and void plugins do not receive any updates from the original developers. This means that they may become outdated, incompatible, or buggy over time. They may also miss out on new features, improvements, or bug fixes that the original plugin offers. This can affect your site's functionality and user experience.
• No support: Nulled and void plugins do not come with any support from the original developers. This means that if you encounter any problems or issues with the plugin, you are on your own. You cannot contact the developers for help or guidance. You may also have trouble finding reliable documentation or tutorials for the plugin.
• Legal issues: Nulled and void plugins are illegal and unethical. They violate the intellectual property rights of the original developers and the terms of service of CodeCanyon. By using them, you are exposing yourself to potential legal actions or penalties from the developers or CodeCanyon. You may also damage your reputation as a website owner or developer.

            As you can see, using HTML5 jQuery Audio Player Pro nulled and void is not worth the risk. You may end up spending more time, money, and energy fixing the problems caused by the nulled plugin than buying the original one.


            The Benefits of Using HTML5 jQuery Audio Player Pro Original Version


            If you want to use HTML5 jQuery Audio Player Pro for your website, we recommend that you buy the original version from CodeCanyon. Here are some of the benefits of doing so:


• Security assurance: The original version of HTML5 jQuery Audio Player Pro is safe and secure. It does not contain any malicious code or hidden threats that can harm your website or your visitors. It also follows the best coding practices and standards for WordPress plugins.
• Regular updates: The original version of HTML5 jQuery Audio Player Pro receives regular updates from the developers. This means that it stays up-to-date, compatible, and bug-free with the latest WordPress versions and technologies. It also gets new features, improvements, or bug fixes that enhance its functionality and user experience.
• Professional support: The original version of HTML5 jQuery Audio Player Pro comes with professional support from the developers. This means that if you have any questions or issues with the plugin, you can contact them for help or guidance. They will respond to your queries promptly and courteously. You can also access their detailed documentation and tutorials for the plugin.
• Legal compliance: The original version of HTML5 jQuery

              81aa517590
              \ No newline at end of file diff --git a/spaces/style0427/anime-remove-background/README.md b/spaces/style0427/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/style0427/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sub314xxl/MetaGPT/metagpt/utils/common.py b/spaces/sub314xxl/MetaGPT/metagpt/utils/common.py deleted file mode 100644 index 791bb2767697d5082e414e1dd0e32cdecff81729..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/utils/common.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/4/29 16:07 -@Author : alexanderwu -@File : common.py -@Modified By: mashenquan, 2023-8-17, add `initalize_enviroment()` to load `config/config.yaml` to `os.environ` -""" -import ast -import contextlib -import inspect -import os -import re -from pathlib import Path -from typing import List, Tuple - -import yaml - -from metagpt.logs import logger - - -def check_cmd_exists(command) -> int: - """ 检查命令是否存在 - :param command: 待检查的命令 - :return: 如果命令存在,返回0,如果不存在,返回非0 - """ - check_command = 'command -v ' + command + ' >/dev/null 2>&1 || { echo >&2 "no mermaid"; exit 1; }' - result = os.system(check_command) - return result - - -class OutputParser: - - @classmethod - def parse_blocks(cls, text: str): - # 首先根据"##"将文本分割成不同的block - blocks = text.split("##") - - # 创建一个字典,用于存储每个block的标题和内容 - block_dict = {} - - # 遍历所有的block - for block in blocks: - # 如果block不为空,则继续处理 - if block.strip() != "": - # 将block的标题和内容分开,并分别去掉前后的空白字符 - block_title, block_content = block.split("\n", 1) - # LLM可能出错,在这里做一下修正 - if block_title[-1] == ":": - block_title = block_title[:-1] - block_dict[block_title.strip()] = block_content.strip() - - return block_dict - - @classmethod - def parse_code(cls, text: str, lang: str = "") -> str: - pattern = rf'```{lang}.*?\s+(.*?)```' - match = re.search(pattern, text, re.DOTALL) - if match: - code = match.group(1) - else: - raise Exception - return code - - @classmethod - def parse_str(cls, text: str): - text = text.split("=")[-1] - text = text.strip().strip("'").strip("\"") - return text - - @classmethod - def parse_file_list(cls, text: str) -> list[str]: - # Regular expression pattern to find the tasks list. - pattern = r'\s*(.*=.*)?(\[.*\])' - - # Extract tasks list string using regex. - match = re.search(pattern, text, re.DOTALL) - if match: - tasks_list_str = match.group(2) - - # Convert string representation of list to a Python list using ast.literal_eval. 
- tasks = ast.literal_eval(tasks_list_str) - else: - tasks = text.split("\n") - return tasks - - @staticmethod - def parse_python_code(text: str) -> str: - for pattern in ( - r'(.*?```python.*?\s+)?(?P.*)(```.*?)', - r'(.*?```python.*?\s+)?(?P.*)', - ): - match = re.search(pattern, text, re.DOTALL) - if not match: - continue - code = match.group("code") - if not code: - continue - with contextlib.suppress(Exception): - ast.parse(code) - return code - raise ValueError("Invalid python code") - - @classmethod - def parse_data(cls, data): - block_dict = cls.parse_blocks(data) - parsed_data = {} - for block, content in block_dict.items(): - # 尝试去除code标记 - try: - content = cls.parse_code(text=content) - except Exception: - pass - - # 尝试解析list - try: - content = cls.parse_file_list(text=content) - except Exception: - pass - parsed_data[block] = content - return parsed_data - - @classmethod - def parse_data_with_mapping(cls, data, mapping): - block_dict = cls.parse_blocks(data) - parsed_data = {} - for block, content in block_dict.items(): - # 尝试去除code标记 - try: - content = cls.parse_code(text=content) - except Exception: - pass - typing_define = mapping.get(block, None) - if isinstance(typing_define, tuple): - typing = typing_define[0] - else: - typing = typing_define - if typing == List[str] or typing == List[Tuple[str, str]]: - # 尝试解析list - try: - content = cls.parse_file_list(text=content) - except Exception: - pass - # TODO: 多余的引号去除有风险,后期再解决 - # elif typing == str: - # # 尝试去除多余的引号 - # try: - # content = cls.parse_str(text=content) - # except Exception: - # pass - parsed_data[block] = content - return parsed_data - - -class CodeParser: - - @classmethod - def parse_block(cls, block: str, text: str) -> str: - blocks = cls.parse_blocks(text) - for k, v in blocks.items(): - if block in k: - return v - return "" - - @classmethod - def parse_blocks(cls, text: str): - # 首先根据"##"将文本分割成不同的block - blocks = text.split("##") - - # 创建一个字典,用于存储每个block的标题和内容 - block_dict = {} - - # 遍历所有的block - for block in blocks: - # 如果block不为空,则继续处理 - if block.strip() != "": - # 将block的标题和内容分开,并分别去掉前后的空白字符 - block_title, block_content = block.split("\n", 1) - block_dict[block_title.strip()] = block_content.strip() - - return block_dict - - @classmethod - def parse_code(cls, block: str, text: str, lang: str = "") -> str: - if block: - text = cls.parse_block(block, text) - pattern = rf'```{lang}.*?\s+(.*?)```' - match = re.search(pattern, text, re.DOTALL) - if match: - code = match.group(1) - else: - logger.error(f"{pattern} not match following text:") - logger.error(text) - raise Exception - return code - - @classmethod - def parse_str(cls, block: str, text: str, lang: str = ""): - code = cls.parse_code(block, text, lang) - code = code.split("=")[-1] - code = code.strip().strip("'").strip("\"") - return code - - @classmethod - def parse_file_list(cls, block: str, text: str, lang: str = "") -> list[str]: - # Regular expression pattern to find the tasks list. - code = cls.parse_code(block, text, lang) - # print(code) - pattern = r'\s*(.*=.*)?(\[.*\])' - - # Extract tasks list string using regex. - match = re.search(pattern, code, re.DOTALL) - if match: - tasks_list_str = match.group(2) - - # Convert string representation of list to a Python list using ast.literal_eval. 
- tasks = ast.literal_eval(tasks_list_str) - else: - raise Exception - return tasks - - -class NoMoneyException(Exception): - """Raised when the operation cannot be completed due to insufficient funds""" - - def __init__(self, amount, message="Insufficient funds"): - self.amount = amount - self.message = message - super().__init__(self.message) - - def __str__(self): - return f'{self.message} -> Amount required: {self.amount}' - - -def print_members(module, indent=0): - """ - https://stackoverflow.com/questions/1796180/how-can-i-get-a-list-of-all-classes-within-current-module-in-python - :param module: - :param indent: - :return: - """ - prefix = ' ' * indent - for name, obj in inspect.getmembers(module): - print(name, obj) - if inspect.isclass(obj): - print(f'{prefix}Class: {name}') - # print the methods within the class - if name in ['__class__', '__base__']: - continue - print_members(obj, indent + 2) - elif inspect.isfunction(obj): - print(f'{prefix}Function: {name}') - elif inspect.ismethod(obj): - print(f'{prefix}Method: {name}') - - -def parse_recipient(text): - pattern = r"## Send To:\s*([A-Za-z]+)\s*?" # hard code for now - recipient = re.search(pattern, text) - return recipient.group(1) if recipient else "" - diff --git a/spaces/sub314xxl/MusicGen/tests/modules/test_seanet.py b/spaces/sub314xxl/MusicGen/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MusicGen/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, 
StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Elasto Mania V.1.11a Full Version Extra Pack 419 Levels !NEW!.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Elasto Mania V.1.11a Full Version Extra Pack 419 Levels !NEW!.md deleted file mode 100644 index f15a440125f1fb2b317698012ec1650b68decbcb..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Elasto Mania V.1.11a Full Version Extra Pack 419 Levels !NEW!.md +++ /dev/null @@ -1,149 +0,0 @@ - -

              Elasto Mania V.1.11a Full Version Extra Pack 419 Levels: A Guide for Beginners and Fans

              -

              Elasto Mania V.1.11a Full Version Extra Pack 419 Levels is a game that will challenge your skills and creativity. It is a 2D motorbike simulation game that was released in 2000 as a successor to Action SuperCross. The game features realistic physics, challenging levels, and a huge community of fans who have created tons of new levels and graphics for the game.

              -

              In this article, we will tell you everything you need to know about Elasto Mania V.1.11a Full Version Extra Pack 419 Levels. We will explain what the game is, how to download and play it, how to create and share your own levels, and how to join the online community of players. We will also give you some tips and tricks on how to master the game and have fun.

              -

              Elasto Mania V.1.11a Full Version Extra Pack 419 Levels


              Download Ziphttps://cinurl.com/2uEYyJ



              -

              What is Elasto Mania V.1.11a Full Version Extra Pack 419 Levels?


              Elasto Mania V.1.11a Full Version Extra Pack 419 Levels is a game that combines racing, puzzle-solving, and stunts. The goal of the game is to drive your motorbike through various levels and collect all the apples and touch all the flowers before reaching the exit. Sounds easy, right? Well, not quite.


              The game is not just about speed, but also about balance, precision, and timing. You have to control your motorbike's acceleration, braking, rotation, and suspension with the arrow keys on your keyboard. You have to avoid crashing into walls, spikes, gravity fields, or falling off the platforms. You have to use your imagination and creativity to find the best way to complete each level.


              The game has two modes: single player and multiplayer. In single player mode, you can play through the original 54 levels that come with the game, or any of the extra levels that you can download from the internet. You can also create your own levels using the built-in level editor or any of the external tools available online.


              In multiplayer mode, you can play online with other players from around the world. You can join or host a server and choose from various modes such as battle, flag tag, apple harvest, or team play. You can also chat with other players and share your levels and replays.


              How to download and play Elasto Mania V.1.11a Full Version Extra Pack 419 Levels?


To download and play Elasto Mania V.1.11a Full Version Extra Pack 419 Levels, you need to follow these steps:

1. Go to the official website of Elasto Mania or any other online platform that offers this game.
2. Click on the download link or button and choose your operating system - Windows or Android.
3. Wait for the download to complete and then install the game on your device.
4. Open the game and enter your name and email address to register yourself as a user.
5. Select your preferred language, graphics quality, sound volume, etc.
6. Click on start game and choose single player or multiplayer mode.
7. Select a level pack from the list or browse your computer for any extra level packs that you have downloaded.
8. Select a level from the pack and click on play.
9. Use the arrow keys on your keyboard to control your motorbike and complete the level.
10. Press Esc to pause or quit the game.

              You can also play this game online without downloading it. Just go to any website that offers this service and follow the same steps as above.


              How to create and share your own levels for Elasto Mania V.1.11a Full Version Extra Pack 419 Levels?


If you want to create and share your own levels for Elasto Mania V.1.11a Full Version Extra Pack 419 Levels, you need to follow these steps:

1. Open the game and click on level editor.
2. Select a template from the list or create a new one from scratch.
3. Use the mouse and keyboard to draw the terrain, place objects, set gravity fields, etc.
4. Save your level as a .lev file on your computer.
5. Test your level by playing it in single player mode or uploading it to a server in multiplayer mode.
6. Share your level with other players by uploading it to any of the online platforms that host Elasto Mania levels such as Moposite or Elma Online.

              You can also use external tools such as Elma Editor or SLE to create more advanced levels with custom graphics, sounds, scripts, etc.


              How to join the online community of Elasto Mania V.1.11a Full Version Extra Pack 419 Levels?


If you want to join the online community of Elasto Mania V.1.11a Full Version Extra Pack 419 Levels, you need to follow these steps:

1. Go to any of the websites that host Elasto Mania servers such as Elma Online or Battle Server List.
2. Select a server from the list that suits your preferences such as location, mode, level pack, etc.
3. Click on join server and enter your name and password if required.
4. Play with other players on the server and chat with them using text or voice messages.
5. Follow the rules of each server and respect other players.

              You can also join various forums, groups, chats, or social media platforms where Elasto Mania fans gather such as Mopolauta or Discord Server.


              Tips and tricks on how to master Elasto Mania V.1.11a Full Version Extra Pack 419 Levels


To master Elasto Mania V.1.11a Full Version Extra Pack 419 Levels, you should keep these tips and tricks in mind:

• Practice makes perfect. The more you play, the more you learn about how your motorbike behaves in different situations and how to overcome various obstacles.
• Watch replays of other players who have completed each level faster or better than you. You can learn from their techniques and strategies and improve your own skills.
• Experiment with different ways of completing each level. Sometimes there are hidden shortcuts or alternative routes that can save you time or make things easier for you.
• Use different keys for different actions such as accelerating, braking, rotating left or right, suspending left or right wheels etc. You can customize them in the options menu according to your preference.
• Use gravity fields wisely. They can help you gain speed or change direction but they can also slow you down or make you lose control if used incorrectly.

              How to update Elasto Mania V.1.11a Full Version Extra Pack 419 Levels?


If you want to update Elasto Mania V.1.11a Full Version Extra Pack 419 Levels, you need to follow these steps:

1. Go to the official website of Elasto Mania or any other online platform that offers this game.
2. Click on the update link or button and choose your operating system - Windows or Android.
3. Wait for the download to complete and then install the update on your device.
4. Open the game and enjoy the new features and improvements.

              The latest update for this game is version 1.2, which is an unofficial patch that adds many new features and fixes many bugs. Some of the new features include:

• A new menu system that allows you to access all the options and modes easily.
• A new replay system that allows you to save, load, edit, and share your replays.
• A new level browser that allows you to search, sort, and filter levels by various criteria.
• A new online system that allows you to play online with other players without using external programs.
• A new statistics system that allows you to track your progress and achievements in the game.
• A new graphics engine that improves the performance and quality of the game.

              How to get more levels for Elasto Mania V.1.11a Full Version Extra Pack 419 Levels?


If you want to get more levels for Elasto Mania V.1.11a Full Version Extra Pack 419 Levels, you need to follow these steps:

1. Go to any of the websites that host Elasto Mania levels such as Moposite or Elma Online.
2. Select a level pack from the list or use the search function to find a level pack that suits your preferences such as difficulty, style, theme, etc.
3. Click on download and save the level pack as a .lev file on your computer.
4. Open the game and click on level editor.
5. Click on import and browse your computer for the level pack that you have downloaded.
6. Select the levels that you want to import and click on ok.
7. Save your level pack as a .lev file on your computer.
8. Play the levels in single player mode or upload them to a server in multiplayer mode.

              You can also create your own levels using the built-in level editor or any of the external tools available online.


              How to get more graphics for Elasto Mania V.1.11a Full Version Extra Pack 419 Levels?


If you want to get more graphics for Elasto Mania V.1.11a Full Version Extra Pack 419 Levels, you need to follow these steps:

1. Go to any of the websites that host Elasto Mania graphics such as Moposite or Elma Online.
2. Select a graphics pack from the list or use the search function to find a graphics pack that suits your preferences such as style, theme, quality, etc.
3. Click on download and save the graphics pack as a .zip file on your computer.
4. Open the game and click on options.
5. Click on graphics and browse your computer for the graphics pack that you have downloaded.
6. Select the graphics that you want to use and click on ok.

              You can also create your own graphics using any of the external tools available online such as Paint.NET or GIMP.

Conclusion

              Elasto Mania V.1.11a Full Version Extra Pack 419 Levels is a game that will challenge your skills and creativity. It is a 2D motorbike simulation game that was released in 2000 as a successor to Action SuperCross. The game features realistic physics, challenging levels, and a huge community of fans who have created tons of new levels and graphics for the game.


              You can download it from their official website or use it online without downloading it. You can also create your own levels and graphics using the built-in level editor or any of the external tools available online. You can also play online with other players from around the world in various modes such as battle, flag tag, apple harvest etc.


              If you follow these tips and tricks, you will surely master this game and have fun playing it.


              We hope this article has helped you understand more about this game and how to play it effectively.

If you have any questions or feedback about this article or this game, please feel free to contact us. We would love to hear from you.

Thank you for reading!

              3cee63e6c2
              \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Atr72500fsxcrackinstall [PATCHED].md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Atr72500fsxcrackinstall [PATCHED].md deleted file mode 100644 index 23612271d68effa844a9974a84e1a6331019ebba..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Atr72500fsxcrackinstall [PATCHED].md +++ /dev/null @@ -1,9 +0,0 @@ -

              atr72500fsxcrackinstall


              DOWNLOAD ☆☆☆☆☆ https://urluss.com/2uCFrR



atr72500fsxcrackinstall - Stata 10 SE MP keygen (Windows) 64 bit ... Keygen AutoCAD 2018 portable - Dmitrirender V4 Beta (uncracked) ... More RiderCRACK ...

1 - RiderCRACK for Stata v10+ for Windows ...

T-Desktop is a personal desktop software that allows you to run all your most frequently used programs and ...

In the Cracker.exe file name, there should be a "CRACKER.EXE" or "CRACKER.CMD" with the states that have been generated by the keygen. ...

Autocad 2018 portable - Dmitrir 8a78ff9644

              diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/default_runtime.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/default_runtime.py deleted file mode 100644 index b564cc4e7e7d9a67dacaaddecb100e4d8f5c005b..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/default_runtime.py +++ /dev/null @@ -1,14 +0,0 @@ -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook', by_epoch=False), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -cudnn_benchmark = True diff --git a/spaces/szzzzz/toxic_detection/README.md b/spaces/szzzzz/toxic_detection/README.md deleted file mode 100644 index 95a07ac89769f0c6cf20aae51022e19a60157244..0000000000000000000000000000000000000000 --- a/spaces/szzzzz/toxic_detection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Toxic Detection -emoji: 💻 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/t110-ai-admin/InspectLens/README.md b/spaces/t110-ai-admin/InspectLens/README.md deleted file mode 100644 index 87a74280ea20e702b61e8a61d02959c80411903b..0000000000000000000000000000000000000000 --- a/spaces/t110-ai-admin/InspectLens/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: InspectLens -emoji: 🔍 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: other -duplicated_from: DAMO-NLP-SG/Video-LLaMA ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - diff --git a/spaces/t110-ai-admin/InspectLens/video_llama/datasets/datasets/video_instruct_dataset.py b/spaces/t110-ai-admin/InspectLens/video_llama/datasets/datasets/video_instruct_dataset.py deleted file mode 100644 index 7de6e20d30d9b0d7280d706636e9849b7f02618c..0000000000000000000000000000000000000000 --- a/spaces/t110-ai-admin/InspectLens/video_llama/datasets/datasets/video_instruct_dataset.py +++ /dev/null @@ -1,253 +0,0 @@ -import os -from video_llama.datasets.datasets.base_dataset import BaseDataset -from video_llama.datasets.datasets.caption_datasets import CaptionDataset -import pandas as pd -import decord -from decord import VideoReader -import random -import torch -from torch.utils.data.dataloader import default_collate -from PIL import Image -from typing import Dict, Optional, Sequence -import transformers -import pathlib -import json -from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer -import copy -from video_llama.processors import transforms_video,AlproVideoTrainProcessor -from torchvision import transforms -from video_llama.processors.video_processor import ToTHWC,ToUint8,load_video -from video_llama.conversation.conversation_video import Conversation,SeparatorStyle - -DEFAULT_IMAGE_PATCH_TOKEN = '' -video_conversation = Conversation( - system="", - roles=("Human", "Assistant"), - messages=[], - offset=0, - sep_style=SeparatorStyle.SINGLE, - sep="###", -) -IGNORE_INDEX = -100 - -class Video_Instruct_Dataset(BaseDataset): - def __init__(self, vis_processor, text_processor, vis_root, ann_root,num_video_query_token=32,tokenizer_name = 
'/mnt/workspace/ckpt/vicuna-13b/',data_type = 'video'): - """ - vis_root (string): Root directory of Llava images (e.g. webvid_eval/video/) - ann_root (string): Root directory of video (e.g. webvid_eval/annotations/) - split (string): val or test - """ - super().__init__(vis_processor=vis_processor, text_processor=text_processor) - - data_path = pathlib.Path(ann_root) - with data_path.open(encoding='utf-8') as f: - self.annotation = json.load(f) - - self.num_video_query_token = num_video_query_token - self.vis_root = vis_root - self.resize_size = 224 - self.num_frm = 8 - self.tokenizer = LlamaTokenizer.from_pretrained(tokenizer_name, use_fast=False) - self.tokenizer.pad_token = self.tokenizer.eos_token - self.tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) - self.IMAGE_PATCH_TOKEN_ID = self.tokenizer.get_vocab()[DEFAULT_IMAGE_PATCH_TOKEN] - - self.transform = AlproVideoTrainProcessor( - image_size=self.resize_size, n_frms = self.num_frm - ).transform - self.data_type = data_type - - def _get_video_path(self, sample): - rel_video_fp = sample['video'] - full_video_fp = os.path.join(self.vis_root, rel_video_fp) - return full_video_fp - - def __getitem__(self, index): - num_retries = 10 # skip error videos - for _ in range(num_retries): - try: - sample = self.annotation[index] - - video_path = self._get_video_path(sample) - conversation_list = sample['QA'] - - video, msg = load_video( - video_path=video_path, - n_frms=self.num_frm, - height=self.resize_size, - width=self.resize_size, - sampling ="uniform", return_msg = True - ) - video = self.transform(video) - if 'cn' in self.data_type: - msg = "" - # 添加视频,以及msg到convsation list 0 - sources = preprocess_multimodal(copy.deepcopy(conversation_list), None, cur_token_len=self.num_video_query_token,msg = msg) - new_sources = convert_source_vicuna_format(sources) - - data_dict = preprocess( - new_sources, - self.tokenizer) - data_dict = dict(input_ids=data_dict["input_ids"][0], - labels=data_dict["labels"][0]) - # image exist in the data - data_dict['image'] = video - except: - print(f"Failed to load examples with video: {video_path}. 
" - f"Will randomly sample an example as a replacement.") - index = random.randint(0, len(self) - 1) - continue - break - else: - raise RuntimeError(f"Failed to fetch video after {num_retries} retries.") - # "image_id" is kept to stay compatible with the COCO evaluation format - return { - "image": video, - "text_input": data_dict["input_ids"], - "labels": data_dict["labels"], - "type":'video', - } - - def __len__(self): - return len(self.annotation) - - def collater(self, instances): - input_ids, labels = tuple([instance[key] for instance in instances] - for key in ("text_input", "labels")) - input_ids = torch.nn.utils.rnn.pad_sequence( - input_ids, - batch_first=True, - padding_value=self.tokenizer.pad_token_id) - labels = torch.nn.utils.rnn.pad_sequence(labels, - batch_first=True, - padding_value=IGNORE_INDEX) - batch = dict( - input_ids=input_ids, - labels=labels, - attention_mask=input_ids.ne(self.tokenizer.pad_token_id), - ) - - if 'image' in instances[0]: - images = [instance['image'] for instance in instances] - if all(x is not None and x.shape == images[0].shape for x in images): - batch['images'] = torch.stack(images) - else: - batch['images'] = images - batch['conv_type'] = 'multi' - return batch - -def convert_source_vicuna_format(sources): - new_sources = [] - for source in sources: - new_source = [] - for i, sentence in enumerate(source): - role_0_msg = sentence['q'] - role_1_msg = sentence['a'] - new_source.append({ - 'from':'human', - 'value': role_0_msg, - }) - new_source.append({ - 'from':'gpt', - 'value': role_1_msg, - }) - new_sources.append(new_source) - return new_sources - -def preprocess_multimodal( - conversation_list: Sequence[str], - multimodal_cfg: dict, - cur_token_len: int, - msg='' -) -> Dict: - # 将conversational list中 - is_multimodal = True - # image_token_len = multimodal_cfg['image_token_len'] - image_token_len = cur_token_len - conversation_list[0]["q"] = " " + msg + conversation_list[0]["q"] - return [conversation_list] - -def _add_speaker_and_signal(header, source, get_conversation=True): - """Add speaker and start/end signal on each round.""" - BEGIN_SIGNAL = "###" - END_SIGNAL = "\n" - conversation = header - for sentence in source: - from_str = sentence["from"] - if from_str.lower() == "human": - from_str = video_conversation.roles[0] - elif from_str.lower() == "gpt": - from_str = video_conversation.roles[1] - else: - from_str = 'unknown' - sentence["value"] = (BEGIN_SIGNAL + from_str + ": " + - sentence["value"] + END_SIGNAL) - if get_conversation: - conversation += sentence["value"] - conversation += BEGIN_SIGNAL - return conversation - -def _tokenize_fn(strings: Sequence[str], - tokenizer: transformers.PreTrainedTokenizer) -> Dict: - """Tokenize a list of strings.""" - tokenized_list = [ - tokenizer( - text, - return_tensors="pt", - padding="longest", - max_length=512, - truncation=True, - ) for text in strings - ] - input_ids = labels = [ - tokenized.input_ids[0] for tokenized in tokenized_list - ] - input_ids_lens = labels_lens = [ - tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() - for tokenized in tokenized_list - ] - return dict( - input_ids=input_ids, - labels=labels, - input_ids_lens=input_ids_lens, - labels_lens=labels_lens, - ) - -def preprocess( - sources: Sequence[str], - tokenizer: transformers.PreTrainedTokenizer, -) -> Dict: - """ - Given a list of sources, each is a conversation list. This transform: - 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; - 2. Concatenate conversations together; - 3. 
Tokenize the concatenated conversation; - 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX. - """ - # add end signal and concatenate together - conversations = [] - for source in sources: - header = f"{video_conversation.system}\n\n" - conversation = _add_speaker_and_signal(header, source) - conversations.append(conversation) - # tokenize conversations - conversations_tokenized = _tokenize_fn(conversations, tokenizer) - input_ids = conversations_tokenized["input_ids"] - targets = copy.deepcopy(input_ids) - for target, source in zip(targets, sources): - tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], - tokenizer)["input_ids_lens"] - speakers = [sentence["from"] for sentence in source] - _mask_targets(target, tokenized_lens, speakers) - - return dict(input_ids=input_ids, labels=targets) - -def _mask_targets(target, tokenized_lens, speakers): - # cur_idx = 0 - cur_idx = tokenized_lens[0] - tokenized_lens = tokenized_lens[1:] - target[:cur_idx] = IGNORE_INDEX - for tokenized_len, speaker in zip(tokenized_lens, speakers): - if speaker == "human": - target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX - cur_idx += tokenized_len diff --git a/spaces/taneemishere/html-code-generation-from-images-with-deep-neural-networks/classes/model/autoencoder_image.py b/spaces/taneemishere/html-code-generation-from-images-with-deep-neural-networks/classes/model/autoencoder_image.py deleted file mode 100644 index f4ddc426c2abee8a4e10d5a2b0b6e69e50df3ee0..0000000000000000000000000000000000000000 --- a/spaces/taneemishere/html-code-generation-from-images-with-deep-neural-networks/classes/model/autoencoder_image.py +++ /dev/null @@ -1,59 +0,0 @@ -__author__ = 'Taneem Jan, improved the old model through pretrained Auto-encoders' - -from keras.layers import Input, Dropout, Conv2D, MaxPooling2D, Conv2DTranspose, UpSampling2D -from keras.models import Model -from .Config import * -from .AModel import * - - -class autoencoder_image(AModel): - def __init__(self, input_shape, output_size, output_path): - AModel.__init__(self, input_shape, output_size, output_path) - self.name = 'autoencoder' - - input_image = Input(shape=input_shape) - encoder = Conv2D(32, 3, padding='same', activation='relu')(input_image) - encoder = Conv2D(32, 3, padding='same', activation='relu')(encoder) - encoder = MaxPooling2D()(encoder) - encoder = Dropout(0.25)(encoder) - - encoder = Conv2D(64, 3, padding='same', activation='relu')(encoder) - encoder = Conv2D(64, 3, padding='same', activation='relu')(encoder) - encoder = MaxPooling2D()(encoder) - encoder = Dropout(0.25)(encoder) - - encoder = Conv2D(128, 3, padding='same', activation='relu')(encoder) - encoder = Conv2D(128, 3, padding='same', activation='relu')(encoder) - encoder = MaxPooling2D()(encoder) - encoded = Dropout(0.25, name='encoded_layer')(encoder) - - decoder = Conv2DTranspose(128, 3, padding='same', activation='relu')(encoded) - decoder = Conv2DTranspose(128, 3, padding='same', activation='relu')(decoder) - decoder = UpSampling2D()(decoder) - decoder = Dropout(0.25)(decoder) - - decoder = Conv2DTranspose(64, 3, padding='same', activation='relu')(decoder) - decoder = Conv2DTranspose(64, 3, padding='same', activation='relu')(decoder) - decoder = UpSampling2D()(decoder) - decoder = Dropout(0.25)(decoder) - - decoder = Conv2DTranspose(32, 3, padding='same', activation='relu')(decoder) - decoder = Conv2DTranspose(3, 3, padding='same', activation='relu')(decoder) - decoder = UpSampling2D()(decoder) - decoded = Dropout(0.25)(decoder) - - # 
decoder = Dense(256*256*3)(decoder) - # decoded = Reshape(target_shape=input_shape)(decoder) - - self.model = Model(input_image, decoded) - self.model.compile(optimizer='adadelta', loss='binary_crossentropy') - - # self.model.summary() - - def fit_generator(self, generator, steps_per_epoch): - self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1) - self.save() - - def predict_hidden(self, images): - hidden_layer_model = Model(inputs=self.input, outputs=self.get_layer('encoded_layer').output) - return hidden_layer_model.predict(images) diff --git a/spaces/tanishqvashisht/colorizeAnime/train.py b/spaces/tanishqvashisht/colorizeAnime/train.py deleted file mode 100644 index 84b3f93d4cb53dec4720bad677e146f61b6929ab..0000000000000000000000000000000000000000 --- a/spaces/tanishqvashisht/colorizeAnime/train.py +++ /dev/null @@ -1,102 +0,0 @@ -import torch -from utils import save_checkpoint, load_checkpoint, save_some_examples -import torch.nn as nn -import torch.optim as optim -import config -from dataset import MapDataset -from generator_model import Generator -from discriminator_model import Discriminator -from torch.utils.data import DataLoader -from tqdm import tqdm -from torchvision.utils import save_image - -torch.backends.cudnn.benchmark = True - - -def train_fn( - disc, gen, loader, opt_disc, opt_gen, l1_loss, bce, g_scaler, d_scaler, -): - loop = tqdm(loader, leave=True) - - for idx, (x, y) in enumerate(loop): - x = x.to(config.DEVICE) - y = y.to(config.DEVICE) - - # Train Discriminator - with torch.cuda.amp.autocast(): - y_fake = gen(x) - D_real = disc(x, y) - D_real_loss = bce(D_real, torch.ones_like(D_real)) - D_fake = disc(x, y_fake.detach()) - D_fake_loss = bce(D_fake, torch.zeros_like(D_fake)) - D_loss = (D_real_loss + D_fake_loss) / 2 - - disc.zero_grad() - d_scaler.scale(D_loss).backward() - d_scaler.step(opt_disc) - d_scaler.update() - - # Train generator - with torch.cuda.amp.autocast(): - D_fake = disc(x, y_fake) - G_fake_loss = bce(D_fake, torch.ones_like(D_fake)) - L1 = l1_loss(y_fake, y) * config.L1_LAMBDA - G_loss = G_fake_loss + L1 - - opt_gen.zero_grad() - g_scaler.scale(G_loss).backward() - g_scaler.step(opt_gen) - g_scaler.update() - - if idx % 10 == 0: - loop.set_postfix( - D_real=torch.sigmoid(D_real).mean().item(), - D_fake=torch.sigmoid(D_fake).mean().item(), - ) - - -def main(): - disc = Discriminator(in_channels=3).to(config.DEVICE) - gen = Generator(in_channels=3, features=64).to(config.DEVICE) - opt_disc = optim.Adam(disc.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999), ) - opt_gen = optim.Adam(gen.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999)) - BCE = nn.BCEWithLogitsLoss() - L1_LOSS = nn.L1Loss() - - if config.LOAD_MODEL: - load_checkpoint( - config.CHECKPOINT_GEN, gen, opt_gen, config.LEARNING_RATE, - ) - load_checkpoint( - config.CHECKPOINT_DISC, disc, opt_disc, config.LEARNING_RATE, - ) - - train_dataset = MapDataset(root_dir=config.TRAIN_DIR) - train_loader = DataLoader( - train_dataset, - batch_size=config.BATCH_SIZE, - shuffle=True, - num_workers=config.NUM_WORKERS, - ) - g_scaler = torch.cuda.amp.GradScaler() - d_scaler = torch.cuda.amp.GradScaler() - val_dataset = MapDataset(root_dir=config.VAL_DIR) - val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False) - - input_data = MapDataset(root_dir="input") - input_loader = DataLoader(input_data, batch_size=1, shuffle=False) - - for epoch in range(config.NUM_EPOCHS): - # train_fn( - # disc, gen, train_loader, opt_disc, opt_gen, L1_LOSS, 
BCE, g_scaler, d_scaler, - # ) - # - # if config.SAVE_MODEL and epoch % 5 == 0: - # save_checkpoint(gen, opt_gen, filename=config.CHECKPOINT_GEN) - # save_checkpoint(disc, opt_disc, filename=config.CHECKPOINT_DISC) - - save_some_examples(gen, input_loader, epoch, folder="evaluation") - - -if __name__ == "__main__": - main() diff --git a/spaces/team-language-detector/LanguageDetector/app.py b/spaces/team-language-detector/LanguageDetector/app.py deleted file mode 100644 index 99f6873709e9e1ecdcd0dcd167bc398a43019cc6..0000000000000000000000000000000000000000 --- a/spaces/team-language-detector/LanguageDetector/app.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Gradio app to showcase the language detector.""" - -import gradio as gr -from transformers import pipeline - - -# Get transformer model and set up a pipeline -model_ckpt = "papluca/xlm-roberta-base-language-detection" -pipe = pipeline("text-classification", model=model_ckpt) - - -def predict(text: str) -> dict: - """Compute predictions for text.""" - preds = pipe(text, return_all_scores=True, truncation=True, max_length=128) - if preds: - pred = preds[0] - return {p["label"]: float(p["score"]) for p in pred} - else: - return None - - -title = "Language detection with XLM-RoBERTa" -description = "Determine the language in which your text is written." -examples = [ - ["Better late than never."], - ["Tutto è bene ciò che finisce bene."], - ["Donde hay humo, hay fuego."], -] -explanation = "Supported languages are (20): arabic (ar), bulgarian (bg), german (de), modern greek (el), english (en), spanish (es), french (fr), hindi (hi), italian (it), japanese (ja), dutch (nl), polish (pl), portuguese (pt), russian (ru), swahili (sw), thai (th), turkish (tr), urdu (ur), vietnamese (vi), and chinese (zh)." - -app = gr.Interface( - fn=predict, - inputs=gr.inputs.Textbox( - placeholder="What's the text you want to know the language for?", - label="Text", - lines=3, - ), - outputs=gr.outputs.Label(num_top_classes=3, label="Your text is written in "), - title=title, - description=description, - examples=examples, - article=explanation, -) - -app.launch() diff --git a/spaces/terfces0erbo/CollegeProjectV2/Among Trees Free Download PC Game.md b/spaces/terfces0erbo/CollegeProjectV2/Among Trees Free Download PC Game.md deleted file mode 100644 index da5752def65961a65350dd2d2596cf11ecd0615f..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Among Trees Free Download PC Game.md +++ /dev/null @@ -1,11 +0,0 @@ -
              -

Among Trees is a free game from the free game category. It was added to our catalog of games on March 16, 2016, and since then it has been played 2,037 times. You can also find other free games like Among Trees - Episode 1: Ténèbres and Beyond Sky. Download the free game Among Trees now and play it for free. If you like our game, be sure to share it with your friends!

              -

Among Trees is a free game where you play as a tree that must travel through dangerous and challenging forest levels in order to find its way home. It is a unique adventure with minimal controls and a simple, intuitive concept.

              -

              Among Trees Free Download PC Game


              Download Zip ✸✸✸ https://bytlly.com/2uGlSh



              -

The story begins with a tree called King who is slowly drowning in a forest pond. After he feels a sudden pain in his leg, he discovers that his tree friend has been eaten by a wolf. King eventually finds the rest of the trees, who are out of the water in the forest. After King explains his pain to them, the trees agree to help him get home. They all find their way out of the forest, but King is not sure whether he can make it home or not.

              -

The game features various levels where you play and collect coins, gems, and other items. The number of coins you collect determines how fast the trees can move through the forest. The gems can be used to buy new trees and items.

              -

The game has a unique visual aesthetic. It was designed by a renowned art director called John Crispin, who also worked on the game Brothers. It also has an amazing soundtrack composed by the famous composer Danny Baranowsky.

              -

You are downloading the game "Among Trees Free Download" for free. Enjoy this free game for a while, and if you like it, you can register it. If you have already played this game, you can write a review. Thanks!

              -

              -
              -
              \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Baixar Al Casillas Pdf Em Portugues ((FREE)).md b/spaces/terfces0erbo/CollegeProjectV2/Baixar Al Casillas Pdf Em Portugues ((FREE)).md deleted file mode 100644 index d3f973c1411dbc8c549c52b5cccc8e936e787a71..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Baixar Al Casillas Pdf Em Portugues ((FREE)).md +++ /dev/null @@ -1,6 +0,0 @@ -

              Baixar Al Casillas Pdf Em Portugues


              Download File ✑ ✑ ✑ https://bytlly.com/2uGkgU



- -View the file A. L. Casillas for free, uploaded for the course Elementos de Máquinas I. Category: Other - 17202330.
              -
              -
              -

              diff --git a/spaces/terfces0erbo/CollegeProjectV2/Embrilliance Serial Number Crack Adobeinstmank 2021.md b/spaces/terfces0erbo/CollegeProjectV2/Embrilliance Serial Number Crack Adobeinstmank 2021.md deleted file mode 100644 index 4603115eceb1fd4addcd6c4d384159ce8ae31e7c..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Embrilliance Serial Number Crack Adobeinstmank 2021.md +++ /dev/null @@ -1,76 +0,0 @@ - -

              Embrilliance Serial Number Crack Adobeinstmank: How to Get Embrilliance Embroidery Software for Free

              -

              If you are looking for a way to get Embrilliance embroidery software for free, you might have come across the term "embrilliance serial number crack adobeinstmank". This is a combination of words that refers to a method of cracking the serial number of Embrilliance software and using it without paying for it. In this article, we will explain what Embrilliance software is, why you might want to crack it, how to crack it, and what are the risks and consequences of doing so.

              -

              What is Embrilliance software?

              -

              Embrilliance software is a suite of embroidery programs that allow you to create, edit, and digitize embroidery designs. It consists of several products, such as Embrilliance Essentials, Embrilliance Thumbnailer, Embrilliance StitchArtist, Embrilliance AlphaTricks, Embrilliance Enthusiast, and more. Each product has different features and functions that cater to different levels of embroidery skills and needs.

              -

              embrilliance serial number crack adobeinstmank


              DOWNLOAD · https://bytlly.com/2uGkUC



              -

              Embrilliance software is compatible with both Mac and PC computers, and supports most embroidery machine formats. It also integrates with popular online services, such as Dropbox, Google Drive, iCloud Drive, and more. You can use Embrilliance software to create your own designs from scratch, import designs from other sources, add lettering and monograms, customize colors and stitches, convert images to embroidery files, and more.

              -

              Why would you want to crack Embrilliance software?

              -

              Embrilliance software is not free. You have to purchase a license for each product you want to use. The prices range from $44.95 for Embrilliance Thumbnailer to $649 for Embrilliance StitchArtist Level 3. If you want to use multiple products, you have to pay more. For example, if you want to use Embrilliance Essentials and Embrilliance StitchArtist Level 1, you have to pay $298 in total.

              -

              Some people might find these prices too expensive or unreasonable for their budget or needs. They might think that paying hundreds of dollars for software to use a machine that is already overpriced is not worth it. They might also think that they can get the same or better results with other cheaper or free alternatives. Therefore, they might look for ways to get Embrilliance software for free by cracking its serial number.

              -

              How to crack Embrilliance software?

              -

              A serial number is a unique code that identifies a specific copy of a software product. It is used to activate the software and verify its authenticity. When you purchase Embrilliance software, you receive a serial number via email that you have to enter in the software to use it.

              -

              To crack Embrilliance software means to bypass or modify its serial number verification process and use it without paying for it. There are different methods of doing this, such as using key generators, patchers, loaders, or modified files. These methods are usually distributed through websites or forums that offer pirated software or cracks.

              -

              One of these websites or forums might use the term "embrilliance serial number crack adobeinstmank" as a keyword or a title to attract people who are looking for Embrilliance cracks. The term "adobeinstmank" is probably a random word that has no meaning but is used to make the keyword more unique and less detectable by search engines or anti-piracy measures.

              -

              To crack Embrilliance software using one of these methods, you might have to follow these steps:

              -

              -
                -
              1. Download the crack file or program from the website or forum.
              2. -
              3. Disable your antivirus or firewall software, as they might detect the crack as malware or virus.
              4. -
              5. Extract or run the crack file or program on your computer.
              6. -
              7. Follow the instructions on the screen or in the readme file.
              8. -
              9. Launch Embrilliance software and enter the generated or modified serial number.
              10. -
              11. Enjoy using Embrilliance software for free.
              12. -
              -

              What are the risks and consequences of cracking Embrilliance software?

              -

              Cracking Embrilliance software might seem like an easy and convenient way to get it for free, but it comes with many risks and consequences that you should be aware of before doing it. Some of them are:

              -
                -
              • You might download malware or virus along with the crack file or program. This could harm your computer or compromise your personal data and privacy.
              • -
              • You might face legal issues or penalties for violating the intellectual property rights of Embrilliance software developers. This could result in fines or lawsuits.
              • -
              • You might lose access to updates or support from Embrilliance software developers. This could affect the performance or compatibility of your software with your machine or operating system.
              • -
              • You might experience bugs or errors in your software that could ruin your designs or damage your machine.
              • -
              • You might lose your reputation or credibility as an embroidery professional or hobbyist if you use cracked software.
              • -
              -


              -

              What are the alternatives to cracking Embrilliance software?

              -

              If you are looking for a way to get Embrilliance software for free or for a lower price, cracking its serial number is not the only option. There are some alternatives that you can consider, such as:

              -
                -
              • Using the free trial version of Embrilliance software. You can download and use Embrilliance software for 30 days without paying anything. You can access all the features and functions of the software and create your own designs. However, you will not be able to save or export your designs unless you purchase a license.
              • -
              • Using the free or open source embroidery software. There are some free or open source programs that you can use to create, edit, or digitize embroidery designs. Some examples are Ink/Stitch, Thred, SophieSew, Embroidermodder, and more. However, these programs might not have the same quality or functionality as Embrilliance software, and they might not support all the embroidery machine formats.
              • -
              • Using the online embroidery services. There are some online platforms that offer embroidery services, such as digitizing, editing, converting, or stitching your designs. Some examples are EmbroideryDesigns.com, DigitizingNinja.com, StitchAmerica.com, and more. However, these services might charge you a fee per design or per stitch count, and they might not guarantee the quality or satisfaction of your designs.
              • -
              -

              What are the best practices for using Embrilliance software?

              -

              If you decide to purchase a license for Embrilliance software and use it legally and ethically, there are some best practices that you can follow to make the most of it. Some of them are:

              -
                -
              • Keep your serial number safe and secure. Do not share it with anyone or post it online. If you lose it or delete it permanently, you might not be able to use your software again.
              • -
              • Update your software regularly. Embrilliance software developers release updates and patches that fix bugs, improve performance, and add new features. You can check for updates from within the software or from its official website.
              • -
              • Learn from the tutorials and resources. Embrilliance software has a user manual and a help menu that explain how to use its features and functions. You can also access tutorials and resources from its official website, blog, YouTube channel, community portal, and online courses.
              • -
              • Ask for help or support. If you have any questions or issues with your software, you can contact the Embrilliance support team via email or phone. You can also ask for help or advice from other Embrilliance users on the community portal or other forums.
              • -
              -


              -

              What are the features of Embrilliance software?

              -

              Embrilliance software has many features that make it a versatile and powerful embroidery software. Some of these features are:

              -
                -
              • Embrilliance Essentials: This is the basic product that allows you to add lettering and monograms, merge designs, resize and recolor designs, convert formats, and more.
              • -
              • Embrilliance Thumbnailer: This is a product that allows you to view your embroidery designs as thumbnails in your computer folders, without opening them in the software.
              • -
              • Embrilliance StitchArtist: This is a product that allows you to create your own designs from scratch, using vector drawing tools and stitch types.
              • -
              • Embrilliance AlphaTricks: This is a product that allows you to use any alphabet or font as an embroidery design, and map them to keyboard keys.
              • -
              • Embrilliance Enthusiast: This is a product that allows you to edit your designs with advanced tools, such as stitch editing, splitting, carving, knockdown, and more.
              • -
              -

              What are the benefits of using Embrilliance software?

              -

              Using Embrilliance software has many benefits that make it a worthwhile investment for any embroidery enthusiast or professional. Some of these benefits are:

              -
                -
              • You can create beautiful and unique embroidery designs with ease and creativity.
              • -
              • You can save time and money by using one software for multiple platforms and machines.
              • -
              • You can improve your skills and knowledge by learning from the tutorials and resources.
              • -
              • You can get support and help from the Embrilliance team and community.
              • -
              • You can enjoy the satisfaction and pride of using a legal and ethical software.
              • -
              -

              Conclusion

              -

              Embrilliance serial number crack adobeinstmank is a term that refers to a method of cracking the serial number of Embrilliance embroidery software and using it without paying for it. While this might seem like a tempting option for some people who want to save money or try different features, it is not recommended or ethical to do so. Cracking Embrilliance software involves many risks and consequences that could outweigh its benefits. It is better to purchase a license for Embrilliance software from its official website and enjoy its full functionality and quality without any worries.

              -


              -
              -
              \ No newline at end of file diff --git a/spaces/text-generation-inference/chat-ui/src/app.d.ts b/spaces/text-generation-inference/chat-ui/src/app.d.ts deleted file mode 100644 index f59b884c51ed3c31fc0738fd38d0d75b580df5e4..0000000000000000000000000000000000000000 --- a/spaces/text-generation-inference/chat-ui/src/app.d.ts +++ /dev/null @@ -1,12 +0,0 @@ -// See https://kit.svelte.dev/docs/types#app -// for information about these interfaces -declare global { - namespace App { - // interface Error {} - // interface Locals {} - // interface PageData {} - // interface Platform {} - } -} - -export {}; diff --git a/spaces/thejagstudio/procom/amazon/__init__.py b/spaces/thejagstudio/procom/amazon/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/themanas021/fake-news-gradio/app.py b/spaces/themanas021/fake-news-gradio/app.py deleted file mode 100644 index 93571d63e4c26c972bbecbc8fbd143d78384838f..0000000000000000000000000000000000000000 --- a/spaces/themanas021/fake-news-gradio/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import pickle -import nltk -from nltk.stem.porter import PorterStemmer -from nltk.corpus import stopwords -import re -import gradio as gr - -nltk.download("stopwords") -port_stem = PorterStemmer() -# Load the pre-trained model pipeline -with open('pipeline_model.pkl', 'rb') as model_file: - model_pipeline = pickle.load(model_file) - -# Set up Streamlit page layout - -# Define function for text stemming -def stemming(content): - stemmed_content = re.sub('[^a-zA-Z]',' ',content) - stemmed_content = stemmed_content.lower() - stemmed_content = stemmed_content.split() - stemmed_content = [port_stem.stem(word) for word in stemmed_content if not word in stopwords.words('english')] - stemmed_content = ' '.join(stemmed_content) - return stemmed_content - -app_inputs = gr.inputs.Textbox(lines=2, placeholder="Enter title here...") - -def pre(app_inputs): - stemmed_input = stemming(app_inputs) - - # Make prediction using the model pipeline - prediction = model_pipeline.predict([stemmed_input]) - prediction_prob = model_pipeline.predict_proba([stemmed_input]) - - if prediction == 1: - fake_prob_percent = prediction_prob[0][1] * 100 # Probability of class 1 (Fake) in percentage - return f"Fake News Detected! The probability is: {fake_prob_percent:.2f}%" - else: - legit_prob_percent = prediction_prob[0][0] * 100 # Probability of class 0 (Legit) in percentage - return f"News is Legit! The probability is: {legit_prob_percent:.2f}%" - - - - - - - -interface = gr.Interface(fn=pre, - inputs=app_inputs, - outputs='text', - title='Sup, I\'m Detector Babe') - -interface.launch() diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/ANNO1602GoldEditionFULLISO [TOP].md b/spaces/tialenAdioni/chat-gpt-api/logs/ANNO1602GoldEditionFULLISO [TOP].md deleted file mode 100644 index 3a80e364468a6d66fea9f68de86d75c44112f59b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/ANNO1602GoldEditionFULLISO [TOP].md +++ /dev/null @@ -1,17 +0,0 @@ -
              -

              Anno 1602 Gold Edition: A Classic City-Building Game with Bonus Content

              -

              Anno 1602 Gold Edition is a re-release of the original Anno 1602 game, which was launched in 1998 and became one of the most successful city-building games of all time. The game combines elements of strategy, simulation, and exploration, as you create and manage a flourishing colony in the 17th century.

              -

              The Gold Edition includes the original game and its expansion pack, New Islands, New Adventures, which adds new scenarios, buildings, and features. It also includes bonus content such as the Anno 1602 soundtrack, wallpaper, and a company logo for Anno 1800, the latest installment in the series.

              -

              ANNO1602GoldEditionFULLISO


              Downloadhttps://urlcod.com/2uK3UJ



              -

              Anno 1602 Gold Edition is part of the Anno History Collection, which remasters four classic Anno games for modern systems. The collection features improved graphics, multiplayer functionality, and compatibility with Windows 10. You can buy Anno 1602 Gold Edition separately or as part of the collection from the Ubisoft Store[^1^].

              -

              If you are a fan of city-building games or historical simulations, you might enjoy Anno 1602 Gold Edition. The game has a high replay value, as you can choose from different maps, difficulty levels, and game modes. You can also customize your colony with various buildings, resources, and trade routes. The game has a positive reception from critics and players alike, who praise its addictive gameplay, charming graphics, and rich atmosphere[^2^]. However, some aspects of the game might feel outdated or clunky compared to newer titles in the genre.

              -

              Anno 1602 Gold Edition is a classic game that deserves a place in your library if you appreciate the history and evolution of city-building games. It offers hours of fun and challenge as you build your own empire in the New World.

              -

              - -

              How to play Anno 1602 Gold Edition

              -

              Anno 1602 Gold Edition is a game that requires both strategic thinking and careful management. You start the game by choosing a map and a faction, and then you set sail to find a suitable island to settle. You need to build houses for your settlers, farms for food, and workshops for goods. You also need to balance your budget, as you have to pay taxes and wages to your people.

              -

              As you progress in the game, you can unlock new buildings, technologies, and resources. You can also explore other islands, trade with other factions, and engage in diplomacy or warfare. The game has different scenarios and objectives that challenge your skills and creativity. You can also play in sandbox mode, where you can build your colony without any restrictions.

              -

              Anno 1602 Gold Edition has a simple and intuitive interface that lets you control your colony with ease. You can use the mouse to select and place buildings, adjust production levels, and access menus. You can also use keyboard shortcuts for faster actions. The game has a tutorial that guides you through the basics of the game, as well as a manual that explains the game mechanics in detail.

              -

              Anno 1602 Gold Edition is a game that can appeal to both casual and hardcore gamers. It has a relaxing and immersive atmosphere that lets you enjoy the beauty and diversity of the New World. It also has a deep and complex gameplay that challenges your strategic and managerial skills. Whether you want to build a peaceful utopia or a powerful empire, Anno 1602 Gold Edition gives you the freedom and tools to do so.

              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/GTA San Andreas CD Crack PC Download and Install the No-CD Patch.md b/spaces/tialenAdioni/chat-gpt-api/logs/GTA San Andreas CD Crack PC Download and Install the No-CD Patch.md deleted file mode 100644 index 6980a910fdacc88dda32de59ea6899a38d63b82b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/GTA San Andreas CD Crack PC Download and Install the No-CD Patch.md +++ /dev/null @@ -1,167 +0,0 @@ - -

              GTA San Andreas CD Crack PC: How to Download and Play GTA San Andreas Without CD

              - -

              GTA San Andreas is one of the most popular and iconic games in the Grand Theft Auto franchise. Released in 2004, this game features a huge open world, a rich story, and a variety of missions and activities. However, if you want to play GTA San Andreas on your PC, you may encounter a problem: you need to have the original CD of the game to run it.

              - -

              Fortunately, there is a solution: you can download and install a GTA San Andreas CD crack for PC. A CD crack is a modified version of the game executable that bypasses the CD check and allows you to play the game without inserting the CD. This way, you can enjoy GTA San Andreas on your PC without having to worry about losing or damaging your CD.

              -

              gta san andreas cd crack pc


              Download Zip ►►► https://urlcod.com/2uK4L2



              - -

              In this article, we will show you how to download and install a GTA San Andreas CD crack for PC. We will also explain the advantages and disadvantages of using a CD crack, and the alternatives that you can consider. Follow these steps to play GTA San Andreas without CD:

              - -

              Step 1: Download GTA San Andreas Full PC Game + Crack

              - -

              The first step is to download GTA San Andreas full PC game + crack. You can find many websites that offer this download, but you need to be careful and choose a reliable and safe source. Some websites may contain viruses, malware, or fake files that can harm your computer or steal your personal information.

              - -

              One of the websites that we recommend is YASIR252.com. This website provides GTA San Andreas full PC game + crack for free, with a fast and easy download process. You can also find other PC games and software on this website.

              - -

              To download GTA San Andreas full PC game + crack from YASIR252.com, follow these steps:

              - -
                -
              • Go to https://www.yasir252.com/en/pc-games/gta-san-andreas-full/
              • -
              • Scroll down to the bottom of the page and click on the download link that matches your preferred server.
              • -
              • Wait for the download to finish. The file size is 3.6 GB, so it may take some time depending on your internet speed.
              • -
              • Extract the gtasanfurr.rar file with Winrar 5.71 or any other software that can extract RAR files.
              • -
              • Enter the GTA SA folder that you extracted.
              • -
              - -

              Step 2: Install GTA San Andreas Full Crack PC Game

              - -

              The next step is to install GTA San Andreas full crack PC game. You don't need to install the game in the conventional way, as the crack version already contains all the necessary files and settings. You just need to run the gta sa.exe file to play the game.

              - -

              To install GTA San Andreas full crack PC game, follow these steps:

              - -
                -
              • Run the gta sa.exe file that is located in the GTA SA folder that you extracted.
              • -
              • A graphic card selection and playing resolution will appear. Choose the options that suit your preferences and system requirements.
              • -
              • Click OK to start playing GTA San Andreas without CD.
              • -
              - -

              Advantages and Disadvantages of Using GTA San Andreas CD Crack for PC

              - -

              Using a GTA San Andreas CD crack for PC has some advantages and disadvantages that you should be aware of before deciding to use it. Here are some of them:

              - -

              Advantages

              - -
                -
              • You can play GTA San Andreas on your PC without having to insert the CD every time.
              • -
              • You can save your CD from getting lost, damaged, or scratched.
              • -
              • You can download and install GTA San Andreas full PC game + crack for free from various websites.
              • -
              • You can enjoy all the features and content of GTA San Andreas without any limitations or restrictions.
              • -
              - -

              Disadvantages

              - -
                -
              • You may encounter some compatibility or performance issues when playing GTA San Andreas with a CD crack, especially if your system does not meet the minimum requirements or if you have other software running in the background.
              • -
              • You may not be able to access some online features or updates of GTA San Andreas when using a CD crack, as it may not be compatible with the official servers or patches.
              • -
              • You may violate some copyright laws or terms of service when using a CD crack, as it may be considered as piracy or illegal distribution of software.
              • -
              • You may expose your computer or personal information to some risks when downloading or installing a CD crack from untrusted sources, as they may contain viruses, malware, or fake files that can harm your computer or steal your personal information.
              • -
              - -

              Alternatives to Using GTA San Andreas CD Crack for PC

              - -

              If you are not comfortable or satisfied with using a GTA San Andreas CD crack for PC, you can consider some alternatives that can allow you to play GTA San Andreas on your PC without CD. Here are some of them:

              -


              - -
                -
              • Buy a digital copy of GTA San Andreas from an official platform like Steam, Rockstar Games Launcher, or Microsoft Store. This way, you can download and play GTA San Andreas on your PC without needing a CD or a crack. You can also enjoy some benefits like online features, updates, achievements, cloud saves, etc.
              • -
              • Use a virtual drive software like Daemon Tools or PowerISO to create a virtual image of your GTA San Andreas CD on your computer. This way, you can mount the image file and play GTA San Andreas on your PC without inserting the actual CD. You will still need to have the original CD of the game for this method.
              • -
              • Use a backup software like Alcohol 120% or CloneCD to create a backup copy of your GTA San Andreas CD on another disc or USB drive. This way, you can use the backup copy instead of the original CD to play GTA San Andreas on your PC. You will still need to have the original CD of the game for this method.
              • -
              - -


              -

              How to Fix GTA San Andreas CD Crack PC Problems

              - -

              While using a GTA San Andreas CD crack for PC can be convenient and fun, it can also cause some problems that can affect your gaming experience. Some of these problems include:

              - -
                -
              • The game may crash or freeze randomly.
              • -
              • The game may not run properly or display some graphical glitches.
              • -
              • The game may not recognize your save files or settings.
              • -
              • The game may not work with some mods or trainers.
              • -
              • The game may not be compatible with some antivirus or firewall software.
              • -
              - -

              If you encounter any of these problems when using a GTA San Andreas CD crack for PC, you can try some of these solutions to fix them:

              - -
                -
              • Update your drivers and DirectX to the latest versions.
              • -
              • Run the game as administrator and in compatibility mode for Windows XP SP3.
              • -
              • Disable any background programs or processes that may interfere with the game.
              • -
              • Verify the integrity of the game files and repair any corrupted or missing files.
              • -
              • Reinstall the game and the CD crack if necessary.
              • -
              • Contact the CD crack developer or the website where you downloaded it for support.
              • -
              - -

              Is GTA San Andreas CD Crack PC Legal and Safe?

              - -

              One of the most common questions that people ask when using a GTA San Andreas CD crack for PC is whether it is legal and safe. The answer to this question is not simple, as it depends on various factors such as your location, your source, and your intention.

              - -

              In general, using a GTA San Andreas CD crack for PC is considered illegal, as it violates the copyright laws and the terms of service of Rockstar Games, the developer and publisher of GTA San Andreas. By using a CD crack, you are bypassing the copy protection system and distributing or accessing unauthorized copies of the software. This can result in legal consequences such as fines, lawsuits, or even jail time.

              - -

              However, some people may argue that using a GTA San Andreas CD crack for PC is legal under certain circumstances, such as:

              - -
                -
              • You own a legitimate copy of GTA San Andreas and you use the CD crack for personal use only.
              • -
              • You use the CD crack to make a backup copy of your GTA San Andreas CD in case it gets lost, damaged, or scratched.
              • -
              • You use the CD crack to play GTA San Andreas on a different device or platform than the original one.
              • -
              • You use the CD crack to play GTA San Andreas in a region where it is not officially available or supported.
              • -
              - -

              However, these arguments are not universally accepted or recognized by law, and they may not protect you from legal action if you are caught using a GTA San Andreas CD crack for PC. Therefore, it is advisable to consult a lawyer or an expert before using a CD crack to avoid any legal issues.

              - -

              As for safety, using a GTA San Andreas CD crack for PC can also pose some risks for your computer and your personal information. As mentioned earlier, some websites that offer CD cracks may contain viruses, malware, or fake files that can harm your computer or steal your personal information. Some CD cracks may also contain hidden features or codes that can damage your system or compromise your security. Therefore, it is advisable to use a reliable antivirus and firewall software when downloading or installing a CD crack to avoid any malicious attacks.

              - -

              Conclusion

              - -

              GTA San Andreas is one of the best games in the Grand Theft Auto franchise, and you can play it on your PC without needing a CD by using a GTA San Andreas CD crack for PC. However, you should also be aware of the advantages and disadvantages of using a CD crack, and consider some alternatives that may suit your preferences better. We hope this article has helped you learn how to download and install a GTA San Andreas CD crack for PC.

              -


              -
              -
              \ No newline at end of file diff --git a/spaces/timmostone/stabilityai-stable-diffusion-2/app.py b/spaces/timmostone/stabilityai-stable-diffusion-2/app.py deleted file mode 100644 index d2782cea00b1bfcd22df7c204d9e52a6baf46ac2..0000000000000000000000000000000000000000 --- a/spaces/timmostone/stabilityai-stable-diffusion-2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2").launch() \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Manusmriti In Telugu Pdf !FREE!.md b/spaces/tioseFevbu/cartoon-converter/scripts/Manusmriti In Telugu Pdf !FREE!.md deleted file mode 100644 index 900d6958cc6008d7f7c8b425e65e55eecd2384e6..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Manusmriti In Telugu Pdf !FREE!.md +++ /dev/null @@ -1,32 +0,0 @@ -
              -

              Manusmriti in Telugu PDF: A Guide to the Ancient Hindu Law Book

              - -

              Manusmriti, also known as Manu Dharma Shastra or Manava Dharma Shastra, is one of the oldest and most influential Hindu law books. It contains the teachings of Manu, the first human and lawgiver, as revealed by the god Brahma. It covers various topics such as social duties, moral conduct, marriage, inheritance, punishments, purification, and salvation.

              - -

Manusmriti was written in Sanskrit between the 2nd and 3rd centuries CE. It has 12 chapters and 2,684 verses. It was translated into Telugu by Nallamandighal Lakshminarasimhacharya in 2000. The Telugu version of Manusmriti is available for free download from various online sources.

              -

              manusmriti in telugu pdf


              DOWNLOAD ===> https://urlcod.com/2uHvsK



              - -

              In this article, we will provide a brief overview of Manusmriti and its relevance for modern Hindus. We will also share some links where you can download Manusmriti in Telugu PDF format.

              - -

              What is Manusmriti?

              - -

              Manusmriti is a Hindu law book that codifies the rules and regulations for various aspects of human life. It is based on the concept of varna (social class) and ashrama (stage of life). It prescribes different duties and rights for different groups of people, such as Brahmins (priests), Kshatriyas (warriors), Vaishyas (merchants), Shudras (servants), and Dalits (outcasts).

              - -

              Manusmriti also deals with topics such as creation, cosmology, dharma (righteousness), karma (action), moksha (liberation), yajna (sacrifice), samskara (rites of passage), prashasta (virtues), nishiddha (prohibitions), vyavahara (legal procedure), daana (charity), shanti (peace), and shraaddha (ancestor worship).

              - -

              Manusmriti is considered to be one of the most authoritative sources of Hindu law and ethics. It has influenced many other Hindu texts, such as the Puranas, the Dharmasutras, the Smritis, and the Arthashastra. It has also influenced the legal systems of many ancient and medieval Hindu kingdoms in India and Southeast Asia.

              -

              - -

              Why should you read Manusmriti?

              - -

              Manusmriti is a valuable source of information about the ancient Hindu culture and society. It reflects the values, beliefs, customs, and practices of the people who lived during that time. It also provides insights into the spiritual and philosophical aspects of Hinduism.

              - -

              Reading Manusmriti can help you to understand the origins and evolution of Hindu law and ethics. It can also help you to appreciate the diversity and complexity of Hindu thought and tradition. It can also inspire you to follow the principles of dharma, karma, and moksha in your own life.

              - -

              However, reading Manusmriti also requires a critical and contextual approach. You should not take everything in Manusmriti literally or blindly. You should also be aware of the historical and cultural factors that shaped Manusmriti. You should also be aware of the controversies and criticisms that Manusmriti has faced over time.

              - -

              Some of the issues that Manusmriti has been criticized for are its endorsement of caste discrimination, gender inequality, animal sacrifice, slavery, violence, superstition, and ritualism. Many modern scholars and reformers have challenged and rejected some of the views and practices that Manusmriti advocates. They have argued that Manusmriti is not a divine or eternal law book, but a human-made and time-bound document that reflects the biases and prejudices of its authors.

              - -

Therefore, reading Manusmriti should not be a matter of blind faith or dogma. It should be a matter of rational inquiry and open-mindedness. You should read Manusmriti with an objective and analytical mind, compare and contrast it with other Hindu texts and sources, and apply your own reason and conscience to judge what is right and wrong.

              -
              -
              \ No newline at end of file diff --git a/spaces/tomofi/MMOCR/tests/test_metrics/test_hmean_ic13.py b/spaces/tomofi/MMOCR/tests/test_metrics/test_hmean_ic13.py deleted file mode 100644 index ac02b38e67c1b0f28f96a93eececdb8225f2e802..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tests/test_metrics/test_hmean_ic13.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -"""Test hmean_ic13.""" -import math - -import pytest - -import mmocr.core.evaluation.hmean_ic13 as hmean_ic13 -import mmocr.core.evaluation.utils as utils - - -def test_compute_recall_precision(): - - gt_polys = [] - det_polys = [] - - # test invalid arguments. - with pytest.raises(AssertionError): - hmean_ic13.compute_recall_precision(1, 1) - - box1 = [0, 0, 1, 0, 1, 1, 0, 1] - - box2 = [0, 0, 10, 0, 10, 1, 0, 1] - - gt_polys = [utils.points2polygon(box1)] - det_polys = [utils.points2polygon(box2)] - recall, precision = hmean_ic13.compute_recall_precision( - gt_polys, det_polys) - assert recall == 1 - assert precision == 0.1 - - -def test_eval_hmean_ic13(): - det_boxes = [] - gt_boxes = [] - gt_ignored_boxes = [] - precision_thr = 0.4 - recall_thr = 0.8 - center_dist_thr = 1.0 - one2one_score = 1. - one2many_score = 0.8 - many2one_score = 1 - # test invalid arguments. - - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13([1], gt_boxes, gt_ignored_boxes, - precision_thr, recall_thr, center_dist_thr, - one2one_score, one2many_score, - many2one_score) - - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13(det_boxes, 1, gt_ignored_boxes, - precision_thr, recall_thr, center_dist_thr, - one2one_score, one2many_score, - many2one_score) - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, 1, precision_thr, - recall_thr, center_dist_thr, one2one_score, - one2many_score, many2one_score) - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, 1.1, - recall_thr, center_dist_thr, one2one_score, - one2many_score, many2one_score) - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, - precision_thr, 1.1, center_dist_thr, - one2one_score, one2many_score, - many2one_score) - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, - precision_thr, recall_thr, -1, - one2one_score, one2many_score, - many2one_score) - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, - precision_thr, recall_thr, center_dist_thr, - -1, one2many_score, many2one_score) - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, - precision_thr, recall_thr, center_dist_thr, - one2one_score, -1, many2one_score) - with pytest.raises(AssertionError): - hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, - precision_thr, recall_thr, center_dist_thr, - one2one_score, one2many_score, -1) - - # test one2one match - det_boxes = [[[0, 0, 1, 0, 1, 1, 0, 1], [10, 0, 11, 0, 11, 1, 10, 1]]] - gt_boxes = [[[0, 0, 1, 0, 1, 1, 0, 1]]] - gt_ignored_boxes = [[]] - dataset_result, img_result = hmean_ic13.eval_hmean_ic13( - det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, - center_dist_thr, one2one_score, one2many_score, many2one_score) - assert img_result[0]['recall'] == 1 - assert img_result[0]['precision'] == 0.5 - assert math.isclose(img_result[0]['hmean'], 2 * (0.5) / 1.5) - - # 
test one2many match - gt_boxes = [[[0, 0, 2, 0, 2, 1, 0, 1]]] - det_boxes = [[[0, 0, 1, 0, 1, 1, 0, 1], [1, 0, 2, 0, 2, 1, 1, 1]]] - dataset_result, img_result = hmean_ic13.eval_hmean_ic13( - det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, - center_dist_thr, one2one_score, one2many_score, many2one_score) - assert img_result[0]['recall'] == 0.8 - assert img_result[0]['precision'] == 1.6 / 2 - assert math.isclose(img_result[0]['hmean'], 2 * (0.64) / 1.6) - - # test many2one match - precision_thr = 0.6 - recall_thr = 0.8 - det_boxes = [[[0, 0, 2, 0, 2, 1, 0, 1]]] - gt_boxes = [[[0, 0, 1, 0, 1, 1, 0, 1], [1, 0, 2, 0, 2, 1, 1, 1]]] - dataset_result, img_result = hmean_ic13.eval_hmean_ic13( - det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, - center_dist_thr, one2one_score, one2many_score, many2one_score) - assert img_result[0]['recall'] == 1 - assert img_result[0]['precision'] == 1 - assert math.isclose(img_result[0]['hmean'], 1) diff --git a/spaces/tomofi/MMOCR/tests/test_utils/test_string_util.py b/spaces/tomofi/MMOCR/tests/test_utils/test_string_util.py deleted file mode 100644 index c0eb467892c1a7c2dc4d64db1a4e12bfb67b7cda..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tests/test_utils/test_string_util.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import pytest - -from mmocr.utils import StringStrip - - -def test_string_strip(): - strip_list = [True, False] - strip_pos_list = ['both', 'left', 'right'] - strip_str_list = [None, ' '] - - in_str_list = [ - ' hello ', 'hello ', ' hello', ' hello', 'hello ', 'hello ', 'hello', - 'hello', 'hello', 'hello', 'hello', 'hello' - ] - out_str_list = [ - 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', - 'hello', 'hello', 'hello', 'hello' - ] - - for idx1, strip in enumerate(strip_list): - for idx2, strip_pos in enumerate(strip_pos_list): - for idx3, strip_str in enumerate(strip_str_list): - tmp_args = dict( - strip=strip, strip_pos=strip_pos, strip_str=strip_str) - strip_class = StringStrip(**tmp_args) - i = idx1 * len(strip_pos_list) * len( - strip_str_list) + idx2 * len(strip_str_list) + idx3 - - assert strip_class(in_str_list[i]) == out_str_list[i] - - with pytest.raises(AssertionError): - StringStrip(strip='strip') - StringStrip(strip_pos='head') - StringStrip(strip_str=['\n', '\t']) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py deleted file mode 100644 index 68ce4d250ac673a274d1458963eb02614e4f5f98..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py deleted file mode 100644 index 723ab0295f8457c03114ca535dede951e7d5b169..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' 
- -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/tomofi/NDLOCR/src/separate_pages_ssd/ssd_tools/ssd_utils.py b/spaces/tomofi/NDLOCR/src/separate_pages_ssd/ssd_tools/ssd_utils.py deleted file mode 100644 index 0a9ffb16f2e2591b1ec02023bc48efafbc472855..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/separate_pages_ssd/ssd_tools/ssd_utils.py +++ /dev/null @@ -1,235 +0,0 @@ -"""Some utils for SSD.""" - -import numpy as np -import tensorflow as tf - - -class BBoxUtility(object): - """Utility class to do some stuff with bounding boxes and priors. - - # Arguments - num_classes: Number of classes including background. - priors: Priors and variances, numpy tensor of shape (num_priors, 8), - priors[i] = [xmin, ymin, xmax, ymax, varxc, varyc, varw, varh]. - overlap_threshold: Threshold to assign box to a prior. - nms_thresh: Nms threshold. - top_k: Number of total bboxes to be kept per image after nms step. - - # References - https://arxiv.org/abs/1512.02325 - """ - # TODO add setter methods for nms_thresh and top_K - def __init__(self, num_classes, priors=None, overlap_threshold=0.5, - nms_thresh=0.45, top_k=400): - self.num_classes = num_classes - self.priors = priors - self.num_priors = 0 if priors is None else len(priors) - self.overlap_threshold = overlap_threshold - self._nms_thresh = nms_thresh - self._top_k = top_k - self.boxes = tf.placeholder(dtype='float32', shape=(None, 4)) - self.scores = tf.placeholder(dtype='float32', shape=(None,)) - self.nms = tf.image.non_max_suppression(self.boxes, self.scores, - self._top_k, - iou_threshold=self._nms_thresh) - self.sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0})) - - @property - def nms_thresh(self): - return self._nms_thresh - - @nms_thresh.setter - def nms_thresh(self, value): - self._nms_thresh = value - self.nms = tf.image.non_max_suppression(self.boxes, self.scores, - self._top_k, - iou_threshold=self._nms_thresh) - - @property - def top_k(self): - return self._top_k - - @top_k.setter - def top_k(self, value): - self._top_k = value - self.nms = tf.image.non_max_suppression(self.boxes, self.scores, - self._top_k, - iou_threshold=self._nms_thresh) - - def iou(self, box): - """Compute intersection over union for the box with all priors. - - # Arguments - box: Box, numpy tensor of shape (4,). - - # Return - iou: Intersection over union, - numpy tensor of shape (num_priors). - """ - # compute intersection - inter_upleft = np.maximum(self.priors[:, :2], box[:2]) - inter_botright = np.minimum(self.priors[:, 2:4], box[2:]) - inter_wh = inter_botright - inter_upleft - inter_wh = np.maximum(inter_wh, 0) - inter = inter_wh[:, 0] * inter_wh[:, 1] - # compute union - area_pred = (box[2] - box[0]) * (box[3] - box[1]) - area_gt = (self.priors[:, 2] - self.priors[:, 0]) - area_gt *= (self.priors[:, 3] - self.priors[:, 1]) - union = area_pred + area_gt - inter - # compute iou - iou = inter / union - return iou - - def encode_box(self, box, return_iou=True): - """Encode box for training, do it only for assigned priors. - - # Arguments - box: Box, numpy tensor of shape (4,). - return_iou: Whether to concat iou to encoded values. - - # Return - encoded_box: Tensor with encoded box - numpy tensor of shape (num_priors, 4 + int(return_iou)). 
- """ - iou = self.iou(box) - encoded_box = np.zeros((self.num_priors, 4 + return_iou)) - assign_mask = iou > self.overlap_threshold - if not assign_mask.any(): - assign_mask[iou.argmax()] = True - if return_iou: - encoded_box[:, -1][assign_mask] = iou[assign_mask] - assigned_priors = self.priors[assign_mask] - box_center = 0.5 * (box[:2] + box[2:]) - box_wh = box[2:] - box[:2] - assigned_priors_center = 0.5 * (assigned_priors[:, :2] + - assigned_priors[:, 2:4]) - assigned_priors_wh = (assigned_priors[:, 2:4] - - assigned_priors[:, :2]) - # we encode variance - encoded_box[:, :2][assign_mask] = box_center - assigned_priors_center - encoded_box[:, :2][assign_mask] /= assigned_priors_wh - encoded_box[:, :2][assign_mask] /= assigned_priors[:, -4:-2] - encoded_box[:, 2:4][assign_mask] = np.log(box_wh / - assigned_priors_wh) - encoded_box[:, 2:4][assign_mask] /= assigned_priors[:, -2:] - return encoded_box.ravel() - - def assign_boxes(self, boxes): - """Assign boxes to priors for training. - - # Arguments - boxes: Box, numpy tensor of shape (num_boxes, 4 + num_classes), - num_classes without background. - - # Return - assignment: Tensor with assigned boxes, - numpy tensor of shape (num_boxes, 4 + num_classes + 8), - priors in ground truth are fictitious, - assignment[:, -8] has 1 if prior should be penalized - or in other words is assigned to some ground truth box, - assignment[:, -7:] are all 0. See loss for more details. - """ - assignment = np.zeros((self.num_priors, 4 + self.num_classes + 8)) - assignment[:, 4] = 1.0 - if len(boxes) == 0: - return assignment - encoded_boxes = np.apply_along_axis(self.encode_box, 1, boxes[:, :4]) - encoded_boxes = encoded_boxes.reshape(-1, self.num_priors, 5) - best_iou = encoded_boxes[:, :, -1].max(axis=0) - best_iou_idx = encoded_boxes[:, :, -1].argmax(axis=0) - best_iou_mask = best_iou > 0 - best_iou_idx = best_iou_idx[best_iou_mask] - assign_num = len(best_iou_idx) - encoded_boxes = encoded_boxes[:, best_iou_mask, :] - assignment[:, :4][best_iou_mask] = encoded_boxes[best_iou_idx, - np.arange(assign_num), - :4] - assignment[:, 4][best_iou_mask] = 0 - assignment[:, 5:-8][best_iou_mask] = boxes[best_iou_idx, 4:] - assignment[:, -8][best_iou_mask] = 1 - return assignment - - def decode_boxes(self, mbox_loc, mbox_priorbox, variances): - """Convert bboxes from local predictions to shifted priors. - - # Arguments - mbox_loc: Numpy array of predicted locations. - mbox_priorbox: Numpy array of prior boxes. - variances: Numpy array of variances. - - # Return - decode_bbox: Shifted priors. 
- """ - prior_width = mbox_priorbox[:, 2] - mbox_priorbox[:, 0] - prior_height = mbox_priorbox[:, 3] - mbox_priorbox[:, 1] - prior_center_x = 0.5 * (mbox_priorbox[:, 2] + mbox_priorbox[:, 0]) - prior_center_y = 0.5 * (mbox_priorbox[:, 3] + mbox_priorbox[:, 1]) - decode_bbox_center_x = mbox_loc[:, 0] * prior_width * variances[:, 0] - decode_bbox_center_x += prior_center_x - decode_bbox_center_y = mbox_loc[:, 1] * prior_width * variances[:, 1] - decode_bbox_center_y += prior_center_y - decode_bbox_width = np.exp(mbox_loc[:, 2] * variances[:, 2]) - decode_bbox_width *= prior_width - decode_bbox_height = np.exp(mbox_loc[:, 3] * variances[:, 3]) - decode_bbox_height *= prior_height - decode_bbox_xmin = decode_bbox_center_x - 0.5 * decode_bbox_width - decode_bbox_ymin = decode_bbox_center_y - 0.5 * decode_bbox_height - decode_bbox_xmax = decode_bbox_center_x + 0.5 * decode_bbox_width - decode_bbox_ymax = decode_bbox_center_y + 0.5 * decode_bbox_height - decode_bbox = np.concatenate((decode_bbox_xmin[:, None], - decode_bbox_ymin[:, None], - decode_bbox_xmax[:, None], - decode_bbox_ymax[:, None]), axis=-1) - decode_bbox = np.minimum(np.maximum(decode_bbox, 0.0), 1.0) - return decode_bbox - - def detection_out(self, predictions, background_label_id=0, keep_top_k=200, - confidence_threshold=0.01): - """Do non maximum suppression (nms) on prediction results. - - # Arguments - predictions: Numpy array of predicted values. - num_classes: Number of classes for prediction. - background_label_id: Label of background class. - keep_top_k: Number of total bboxes to be kept per image - after nms step. - confidence_threshold: Only consider detections, - whose confidences are larger than a threshold. - - # Return - results: List of predictions for every picture. Each prediction is: - [label, confidence, xmin, ymin, xmax, ymax] - """ - mbox_loc = predictions[:, :, :4] - variances = predictions[:, :, -4:] - mbox_priorbox = predictions[:, :, -8:-4] - mbox_conf = predictions[:, :, 4:-8] - results = [] - for i in range(len(mbox_loc)): - results.append([]) - decode_bbox = self.decode_boxes(mbox_loc[i], - mbox_priorbox[i], variances[i]) - for c in range(self.num_classes): - if c == background_label_id: - continue - c_confs = mbox_conf[i, :, c] - c_confs_m = c_confs > confidence_threshold - if len(c_confs[c_confs_m]) > 0: - boxes_to_process = decode_bbox[c_confs_m] - confs_to_process = c_confs[c_confs_m] - feed_dict = {self.boxes: boxes_to_process, - self.scores: confs_to_process} - idx = self.sess.run(self.nms, feed_dict=feed_dict) - good_boxes = boxes_to_process[idx] - confs = confs_to_process[idx][:, None] - labels = c * np.ones((len(idx), 1)) - c_pred = np.concatenate((labels, confs, good_boxes), - axis=1) - results[-1].extend(c_pred) - if len(results[-1]) > 0: - results[-1] = np.array(results[-1]) - argsort = np.argsort(results[-1][:, 1])[::-1] - results[-1] = results[-1][argsort] - results[-1] = results[-1][:keep_top_k] - return results diff --git a/spaces/tridragonevo/chat-gpt-voice-stream/azure_utils.py b/spaces/tridragonevo/chat-gpt-voice-stream/azure_utils.py deleted file mode 100644 index 4173eaa689abe9b7b6b66ed3fcf1ede591655a53..0000000000000000000000000000000000000000 --- a/spaces/tridragonevo/chat-gpt-voice-stream/azure_utils.py +++ /dev/null @@ -1,155 +0,0 @@ -# This class stores Azure voice data. Specifically, the class stores several records containing -# language, lang_code, gender, voice_id and engine. 
The class also has a method to return the -# voice_id, lang_code and engine given a language and gender. - -NEURAL_ENGINE = "neural" -STANDARD_ENGINE = "standard" - - -class AzureVoiceData: - def get_voice(self, language, gender): - for voice in self.voice_data: - if voice['language'] == language and voice['gender'] == gender: - return voice['azure_voice'] - return None - - def __init__(self): - self.voice_data = [ - {'language': 'Arabic', - 'azure_voice': 'ar-EG-ShakirNeural', - 'gender': 'Male'}, - {'language': 'Arabic (Gulf)', - 'azure_voice': 'ar-KW-FahedNeural', - 'gender': 'Male'}, - {'language': 'Catalan', - 'azure_voice': 'ca-ES-EnricNeural', - 'gender': 'Male'}, - {'language': 'Chinese (Cantonese)', - 'azure_voice': 'yue-CN-YunSongNeural', - 'gender': 'Male'}, - {'language': 'Chinese (Mandarin)', - 'azure_voice': 'zh-CN-YunxiNeural', - 'gender': 'Male'}, - {'language': 'Danish', - 'azure_voice': 'da-DK-JeppeNeural', - 'gender': 'Male'}, - {'language': 'Dutch', - 'azure_voice': 'nl-NL-MaartenNeural', - 'gender': 'Male'}, - {'language': 'English (Australian)', - 'azure_voice': 'en-AU-KenNeural', - 'gender': 'Male'}, - {'language': 'English (British)', - 'azure_voice': 'en-GB-RyanNeural', - 'gender': 'Male'}, - {'language': 'English (Indian)', - 'azure_voice': 'en-IN-PrabhatNeural', - 'gender': 'Male'}, - {'language': 'English (New Zealand)', - 'azure_voice': 'en-NZ-MitchellNeural', - 'gender': 'Male'}, - {'language': 'English (South African)', - 'azure_voice': 'en-ZA-LukeNeural', - 'gender': 'Male'}, - {'language': 'English (US)', - 'azure_voice': 'en-US-ChristopherNeural', - 'gender': 'Male'}, - {'language': 'English (Welsh)', - 'azure_voice': 'cy-GB-AledNeural', - 'gender': 'Male'}, - {'language': 'Finnish', - 'azure_voice': 'fi-FI-HarriNeural', - 'gender': 'Male'}, - {'language': 'French', - 'azure_voice': 'fr-FR-HenriNeural', - 'gender': 'Male'}, - {'language': 'French (Canadian)', - 'azure_voice': 'fr-CA-AntoineNeural', - 'gender': 'Male'}, - {'language': 'German', - 'azure_voice': 'de-DE-KlausNeural', - 'gender': 'Male'}, - {'language': 'German (Austrian)', - 'azure_voice': 'de-AT-JonasNeural', - 'gender': 'Male'}, - {'language': 'Hindi', - 'azure_voice': 'hi-IN-MadhurNeural', - 'gender': 'Male'}, - {'language': 'Icelandic', - 'azure_voice': 'is-IS-GunnarNeural', - 'gender': 'Male'}, - {'language': 'Italian', - 'azure_voice': 'it-IT-GianniNeural', - 'gender': 'Male'}, - {'language': 'Japanese', - 'azure_voice': 'ja-JP-KeitaNeural', - 'gender': 'Male'}, - {'language': 'Korean', - 'azure_voice': 'ko-KR-GookMinNeural', - 'gender': 'Male'}, - {'language': 'Norwegian', - 'azure_voice': 'nb-NO-FinnNeural', - 'gender': 'Male'}, - {'language': 'Polish', - 'azure_voice': 'pl-PL-MarekNeural', - 'gender': 'Male'}, - {'language': 'Portuguese (Brazilian)', - 'azure_voice': 'pt-BR-NicolauNeural', - 'gender': 'Male'}, - {'language': 'Portuguese (European)', - 'azure_voice': 'pt-PT-DuarteNeural', - 'gender': 'Male'}, - {'language': 'Romanian', - 'azure_voice': 'ro-RO-EmilNeural', - 'gender': 'Male'}, - {'language': 'Russian', - 'azure_voice': 'ru-RU-DmitryNeural', - 'gender': 'Male'}, - {'language': 'Spanish (European)', - 'azure_voice': 'es-ES-TeoNeural', - 'gender': 'Male'}, - {'language': 'Spanish (Mexican)', - 'azure_voice': 'es-MX-LibertoNeural', - 'gender': 'Male'}, - {'language': 'Spanish (US)', - 'azure_voice': 'es-US-AlonsoNeural"', - 'gender': 'Male'}, - {'language': 'Swedish', - 'azure_voice': 'sv-SE-MattiasNeural', - 'gender': 'Male'}, - {'language': 'Turkish', - 'azure_voice': 
'tr-TR-AhmetNeural', - 'gender': 'Male'}, - {'language': 'Welsh', - 'azure_voice': 'cy-GB-AledNeural', - 'gender': 'Male'}, - ] - - -# Run from the command-line -if __name__ == '__main__': - azure_voice_data = AzureVoiceData() - - azure_voice = azure_voice_data.get_voice('English (US)', 'Male') - print('English (US)', 'Male', azure_voice) - - azure_voice = azure_voice_data.get_voice('English (US)', 'Female') - print('English (US)', 'Female', azure_voice) - - azure_voice = azure_voice_data.get_voice('French', 'Female') - print('French', 'Female', azure_voice) - - azure_voice = azure_voice_data.get_voice('French', 'Male') - print('French', 'Male', azure_voice) - - azure_voice = azure_voice_data.get_voice('Japanese', 'Female') - print('Japanese', 'Female', azure_voice) - - azure_voice = azure_voice_data.get_voice('Japanese', 'Male') - print('Japanese', 'Male', azure_voice) - - azure_voice = azure_voice_data.get_voice('Hindi', 'Female') - print('Hindi', 'Female', azure_voice) - - azure_voice = azure_voice_data.get_voice('Hindi', 'Male') - print('Hindi', 'Male', azure_voice) diff --git a/spaces/ttt246/brain/Brain/src/common/program_type.py b/spaces/ttt246/brain/Brain/src/common/program_type.py deleted file mode 100644 index 00bb7455f3f48a3e2b05c70ada37c30b10c1f7d5..0000000000000000000000000000000000000000 --- a/spaces/ttt246/brain/Brain/src/common/program_type.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Program Type for all commands to categorize""" - - -class ProgramType: - BROWSER = "browser" - ALERT = "alert" - IMAGE = "image" - SMS = "sms" - CONTACT = "contact" - MESSAGE = "message" - AUTO_TASK = "autotask" - RAILS_OFF_TOPIC = "rails_off_topic" - - class BrowserType: - OPEN_TAB = "opentab" - OPEN_TAB_SEARCH = "opentabsearch" - CLOSE_TAB = "closetab" - PREVIOUS_PAGE = "previouspage" - NEXT_PAGE = "nextpage" - SCROLL_UP = "scrollup" - SCROLL_DOWN = "scrolldown" - SCROLL_TOP = "scrolltop" - SCROLL_BOTTOM = "scrollbottom" - SELECT_ITEM_DETAIL_INFO = "selectitemdetailinfo" - SELECT_ITEM = "selectitem" - MESSAGE = "message" - ASK_WEBSITE = "askwebsite" diff --git a/spaces/typesdigital/CodeX/app.py b/spaces/typesdigital/CodeX/app.py deleted file mode 100644 index 29be0082c51e83860a9827c5daf3f123ece44111..0000000000000000000000000000000000000000 --- a/spaces/typesdigital/CodeX/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer -import openai -import secrets - -# Set up the OpenAI API credentials -openai.api_key = secrets.OPENAI_API_KEY - -# Load the Hugging Face model and tokenizer -model_name = "Helsinki-NLP/opus-mt-python-en" -tokenizer = AutoTokenizer.from_pretrained(model_name) -model = AutoModelForSeq2SeqLM.from_pretrained(model_name) - -# Define a function that takes a user's input code as a prompt and uses the OpenAI API and Hugging Face model to generate a corrected version of the code -def correct_code(prompt): - # Use the OpenAI API to generate suggestions for fixing syntax errors in the code - response = openai.Completion.create( - engine="davinci-codex", - prompt=prompt, - max_tokens=1024, - n=1, - stop=None, - temperature=0.5, - ) - - # Extract the corrected code from the API response - corrected_code = response.choices[0].text.strip() - - # Use the Hugging Face model to generate a more natural-sounding version of the corrected code - input_ids = tokenizer.encode(corrected_code, return_tensors="pt") - outputs = model.generate(input_ids) - corrected_code = tokenizer.decode(outputs[0], skip_special_tokens=True) - - return 
corrected_code - -# Define a Gradio interface for the code assistant -input_text = gr.inputs.Textbox(lines=10, label="Input Code") -output_text = gr.outputs.Textbox(label="Corrected Code") - -def generate_code(input_text): - corrected_code = correct_code(input_text) - return corrected_code - -# Set up the OpenAI API credentials -secrets.OPENAI_API_KEY = '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' -openai.api_key = secrets.OPENAI_API_KEY - -# Define the Gradio interface -interface = gr.Interface(fn=generate_code, inputs=input_text, outputs=output_text, title="AI Code Assistant", description="Enter your code and click submit to generate a corrected version.") - -# Run the Gradio interface -interface.launch() \ No newline at end of file diff --git a/spaces/typesdigital/WeatherIAPP/app.py b/spaces/typesdigital/WeatherIAPP/app.py deleted file mode 100644 index f8a0de401f2cd41a3ffdc84aa760a63064554508..0000000000000000000000000000000000000000 --- a/spaces/typesdigital/WeatherIAPP/app.py +++ /dev/null @@ -1,55 +0,0 @@ -import gradio as gr -import openai -import requests -import datetime - -openai.api_key = "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" -weather_api_key = "1aafc3163909c1493596da9340e00aee" - -def get_weather_data(city): - url = f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={weather_api_key}&units=metric" - response = requests.get(url) - data = response.json() - - if data["cod"] == "404": - return "City not found." - - weather = data["weather"][0]["description"] - temperature = data["main"]["temp"] - location = data["name"] - time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - return f"Location: {location}\nTime: {time}\nWeather: {weather}\nTemperature: {temperature}°C" - -def get_weather_feedback(user_input): - response = openai.Completion.create( - engine="text-davinci-003", - prompt=f"The weather today is {user_input}.", - max_tokens=50, - n=1, - stop=None, - temperature=0.6 - ) - feedback = response.choices[0].text.strip() - return feedback - -iface = gr.Interface( - fn=get_weather_data, - inputs="text", - outputs="text", - title="Weather Forecast", - description="Enter the name of a city to get the weather forecast.", - examples=[["New York"], ["London"], ["Tokyo"]] -) - -feedback_iface = gr.Interface( - fn=get_weather_feedback, - inputs="text", - outputs="text", - title="Feedback", - description="Enter a weather description to get feedback.", - examples=[["sunny"], ["rainy"], ["cloudy"]] -) - -if __name__ == "__main__": - iface.launch() diff --git a/spaces/ulysses115/Nogizaka46-so/onnx/model_onnx_48k.py b/spaces/ulysses115/Nogizaka46-so/onnx/model_onnx_48k.py deleted file mode 100644 index d35c92e5d0606d29f40a9ad08a50b60cc93bc48b..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/Nogizaka46-so/onnx/model_onnx_48k.py +++ /dev/null @@ -1,328 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import modules.attentions as attentions -import modules.commons as commons -import modules.modules as modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from modules.commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - 
super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_lengths, f0=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = x + self.f0_emb(f0.long()).transpose(1,2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - - return z, m, logs, x_mask - - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - 
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = 
torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.enc_p_ = TextEncoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16,0, filter_channels, n_heads, p_dropout) - hps = { - "sampling_rate": 48000, - "inter_channels": 192, - "resblock": "1", - "resblock_kernel_sizes": [3, 7, 11], - "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - "upsample_rates": [10, 8, 2, 2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16, 16, 4, 4], - "gin_channels": 256, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - def forward(self, c, c_lengths, f0, g=None): - g = self.emb_g(g.unsqueeze(0)).transpose(1,2) - z_p, m_p, logs_p, c_mask = self.enc_p_(c.transpose(1,2), c_lengths, f0=f0_to_coarse(f0)) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g, f0=f0.float()) - return o - diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Airport Firefighter Simulator Free UPD Download.md b/spaces/usbethFlerru/sovits-modelsV2/example/Airport Firefighter Simulator Free UPD Download.md deleted file mode 100644 index e37bb69831203bba7fd6f60772e96a9374439c2c..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Airport Firefighter Simulator Free UPD Download.md +++ /dev/null @@ -1,24 +0,0 @@ - -

It is the creation of the German studio Visual Imagination Software. In English-speaking countries it is known as Airport Firefighter Simulator, while in its native Germany it carries the name Flughafen-Feuerwehr-Simulator. The map shows AI-controlled vehicles such as planes, carriers, buses, and security cars circulating continuously. The free roaming available around the airport can be interrupted at any moment by emergency alerts.

              -

In Airport Firefighter Simulator for PC, fire can be a friend, but it can also be a fierce enemy! There is no place more dangerous than a modern airport, where thousands of passengers are in close proximity to flammable jet fuel and hazardous materials. The stakes are high, and so are the demands placed on the airport's elite fire brigade. Even the most routine refueling and cargo-handling tasks carry the chance of a fire, so you must stay vigilant to keep a minor accident from turning into an inferno. Your duties as a firefighter mean keeping watch on day and night shifts, never knowing exactly when you will be called out on one of dozens of dramatic missions. You can drive highly realistic models of specialized firefighting vehicles across the airport's 20 square kilometres, including the famous PANTHER, each vehicle complete with a unique crew ready to go into service at a moment's notice.

              -

              airport firefighter simulator free download


              Download –––––>>> https://urlcod.com/2uyVC0



              -

As the name suggests, it is a game that lets you play the role of a firefighter working at an airport fire station. It is produced by the German studio Visual Imagination Software; in English-speaking countries it is sold as Airport Firefighter Simulator, while in its native Germany it goes by Flughafen-Feuerwehr-Simulator. Also check out Farming Simulator 15, an agricultural simulation game developed by Giants Software and published by Focus Home Entertainment.

              -

In Airport Firefighter Simulator, objects such as boxes and platforms interact with the flow of water, allowing you to knock them over, move them, and destroy them. Different difficulty levels help you survive even the worst crises. Experience exciting situations that challenge your inner hero! Whether it is an engine on fire, an explosion in the passenger terminal, or a blazing inferno in a huge cabin, your service matters! The game plays out entirely on the grounds of an airport. AI-controlled units such as planes, buses, carriers, and security vehicles constantly circulate on the map. Free roaming around the airport is occasionally interrupted by alerts. Most of them require you to reach the fire, leave your vehicle, and use a fire extinguisher or hose. There are other tasks as well, such as finding holes in the perimeter fence or cooling the wheels of overheated planes with special fans. You must also remember to top up your fuel and water levels regularly.

              -

The game is divided into daily shifts. Successfully completing missions earns points that let you advance to the next rank of the firefighting profession. Higher ranks give access to better equipment, but they also bring more difficult tasks: you must deal with larger fires under tighter time limits. The ultimate goal is to unlock the modern PANTHER vehicles, equipped with various specialized on-board systems and a mechanical water cannon. The game is built entirely on a 3D graphics engine; vehicles show a good level of detail, both outside and inside the cab. The environment looks less impressive, but it makes up for this with a huge map (about 20 square kilometres) and a realistic day-and-night cycle. Anything can happen at a busy airport, and it will demand split-second decisions and nerves of steel. Only the best firefighters can work in an environment where a small spark can turn into a catastrophe. Are you ready to turn up the heat?

              -

              People love free steam games, no doubt. But what many people hate is downloading so many parts and trying to install them on their own. This is why we are the only site that pre-installs every game for you. We have many categories like shooters, action, racing, simulators and even VR games! We strive to satisfy our users and ask for nothing in return. We revolutionized the downloading scene and will continue being your #1 site for free games.

              -

hotfix download for windows 10
high definition audio device driver windows 10 download
download windows media player 12 for windows 10 64 bit
best offline games pc free download
filelinked apk download for windows 10
diamond twister 2 game free download for pc
epson stylus photo r200 driver download windows 7 free
epass 2003 token driver download for windows 10
download windows 10 dark mode
math lab free download windows 10
              ebook reader download windows 8 free

              windows live essentials 2009 xp download free

              download windows 10 free upgrade 2019 free

              airport simulator games free download full version for pc

              download game bola untuk laptop windows 10

              octagon game download windows 10 free

              hadoop software free download for windows 10 64 bit

              and1 streetball pc game free download

              eclipse 2017 download for windows 10

              download microsoft office picture manager windows 10

              windows iso for mac download free
              free malwarebytes download windows 8 free
              cant download minecraft windows 10 free
              gta vice city download for pc windows 10 crack
              angry birds epic download for pc free full version

              -

              action and adventure games for pc free download
              gta 5 download for pc windows 8
              transmac download for windows 10 free
              candy crush game free download for pc windows 10
              gta vice city apk download for windows 10

              -

              microsoft security essentials download for windows xp sp2 free
              djay 2 download for windows free
              free download teracopy latest version for windows xp free
              bluestacks free download for pc 64 bit
              intel hd graphics 3000 driver windows 10 64 bit download free

              -

              fl studio 11 free download full version windows 10

              best high graphics pc games download free

              call of duty game download for pc windows 7

              android x86 download for windows 10

              d3dcompiler_47 dll windows 10 download

              download patch cleaner for windows 10

              dell windows 10 upgrade download

              ps2 to usb converter driver windows 7 download free

              download windows 10 iso 64 bit google drive

              hp color laserjet 1600 driver for windows 10 free download

              -

              -

              maxthon browser free download for windows 10
              driver installer for windows 10 free download
              donkey kong country 2 game free download for pc
              altera usb blaster driver download windows 10
              download realtek lan driver for windows 10 64 bit

              -

              windows 7 premium recovery disk download free
              download mswlogo for windows 10
              download flash player for pc windows 10
              incredimail for windows 8.1 download free
              cald dictionary free download for pc

              -

              canon lbp 2900 driver download for windows 8 64 bit free
              dj software download windows 7 free
              ds3 tool download windows 10
              download and install windows media player free
              free download windows messenger for xp free

              -

              5kplayer download for windows 10

              download itunes for windows 10 64 bit offline installer

              free file sync download windows 10

              ezvid free download for windows 10

              best video editing software for pc download free

              baby hazel games free download for pc

              download candy crush for windows 10

              download strategy games for pc highly compressed

              new pc studio samsung download free windows 7

              adobe pagemaker 7.0 download for windows 10

              -

themes for windows 7 ultimate 64 bit free download 3d free
orca download windows xp free
equalizer windows 10 download free
rfsim99 download windows 7 free
quincy 2005 free download for windows free
windows xp 32bit iso download free
batman arkham knight download for windows 10
windows mobile microsoft download free
igi 3 game download for pc windows 10
mangal marathi font free download for windows 10
              airport firefighter simulator free download pc

              java programming language download windows 10

              lenovo t470 drivers windows 10 64 bit download

              9apps download pc windows 10

              autocad 2006 windows 7 64 bit free download free

              download sql free for windows 10

              anthem download pc game

              pc games free download full version for windows 7 ultimate

              avidemux download for pc windows 7

              download cubase 5 full crack windows 10

              the sims freeplay free download for windows 7 free
              kaspersky endpoint security 10 for windows workstations component download
              browsec download for windows 10
              attack on titan game download free pc
              ms word 2007 download for pc windows 10 64 bit

              -

              free download audio video drivers for windows xp free
              diablo 2 download pc full game free
              internet explorer 7 for windows 7 free download 32 bit free
              animated wallpaper windows 10 free download
              calculator apps free download for pc

              -

              kaspersky antivirus download for pc windows 10
              euro truck simulator 2 full game download pc
              windows 8.1 crack free download free
              free download torrentbit software for windows 10
              avira download windows 10

              -

              download free adblock for windows 10

              itunes for windows 10 download 64 bit

              free download gw basic for windows 8 64 bit free

              asus eee pc windows 7 starter iso download

              logitech g35 drivers download windows 7 free

              iphoto download windows 7 free free

              download free audio driver for windows 10

              jdk 8 free download for windows 7 32 bit free

              download illustrator free windows 10

              chat video free download pc

              -

              All shops featured on GG.deals will deliver your game immediately after the payment has been approved. This will be either in the form of direct download or PC key - depending on the store of your choice. After you activate key on a corresponding platform, you will be able to download and play your game for free. If you don't know how to activate the key, check out the tutorials section on the bottom of the page.

              -
              -
              \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Delphi 20141 Keygen Activation 2014 Release 1 Cdp Ds150e Cdp Cars Trucks Vci Rar BEST.md b/spaces/usbethFlerru/sovits-modelsV2/example/Delphi 20141 Keygen Activation 2014 Release 1 Cdp Ds150e Cdp Cars Trucks Vci Rar BEST.md deleted file mode 100644 index 93326b552fd9ebd723b04e5ef468e8d7893fcc20..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Delphi 20141 Keygen Activation 2014 Release 1 Cdp Ds150e Cdp Cars Trucks Vci Rar BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Delphi 20141 Keygen Activation 2014 Release 1 Cdp Ds150e Cdp Cars Trucks Vci Rar


              Download Zip 🆗 https://urlcod.com/2uyWAD



              - -Download Ebook Yazid Bin Abdul Qadir Jawas · Hebden Chemistry 11 A Workbook For Students Pdf 20 · philips channel editor 2.0.5.9.34 1fdad05405
              -
              -
              -

              diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/index.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/index.md deleted file mode 100644 index 74f8feee4004003cfbd01e7f40455f586e994fbf..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/index.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -comments: true -description: Explore the extensive functionalities of the YOLOv5 object detection model, renowned for its speed and precision. Dive into our comprehensive guide for installation, architectural insights, use-cases, and more to unlock the full potential of YOLOv5 for your computer vision applications. -keywords: ultralytics, yolov5, object detection, deep learning, pytorch, computer vision, tutorial, architecture, documentation, frameworks, real-time, model training, multicore, multithreading ---- - -# Comprehensive Guide to Ultralytics YOLOv5 - -
              -

              - - -

              - -YOLOv5 CI -YOLOv5 Citation -Docker Pulls -
              -Run on Gradient -Open In Colab -Open In Kaggle -
              -
              - -Welcome to the Ultralytics' YOLOv5 🚀 Documentation! YOLOv5, the fifth iteration of the revolutionary "You Only Look Once" object detection model, is designed to deliver high-speed, high-accuracy results in real-time. -

              -Built on PyTorch, this powerful deep learning framework has garnered immense popularity for its versatility, ease of use, and high performance. Our documentation guides you through the installation process, explains the architectural nuances of the model, showcases various use-cases, and provides a series of detailed tutorials. These resources will help you harness the full potential of YOLOv5 for your computer vision projects. Let's get started! - -
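
As a quick, hedged taste of that PyTorch workflow, the sketch below loads a pretrained YOLOv5s model through the official `ultralytics/yolov5` PyTorch Hub entry point and runs inference on a single image. The image path `zidane.jpg` is only a placeholder, and internet access is assumed for the first download of the repository and weights.

```python
import torch

# Minimal inference sketch (assumes internet access on first run and a local test image).
model = torch.hub.load("ultralytics/yolov5", "yolov5s", pretrained=True)

results = model("zidane.jpg")          # also accepts URLs, PIL images, and numpy/OpenCV arrays
results.print()                        # per-image summary of detections
detections = results.pandas().xyxy[0]  # DataFrame: xmin, ymin, xmax, ymax, confidence, class, name
print(detections.head())
```
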
              - -## Tutorials - -Here's a compilation of comprehensive tutorials that will guide you through different aspects of YOLOv5. - -* [Train Custom Data](tutorials/train_custom_data.md) 🚀 RECOMMENDED: Learn how to train the YOLOv5 model on your custom dataset. -* [Tips for Best Training Results](tutorials/tips_for_best_training_results.md) ☘️: Uncover practical tips to optimize your model training process. -* [Multi-GPU Training](tutorials/multi_gpu_training.md): Understand how to leverage multiple GPUs to expedite your training. -* [PyTorch Hub](tutorials/pytorch_hub_model_loading.md) 🌟 NEW: Learn to load pre-trained models via PyTorch Hub. -* [TFLite, ONNX, CoreML, TensorRT Export](tutorials/model_export.md) 🚀: Understand how to export your model to different formats. -* [NVIDIA Jetson platform Deployment](tutorials/running_on_jetson_nano.md) 🌟 NEW: Learn how to deploy your YOLOv5 model on NVIDIA Jetson platform. -* [Test-Time Augmentation (TTA)](tutorials/test_time_augmentation.md): Explore how to use TTA to improve your model's prediction accuracy. -* [Model Ensembling](tutorials/model_ensembling.md): Learn the strategy of combining multiple models for improved performance. -* [Model Pruning/Sparsity](tutorials/model_pruning_and_sparsity.md): Understand pruning and sparsity concepts, and how to create a more efficient model. -* [Hyperparameter Evolution](tutorials/hyperparameter_evolution.md): Discover the process of automated hyperparameter tuning for better model performance. -* [Transfer Learning with Frozen Layers](tutorials/transfer_learning_with_frozen_layers.md): Learn how to implement transfer learning by freezing layers in YOLOv5. -* [Architecture Summary](tutorials/architecture_description.md) 🌟 Delve into the structural details of the YOLOv5 model. -* [Roboflow for Datasets](tutorials/roboflow_datasets_integration.md): Understand how to utilize Roboflow for dataset management, labeling, and active learning. -* [ClearML Logging](tutorials/clearml_logging_integration.md) 🌟 Learn how to integrate ClearML for efficient logging during your model training. -* [YOLOv5 with Neural Magic](tutorials/neural_magic_pruning_quantization.md) Discover how to use Neural Magic's Deepsparse to prune and quantize your YOLOv5 model. -* [Comet Logging](tutorials/comet_logging_integration.md) 🌟 NEW: Explore how to utilize Comet for improved model training logging. - -## Environments - -YOLOv5 is designed to be run in the following up-to-date, verified environments, with all dependencies (including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/)) pre-installed: - -- **Notebooks** with free - GPU: Run on Gradient Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](environments/google_cloud_quickstart_tutorial.md) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](environments/aws_quickstart_tutorial.md) -- **Docker Image**. See [Docker Quickstart Guide](environments/docker_image_quickstart_tutorial.md) Docker Pulls - -## Status - -YOLOv5 CI - -This badge signifies that all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify the correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and with every new commit. - -
              -
              - - - - - - - - - - - - - - - - - - - - -
              \ No newline at end of file diff --git a/spaces/venkat-natchi/yolov3_obj_detector/utils.py b/spaces/venkat-natchi/yolov3_obj_detector/utils.py deleted file mode 100644 index 32c000b3980d6018c54cf2f63984430c6a940ca1..0000000000000000000000000000000000000000 --- a/spaces/venkat-natchi/yolov3_obj_detector/utils.py +++ /dev/null @@ -1,533 +0,0 @@ -import config -import matplotlib.pyplot as plt -import matplotlib.patches as patches -import numpy as np -import os -import random -import torch - -from collections import Counter -from tqdm import tqdm - - -def iou_width_height(boxes1, boxes2): - """ - Parameters: - boxes1 (tensor): width and height of the first bounding boxes - boxes2 (tensor): width and height of the second bounding boxes - Returns: - tensor: Intersection over union of the corresponding boxes - """ - intersection = torch.min(boxes1[..., 0], boxes2[..., 0]) * torch.min( - boxes1[..., 1], boxes2[..., 1] - ) - union = ( - boxes1[..., 0] * boxes1[..., 1] + boxes2[..., 0] * boxes2[..., 1] - intersection - ) - return intersection / union - - -def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"): - """ - Video explanation of this function: - https://youtu.be/XXYG5ZWtjj0 - - This function calculates intersection over union (iou) given pred boxes - and target boxes. - - Parameters: - boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4) - boxes_labels (tensor): Correct labels of Bounding Boxes (BATCH_SIZE, 4) - box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2) - - Returns: - tensor: Intersection over union for all examples - """ - - if box_format == "midpoint": - box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2 - box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2 - box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2 - box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2 - box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2 - box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2 - box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2 - box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2 - - if box_format == "corners": - box1_x1 = boxes_preds[..., 0:1] - box1_y1 = boxes_preds[..., 1:2] - box1_x2 = boxes_preds[..., 2:3] - box1_y2 = boxes_preds[..., 3:4] - box2_x1 = boxes_labels[..., 0:1] - box2_y1 = boxes_labels[..., 1:2] - box2_x2 = boxes_labels[..., 2:3] - box2_y2 = boxes_labels[..., 3:4] - - x1 = torch.max(box1_x1, box2_x1) - y1 = torch.max(box1_y1, box2_y1) - x2 = torch.min(box1_x2, box2_x2) - y2 = torch.min(box1_y2, box2_y2) - - intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0) - box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1)) - box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1)) - - return intersection / (box1_area + box2_area - intersection + 1e-6) - - -def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"): - """ - Video explanation of this function: - https://youtu.be/YDkjWEN8jNA - - Does Non Max Suppression given bboxes - - Parameters: - bboxes (list): list of lists containing all bboxes with each bboxes - specified as [class_pred, prob_score, x1, y1, x2, y2] - iou_threshold (float): threshold where predicted bboxes is correct - threshold (float): threshold to remove predicted bboxes (independent of IoU) - box_format (str): "midpoint" or "corners" used to specify bboxes - - Returns: - list: bboxes after performing NMS given a specific IoU threshold - """ - - assert type(bboxes) == list - - 
bboxes = [box for box in bboxes if box[1] > threshold] - bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True) - bboxes_after_nms = [] - - while bboxes: - chosen_box = bboxes.pop(0) - - bboxes = [ - box - for box in bboxes - if box[0] != chosen_box[0] - or intersection_over_union( - torch.tensor(chosen_box[2:]), - torch.tensor(box[2:]), - box_format=box_format, - ) - < iou_threshold - ] - - bboxes_after_nms.append(chosen_box) - - return bboxes_after_nms - - -def mean_average_precision( - pred_boxes, true_boxes, iou_threshold=0.5, box_format="midpoint", num_classes=20 -): - """ - Video explanation of this function: - https://youtu.be/FppOzcDvaDI - - This function calculates mean average precision (mAP) - - Parameters: - pred_boxes (list): list of lists containing all bboxes with each bboxes - specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2] - true_boxes (list): Similar as pred_boxes except all the correct ones - iou_threshold (float): threshold where predicted bboxes is correct - box_format (str): "midpoint" or "corners" used to specify bboxes - num_classes (int): number of classes - - Returns: - float: mAP value across all classes given a specific IoU threshold - """ - - # list storing all AP for respective classes - average_precisions = [] - - # used for numerical stability later on - epsilon = 1e-6 - - for c in tqdm(range(num_classes)): - detections = [] - ground_truths = [] - - # Go through all predictions and targets, - # and only add the ones that belong to the - # current class c - for detection in pred_boxes: - if detection[1] == c: - detections.append(detection) - - for true_box in true_boxes: - if true_box[1] == c: - ground_truths.append(true_box) - - # find the amount of bboxes for each training example - # Counter here finds how many ground truth bboxes we get - # for each training example, so let's say img 0 has 3, - # img 1 has 5 then we will obtain a dictionary with: - # amount_bboxes = {0:3, 1:5} - amount_bboxes = Counter([gt[0] for gt in ground_truths]) - - # We then go through each key, val in this dictionary - # and convert to the following (w.r.t same example): - # ammount_bboxes = {0:torch.tensor[0,0,0], 1:torch.tensor[0,0,0,0,0]} - for key, val in amount_bboxes.items(): - amount_bboxes[key] = torch.zeros(val) - - # sort by box probabilities which is index 2 - detections.sort(key=lambda x: x[2], reverse=True) - TP = torch.zeros((len(detections))) - FP = torch.zeros((len(detections))) - total_true_bboxes = len(ground_truths) - - # If none exists for this class then we can safely skip - if total_true_bboxes == 0: - continue - - for detection_idx, detection in enumerate(detections): - # Only take out the ground_truths that have the same - # training idx as detection - ground_truth_img = [ - bbox for bbox in ground_truths if bbox[0] == detection[0] - ] - - num_gts = len(ground_truth_img) - best_iou = 0 - - for idx, gt in enumerate(ground_truth_img): - iou = intersection_over_union( - torch.tensor(detection[3:]), - torch.tensor(gt[3:]), - box_format=box_format, - ) - - if iou > best_iou: - best_iou = iou - best_gt_idx = idx - - if best_iou > iou_threshold: - # only detect ground truth detection once - if amount_bboxes[detection[0]][best_gt_idx] == 0: - # true positive and add this bounding box to seen - TP[detection_idx] = 1 - amount_bboxes[detection[0]][best_gt_idx] = 1 - else: - FP[detection_idx] = 1 - - # if IOU is lower then the detection is a false positive - else: - FP[detection_idx] = 1 - - TP_cumsum = torch.cumsum(TP, dim=0) - FP_cumsum = 
torch.cumsum(FP, dim=0) - recalls = TP_cumsum / (total_true_bboxes + epsilon) - precisions = TP_cumsum / (TP_cumsum + FP_cumsum + epsilon) - precisions = torch.cat((torch.tensor([1]), precisions)) - recalls = torch.cat((torch.tensor([0]), recalls)) - # torch.trapz for numerical integration - average_precisions.append(torch.trapz(precisions, recalls)) - - return sum(average_precisions) / len(average_precisions) - - -def plot_image(image, boxes, return_fig=False): - """Plots predicted bounding boxes on the image""" - cmap = plt.get_cmap("tab20b") - class_labels = config.COCO_LABELS if config.DATASET=='COCO' \ - else config.PASCAL_CLASSES - colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))] - im = np.array(image) - height, width, _ = im.shape - - # Create figure and axes - fig, ax = plt.subplots(1) - # Display the image - ax.imshow(im) - - # box[0] is x midpoint, box[2] is width - # box[1] is y midpoint, box[3] is height - - # Create a Rectangle patch - for box in boxes: - assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height" - class_pred = box[0] - box = box[2:] - upper_left_x = box[0] - box[2] / 2 - upper_left_y = box[1] - box[3] / 2 - rect = patches.Rectangle( - (upper_left_x * width, upper_left_y * height), - box[2] * width, - box[3] * height, - linewidth=2, - edgecolor=colors[int(class_pred)], - facecolor="none", - ) - # Add the patch to the Axes - ax.add_patch(rect) - plt.text( - upper_left_x * width, - upper_left_y * height, - s=class_labels[int(class_pred)], - color="white", - verticalalignment="top", - bbox={"color": colors[int(class_pred)], "pad": 0}, - ) - - if return_fig: - return fig - plt.show() - - -def get_evaluation_bboxes( - loader, - model, - iou_threshold, - anchors, - threshold, - box_format="midpoint", -): - # make sure model is in eval before get bboxes - model.eval() - train_idx = 0 - all_pred_boxes = [] - all_true_boxes = [] - for batch_idx, (x, labels) in enumerate(tqdm(loader)): - x = x.to(config.DEVICE) - with torch.no_grad(): - predictions = model(x) - - batch_size = x.shape[0] - bboxes = [[] for _ in range(batch_size)] - for i in range(3): - S = predictions[i].shape[2] - anchor = torch.tensor([*anchors[i]]) * S - boxes_scale_i = cells_to_bboxes( - predictions[i], anchor, S=S, is_preds=True - ) - for idx, (box) in enumerate(boxes_scale_i): - bboxes[idx] += box - - # we just want one bbox for each label, not one for each scale - true_bboxes = cells_to_bboxes( - labels[2], anchor, S=S, is_preds=False - ) - - for idx in range(batch_size): - nms_boxes = non_max_suppression( - bboxes[idx], - iou_threshold=iou_threshold, - threshold=threshold, - box_format=box_format, - ) - - for nms_box in nms_boxes: - all_pred_boxes.append([train_idx] + nms_box) - - for box in true_bboxes[idx]: - if box[1] > threshold: - all_true_boxes.append([train_idx] + box) - - train_idx += 1 - - model.train() - return all_pred_boxes, all_true_boxes - - -def cells_to_bboxes(predictions, anchors, S, is_preds=True): - """ - Scales the predictions coming from the model to - be relative to the entire image such that they for example later - can be plotted or. 
- INPUT: - predictions: tensor of size (N, 3, S, S, num_classes+5) - anchors: the anchors used for the predictions - S: the number of cells the image is divided in on the width (and height) - is_preds: whether the input is predictions or the true bounding boxes - OUTPUT: - converted_bboxes: the converted boxes of sizes (N, num_anchors, S, S, 1+5) with class index, - object score, bounding box coordinates - """ - BATCH_SIZE = predictions.shape[0] - num_anchors = len(anchors) - box_predictions = predictions[..., 1:5] - if is_preds: - anchors = anchors.reshape(1, len(anchors), 1, 1, 2) - box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2]) - box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * anchors - scores = torch.sigmoid(predictions[..., 0:1]) - best_class = torch.argmax(predictions[..., 5:], dim=-1).unsqueeze(-1) - else: - scores = predictions[..., 0:1] - best_class = predictions[..., 5:6] - - cell_indices = ( - torch.arange(S) - .repeat(predictions.shape[0], 3, S, 1) - .unsqueeze(-1) - ).to(config.DEVICE) - x = 1 / S * (box_predictions[..., 0:1] + cell_indices) - y = 1 / S * (box_predictions[..., 1:2] + cell_indices.permute(0, 1, 3, 2, 4)) - w_h = 1 / S * box_predictions[..., 2:4] - converted_bboxes = torch.cat((best_class, scores, x, y, w_h), dim=-1)\ - .reshape(BATCH_SIZE, num_anchors * S * S, 6) - return converted_bboxes.tolist() - -def check_class_accuracy(model, batch, threshold, tag='train'): - model.eval() - tot_class_preds, correct_class = 0, 0 - tot_noobj, correct_noobj = 0, 0 - tot_obj, correct_obj = 0, 0 - x, y = batch - - x = x.to(config.DEVICE) - - with torch.no_grad(): - out = model(x) - - for i in range(3): - y[i] = y[i].to(config.DEVICE) - obj = y[i][..., 0] == 1 # in paper this is Iobj_i - noobj = y[i][..., 0] == 0 # in paper this is Iobj_i - - correct_class += torch.sum( - torch.argmax(out[i][..., 5:][obj], dim=-1) == y[i][..., 5][obj] - ) - tot_class_preds += torch.sum(obj) - - obj_preds = torch.sigmoid(out[i][..., 0]) > threshold - correct_obj += torch.sum(obj_preds[obj] == y[i][..., 0][obj]) - tot_obj += torch.sum(obj) - correct_noobj += torch.sum(obj_preds[noobj] == y[i][..., 0][noobj]) - tot_noobj += torch.sum(noobj) - - ans = { - f"{tag}_class_accuracy": (correct_class/(tot_class_preds+1e-16))*100, - f"{tag}_no_obj_accuracy": (correct_noobj/(tot_noobj+1e-16))*100, - f"{tag}_obj_accuracy": (correct_obj/(tot_obj+1e-16))*100 - } - model.train() - return ans - - - -def get_mean_std(loader): - # var[X] = E[X**2] - E[X]**2 - channels_sum, channels_sqrd_sum, num_batches = 0, 0, 0 - - for data, _ in tqdm(loader): - channels_sum += torch.mean(data, dim=[0, 2, 3]) - channels_sqrd_sum += torch.mean(data ** 2, dim=[0, 2, 3]) - num_batches += 1 - - mean = channels_sum / num_batches - std = (channels_sqrd_sum / num_batches - mean ** 2) ** 0.5 - - return mean, std - - -def save_checkpoint(model, optimizer, filename="my_checkpoint.pth.tar"): - print("=> Saving checkpoint") - checkpoint = { - "state_dict": model.state_dict(), - "optimizer": optimizer.state_dict(), - } - torch.save(checkpoint, filename) - - -def load_checkpoint(checkpoint_file, model, optimizer, lr, device): - print("=> Loading checkpoint") - checkpoint = torch.load(checkpoint_file, map_location=device) - model.load_state_dict(checkpoint["state_dict"]) - optimizer.load_state_dict(checkpoint["optimizer"]) - - # If we don't do this then it will just have learning rate of old checkpoint - # and it will lead to many hours of debugging \: - for param_group in optimizer.param_groups: - 
param_group["lr"] = lr - -def plot_couple_examples(model, batch, thresh, iou_thresh, anchors): - model.eval() - x, _ = batch - x = x.to(config.DEVICE) - batch_size = x.shape[0] - with torch.no_grad(): - out = model(x) - bboxes = [[] for _ in range(x.shape[0])] - for i in range(3): - batch_size, _, S, _, _ = out[i].shape - anchor = anchors[i] - boxes_scale_i = cells_to_bboxes( - out[i], anchor, S=S, is_preds=True - ) - for idx, (box) in enumerate(boxes_scale_i): - bboxes[idx] += box - - model.train() - - for i in range(batch_size//4): - nms_boxes = non_max_suppression( - bboxes[i], iou_threshold=iou_thresh, - threshold=thresh, box_format="midpoint", - ) - plot_image(x[i].permute(1,2,0).detach().cpu(), nms_boxes) - - - -def seed_everything(seed=42): - os.environ['PYTHONHASHSEED'] = str(seed) - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def clip_coords(boxes, img_shape): - # Clip bounding xyxy bounding boxes to image shape (height, width) - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 - -def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): - # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x - y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y - y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x - y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y - return y - - -def xyn2xy(x, w=640, h=640, padw=0, padh=0): - # Convert normalized segments into pixel segments, shape (n,2) - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[..., 0] = w * x[..., 0] + padw # top left x - y[..., 1] = h * x[..., 1] + padh # top left y - return y - -def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - if clip: - clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center - y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center - y[..., 2] = (x[..., 2] - x[..., 0]) / w # width - y[..., 3] = (x[..., 3] - x[..., 1]) / h # height - return y - -def clip_boxes(boxes, shape): - # Clip boxes (xyxy) to image shape (height, width) - if isinstance(boxes, torch.Tensor): # faster individually - boxes[..., 0].clamp_(0, shape[1]) # x1 - boxes[..., 1].clamp_(0, shape[0]) # y1 - boxes[..., 2].clamp_(0, shape[1]) # x2 - boxes[..., 3].clamp_(0, shape[0]) # y2 - else: # np.array (faster grouped) - boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 - boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 - diff --git a/spaces/vialibre/edia_full_es/README.md b/spaces/vialibre/edia_full_es/README.md deleted file mode 100644 index cee6c09716dddfdb313179ad912852c68ff533b5..0000000000000000000000000000000000000000 --- a/spaces/vialibre/edia_full_es/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Edia Full Es -emoji: 👁 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: mit ---- - 
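Stepping back to the end of the utils hunk above, the xywhn2xyxy / xyxy2xywhn pair converts between normalized midpoint boxes and pixel corner boxes. A self-contained round-trip check is below; the two functions are restated only so the sketch runs on its own, and the 640x640 image size and box values are illustrative:

```python
import numpy as np
import torch

# Restated from the utils hunk above so this check runs standalone.
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top-left x
    y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top-left y
    y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom-right x
    y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom-right y
    return y

def xyxy2xywhn(x, w=640, h=640):
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w  # x center
    y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h  # y center
    y[..., 2] = (x[..., 2] - x[..., 0]) / w        # width
    y[..., 3] = (x[..., 3] - x[..., 1]) / h        # height
    return y

boxes_n = np.array([[0.5, 0.5, 0.25, 0.5]])    # one centered box, normalized xywh
boxes_px = xywhn2xyxy(boxes_n, w=640, h=640)   # -> [[240., 160., 400., 480.]]
assert np.allclose(xyxy2xywhn(boxes_px, w=640, h=640), boxes_n)
print(boxes_px)
```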
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vonbarnekowa/stable-diffusion/ldm/data/util.py b/spaces/vonbarnekowa/stable-diffusion/ldm/data/util.py deleted file mode 100644 index 5b60ceb2349e3bd7900ff325740e2022d2903b1c..0000000000000000000000000000000000000000 --- a/spaces/vonbarnekowa/stable-diffusion/ldm/data/util.py +++ /dev/null @@ -1,24 +0,0 @@ -import torch - -from ldm.modules.midas.api import load_midas_transform - - -class AddMiDaS(object): - def __init__(self, model_type): - super().__init__() - self.transform = load_midas_transform(model_type) - - def pt2np(self, x): - x = ((x + 1.0) * .5).detach().cpu().numpy() - return x - - def np2pt(self, x): - x = torch.from_numpy(x) * 2 - 1. - return x - - def __call__(self, sample): - # sample['jpg'] is tensor hwc in [-1, 1] at this point - x = self.pt2np(sample['jpg']) - x = self.transform({"image": x})["image"] - sample['midas_in'] = x - return sample \ No newline at end of file diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/conv.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/conv.py deleted file mode 100644 index cf54491997a48ac3e7fadc4183ab7bf3e831024c..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/conv.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from torch import nn - -from .registry import CONV_LAYERS - -CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d) -CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d) -CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d) -CONV_LAYERS.register_module('Conv', module=nn.Conv2d) - - -def build_conv_layer(cfg, *args, **kwargs): - """Build convolution layer. - - Args: - cfg (None or dict): The conv layer config, which should contain: - - type (str): Layer type. - - layer args: Args needed to instantiate an conv layer. - args (argument list): Arguments passed to the `__init__` - method of the corresponding conv layer. - kwargs (keyword arguments): Keyword arguments passed to the `__init__` - method of the corresponding conv layer. - - Returns: - nn.Module: Created conv layer. 
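The docstring above spells out the contract of build_conv_layer; its body follows. A hedged usage sketch, assuming the Space's repository is on sys.path so the module path from the hunk header resolves, and with arbitrary channel sizes:

```python
import torch
from annotator.uniformer.mmcv.cnn.bricks.conv import build_conv_layer  # path per the deleted file; assumes the repo layout is importable

cfg = dict(type='Conv2d')  # passing cfg=None falls back to Conv2d as well
conv = build_conv_layer(cfg, 3, 16, kernel_size=3, padding=1)  # extra args are forwarded to nn.Conv2d
assert isinstance(conv, torch.nn.Conv2d)
assert conv(torch.randn(1, 3, 32, 32)).shape == (1, 16, 32, 32)
```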
- """ - if cfg is None: - cfg_ = dict(type='Conv2d') - else: - if not isinstance(cfg, dict): - raise TypeError('cfg must be a dict') - if 'type' not in cfg: - raise KeyError('the cfg dict must contain the key "type"') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in CONV_LAYERS: - raise KeyError(f'Unrecognized norm type {layer_type}') - else: - conv_layer = CONV_LAYERS.get(layer_type) - - layer = conv_layer(*args, **kwargs, **cfg_) - - return layer diff --git a/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/utils.py b/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/utils.py deleted file mode 100644 index 6917d28298ddff6d37f89ac04dfb1d135dbc8406..0000000000000000000000000000000000000000 --- a/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import re -import pandas as pd -from urllib.parse import urlparse - -import logging -logging.basicConfig( - format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S" -) -logger = logging.getLogger(__name__) - - -class UTILS: - def __init__(self): - pass - - - def split_text( - self, - text - ): - text = text.split(',') - text = [t.strip() for t in text] - return text - - - def replace_newlines_and_spaces( - self, - text - ): - # Replace all newline characters with spaces - text = text.replace("\n", " ") - # Replace multiple spaces with a single space - text = re.sub(r'\s+', ' ', text) - return text - - - def clean_df( - self, - df, - dropna=True, - fillna=False - ): - if fillna: - df.fillna('', inplace=True) - if dropna: - df.dropna(inplace=True) - # df = df[~df.isna()] - df = df.drop_duplicates().reset_index(drop=True) - return df - - - def validate_url_format( - self, - urls, - url_type='urls' - ): - valid_urls = [] - for url in urls: - result = urlparse(url) - # Check if the url is valid - if all([result.scheme, result.netloc]): - # Online PDF urls should end with .pdf extension - if url_type == 'online_pdf' and not url.endswith('.pdf'): - continue - valid_urls.append(url) - logging.info(f'Valid URLs are: {valid_urls}') - return valid_urls diff --git a/spaces/webtest1s/testings/index.html b/spaces/webtest1s/testings/index.html deleted file mode 100644 index 4a9ad8c617cf541a73e3279bed0e363ea687c895..0000000000000000000000000000000000000000 --- a/spaces/webtest1s/testings/index.html +++ /dev/null @@ -1,8 +0,0 @@ - - -
              /discussions/1/merge"> - - -
              - - \ No newline at end of file diff --git a/spaces/widged/bart-generation/app.py b/spaces/widged/bart-generation/app.py deleted file mode 100644 index d1263fd0e43ee8adc72cc65e58ecb9f3bd06e142..0000000000000000000000000000000000000000 --- a/spaces/widged/bart-generation/app.py +++ /dev/null @@ -1,13 +0,0 @@ -import gradio as gr -import transformers -from transformers import BartTokenizer, BartForConditionalGeneration -model_name = 'facebook/bart-large-cnn' -tokenizer = BartTokenizer.from_pretrained(model_name) -model = BartForConditionalGeneration.from_pretrained(model_name) -def summarize(inp): - inp = inp.replace('\n','') - inp = tokenizer.encode(inp, return_tensors='pt', max_length=1024) - summary_ids = model.generate(inp, num_beams=4, max_length=150, early_stopping=True) - summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True) - return summary -gr.Interface(fn=summarize, inputs=gr.inputs.Textbox(lines=7, label="Input Text"), outputs="text").launch(inline=False) diff --git a/spaces/wuhuik/bingo/src/components/welcome-screen.tsx b/spaces/wuhuik/bingo/src/components/welcome-screen.tsx deleted file mode 100644 index f7449fcbb6c621875e235db98f2790bf7894fb0a..0000000000000000000000000000000000000000 --- a/spaces/wuhuik/bingo/src/components/welcome-screen.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { useBing } from '@/lib/hooks/use-bing' - -const exampleMessages = [ - { - heading: '🧐 提出复杂问题', - message: `我可以为我挑剔的只吃橙色食物的孩子做什么饭?` - }, - { - heading: '🙌 获取更好的答案', - message: '销量最高的 3 种宠物吸尘器有哪些优点和缺点?' - }, - { - heading: '🎨 获得创意灵感', - message: `以海盗的口吻写一首关于外太空鳄鱼的俳句` - } -] - -export function WelcomeScreen({ setInput }: Pick, 'setInput'>) { - return ( -
              - {exampleMessages.map(example => ( - - ))} -
              - ) -} diff --git a/spaces/wwwwwwww2/bingo/src/components/chat-history.tsx b/spaces/wwwwwwww2/bingo/src/components/chat-history.tsx deleted file mode 100644 index feb81de66562edda8f40d3c0cc717202c92b6509..0000000000000000000000000000000000000000 --- a/spaces/wwwwwwww2/bingo/src/components/chat-history.tsx +++ /dev/null @@ -1,48 +0,0 @@ -import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons" - -export function ChatHistory() { - return ( -
              -
              - 历史记录 -
              -
              -
              -
              -
              -
              -
              - -
              -

              无标题的聊天

              -
              -

              上午1:42

              -
              - - - - - - - - -
              -
              -
              -
              -
              -
              -
              -
              - ) -} diff --git a/spaces/wy213/213a/src/components/chat-suggestions.tsx b/spaces/wy213/213a/src/components/chat-suggestions.tsx deleted file mode 100644 index 00c2fee295c9e010946046eb71705a5e131f7a5a..0000000000000000000000000000000000000000 --- a/spaces/wy213/213a/src/components/chat-suggestions.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import React, { useMemo } from 'react' -import Image from 'next/image' -import HelpIcon from '@/assets/images/help.svg' -import { SuggestedResponse } from '@/lib/bots/bing/types' -import { useBing } from '@/lib/hooks/use-bing' -import { atom, useAtom } from 'jotai' - -type Suggestions = SuggestedResponse[] -const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text })) -const suggestionsAtom = atom([]) - -type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick, 'setInput'> & { suggestions?: Suggestions } - -export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) { - const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom) - const toggleSuggestions = (() => { - if (currentSuggestions === helpSuggestions) { - setSuggestions(suggestions) - } else { - setSuggestions(helpSuggestions) - } - }) - - useMemo(() => { - setSuggestions(suggestions) - window.scrollBy(0, 2000) - }, [suggestions.length]) - - return currentSuggestions?.length ? ( -
              -
              - - { - currentSuggestions.map(suggestion => ( - - )) - } -
              -
              - ) : null -} diff --git a/spaces/wydgg/bingo-wyd-ai/src/components/chat.tsx b/spaces/wydgg/bingo-wyd-ai/src/components/chat.tsx deleted file mode 100644 index a37ab1cc96ca2e6bfd9acbe313a8d946bfd5c3d4..0000000000000000000000000000000000000000 --- a/spaces/wydgg/bingo-wyd-ai/src/components/chat.tsx +++ /dev/null @@ -1,93 +0,0 @@ -'use client' - -import { useCallback, useEffect, useMemo, useState } from 'react' -import { useAtom } from 'jotai' -import Image from 'next/image' -import { cn } from '@/lib/utils' -import { ChatList } from '@/components/chat-list' -import { ChatPanel } from '@/components/chat-panel' -import { WelcomeScreen } from '@/components/welcome-screen' -import { ChatScrollAnchor } from '@/components/chat-scroll-anchor' -import { ToneSelector } from './tone-selector' -import { ChatHeader } from './chat-header' -import { ChatSuggestions } from './chat-suggestions' -import { bingConversationStyleAtom } from '@/state' -import { ButtonScrollToBottom } from '@/components/button-scroll-to-bottom' -import StopIcon from '@/assets/images/stop.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { ChatNotification } from './chat-notification' -import { Settings } from './settings' -import { ChatHistory } from './chat-history' - -export type ChatProps = React.ComponentProps<'div'> & { initialMessages?: ChatMessageModel[] } - -export default function Chat({ className }: ChatProps) { - - const [bingStyle, setBingStyle] = useAtom(bingConversationStyleAtom) - const { - messages, - sendMessage, - resetConversation, - stopGenerating, - setInput, - bot, - input, - generating, - isSpeaking, - uploadImage, - attachmentList, - setAttachmentList, - } = useBing() - - useEffect(() => { - window.scrollTo({ - top: document.body.offsetHeight, - behavior: 'smooth' - }) - }, []) - - return ( -
              - -
              - - - - {messages.length ? ( - <> - - - - - - {generating ? ( -
              - -
              - ) : null} - - ) : null} -
              - - -
              - ) -} diff --git a/spaces/wydgg/bingo-wyd-ai/src/lib/hooks/chat-history.ts b/spaces/wydgg/bingo-wyd-ai/src/lib/hooks/chat-history.ts deleted file mode 100644 index c6fbf3fecfa86fe553f56acc8253236b8f22a775..0000000000000000000000000000000000000000 --- a/spaces/wydgg/bingo-wyd-ai/src/lib/hooks/chat-history.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { zip } from 'lodash-es' -import { ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { Storage } from '../storage' - -/** - * conversations:$botId => Conversation[] - * conversation:$botId:$cid:messages => ChatMessageModel[] - */ - -interface Conversation { - id: string - createdAt: number -} - -type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] } - -async function loadHistoryConversations(botId: BotId): Promise { - const key = `conversations:${botId}` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -async function deleteHistoryConversation(botId: BotId, cid: string) { - const conversations = await loadHistoryConversations(botId) - const newConversations = conversations.filter((c) => c.id !== cid) - await Storage.set({ [`conversations:${botId}`]: newConversations }) -} - -async function loadConversationMessages(botId: BotId, cid: string): Promise { - const key = `conversation:${botId}:${cid}:messages` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) { - const conversations = await loadHistoryConversations(botId) - if (!conversations.some((c) => c.id === cid)) { - conversations.unshift({ id: cid, createdAt: Date.now() }) - await Storage.set({ [`conversations:${botId}`]: conversations }) - } - const key = `conversation:${botId}:${cid}:messages` - await Storage.set({ [key]: messages }) -} - -export async function loadHistoryMessages(botId: BotId): Promise { - const conversations = await loadHistoryConversations(botId) - const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id))) - return zip(conversations, messagesList).map(([c, messages]) => ({ - id: c!.id, - createdAt: c!.createdAt, - messages: messages!, - })) -} - -export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) { - const messages = await loadConversationMessages(botId, conversationId) - const newMessages = messages.filter((m) => m.id !== messageId) - await setConversationMessages(botId, conversationId, newMessages) - if (!newMessages.length) { - await deleteHistoryConversation(botId, conversationId) - } -} diff --git a/spaces/xfys/yolov5_tracking/yolov5/segment/train.py b/spaces/xfys/yolov5_tracking/yolov5/segment/train.py deleted file mode 100644 index 073fc742005b856992f853af2a54093c638d79b6..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/yolov5/segment/train.py +++ /dev/null @@ -1,666 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Train a YOLOv5 segment model on a segment dataset -Models and datasets download automatically from the latest YOLOv5 release. 
- -Usage - Single-GPU training: - $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended) - $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch - -Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 - -Models: https://github.com/ultralytics/yolov5/tree/master/models -Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data -""" - -import argparse -import math -import os -import random -import subprocess -import sys -import time -from copy import deepcopy -from datetime import datetime -from pathlib import Path - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -import yaml -from torch.optim import lr_scheduler -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -import segment.val as validate # for end-of-epoch mAP -from models.experimental import attempt_load -from models.yolo import SegmentationModel -from utils.autoanchor import check_anchors -from utils.autobatch import check_train_batch_size -from utils.callbacks import Callbacks -from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, - check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, - get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, - labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) -from utils.loggers import GenericLogger -from utils.plots import plot_evolve, plot_labels -from utils.segment.dataloaders import create_dataloader -from utils.segment.loss import ComputeLoss -from utils.segment.metrics import KEYS, fitness -from utils.segment.plots import plot_images_and_masks, plot_results_with_masks -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, - smart_resume, torch_distributed_zero_first) - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -GIT_INFO = check_git_info() - - -def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio - # callbacks.run('on_pretrain_routine_start') - - # Directories - w = save_dir / 'weights' # weights dir - (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir - last, best = w / 'last.pt', w / 'best.pt' - - # Hyperparameters - if isinstance(hyp, str): - with open(hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict - LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) - opt.hyp = hyp.copy() # for saving hyps 
to checkpoints - - # Save run settings - if not evolve: - yaml_save(save_dir / 'hyp.yaml', hyp) - yaml_save(save_dir / 'opt.yaml', vars(opt)) - - # Loggers - data_dict = None - if RANK in {-1, 0}: - logger = GenericLogger(opt=opt, console_logger=LOGGER) - - # Config - plots = not evolve and not opt.noplots # create plots - overlap = not opt.no_overlap - cuda = device.type != 'cpu' - init_seeds(opt.seed + 1 + RANK, deterministic=True) - with torch_distributed_zero_first(LOCAL_RANK): - data_dict = data_dict or check_dataset(data) # check if None - train_path, val_path = data_dict['train'], data_dict['val'] - nc = 1 if single_cls else int(data_dict['nc']) # number of classes - names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset - - # Model - check_suffix(weights, '.pt') # check weights - pretrained = weights.endswith('.pt') - if pretrained: - with torch_distributed_zero_first(LOCAL_RANK): - weights = attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak - model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) - exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect - model.load_state_dict(csd, strict=False) # load - LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report - else: - model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - amp = check_amp(model) # check AMP - - # Freeze - freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze - for k, v in model.named_parameters(): - v.requires_grad = True # train all layers - # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) - if any(x in k for x in freeze): - LOGGER.info(f'freezing {k}') - v.requires_grad = False - - # Image size - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple - - # Batch size - if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size - batch_size = check_train_batch_size(model, imgsz, amp) - logger.update_params({'batch_size': batch_size}) - # loggers.on_params_update({"batch_size": batch_size}) - - # Optimizer - nbs = 64 # nominal batch size - accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay - optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) - - # Scheduler - if opt.cos_lr: - lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] - else: - lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) - - # EMA - ema = ModelEMA(model) if RANK in {-1, 0} else None - - # Resume - best_fitness, start_epoch = 0.0, 0 - if pretrained: - if resume: - best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) - del ckpt, csd - - # DP mode - if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning( - 'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.' - ) - model = torch.nn.DataParallel(model) - - # SyncBatchNorm - if opt.sync_bn and cuda and RANK != -1: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - LOGGER.info('Using SyncBatchNorm()') - - # Trainloader - train_loader, dataset = create_dataloader( - train_path, - imgsz, - batch_size // WORLD_SIZE, - gs, - single_cls, - hyp=hyp, - augment=True, - cache=None if opt.cache == 'val' else opt.cache, - rect=opt.rect, - rank=LOCAL_RANK, - workers=workers, - image_weights=opt.image_weights, - quad=opt.quad, - prefix=colorstr('train: '), - shuffle=True, - mask_downsample_ratio=mask_ratio, - overlap_mask=overlap, - ) - labels = np.concatenate(dataset.labels, 0) - mlc = int(labels[:, 0].max()) # max label class - assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' - - # Process 0 - if RANK in {-1, 0}: - val_loader = create_dataloader(val_path, - imgsz, - batch_size // WORLD_SIZE * 2, - gs, - single_cls, - hyp=hyp, - cache=None if noval else opt.cache, - rect=True, - rank=-1, - workers=workers * 2, - pad=0.5, - mask_downsample_ratio=mask_ratio, - overlap_mask=overlap, - prefix=colorstr('val: '))[0] - - if not resume: - if not opt.noautoanchor: - check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor - model.half().float() # pre-reduce anchor precision - - if plots: - plot_labels(labels, names, save_dir) - # callbacks.run('on_pretrain_routine_end', labels, names) - - # DDP mode - if cuda and RANK != -1: - model = smart_DDP(model) - - # Model attributes - nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) - hyp['box'] *= 3 / nl # scale to layers - hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers - hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers - hyp['label_smoothing'] = opt.label_smoothing - model.nc = nc # attach number of classes to model - model.hyp = hyp # attach hyperparameters to model - model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights - model.names = names - - # Start training - t0 = time.time() - nb = len(train_loader) # number of batches - nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) - # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training - last_opt_step = -1 - maps = np.zeros(nc) # mAP per class - results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = torch.cuda.amp.GradScaler(enabled=amp) - stopper, stop = EarlyStopping(patience=opt.patience), False - compute_loss = ComputeLoss(model, overlap=overlap) # init loss class - # callbacks.run('on_train_start') - LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' - f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...') - for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ - # callbacks.run('on_train_epoch_start') - model.train() - - # Update image weights (optional, single-GPU only) - if opt.image_weights: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - - # Update mosaic border (optional) - # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) - # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - - mloss = torch.zeros(4, device=device) # mean losses - if RANK != -1: - train_loader.sampler.set_epoch(epoch) - pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%11s' * 8) % - ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) - if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar - optimizer.zero_grad() - for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ - # callbacks.run('on_train_batch_start') - ni = i + nb * epoch # number integrated 
batches (since train start) - imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 - - # Warmup - if ni <= nw: - xi = [0, nw] # x interp - # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) - accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) - for j, x in enumerate(optimizer.param_groups): - # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) - if 'momentum' in x: - x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) - - # Multi-scale - if opt.multi_scale: - sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size - sf = sz / max(imgs.shape[2:]) # scale factor - if sf != 1: - ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) - - # Forward - with torch.cuda.amp.autocast(amp): - pred = model(imgs) # forward - loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float()) - if RANK != -1: - loss *= WORLD_SIZE # gradient averaged between devices in DDP mode - if opt.quad: - loss *= 4. - - # Backward - scaler.scale(loss).backward() - - # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html - if ni - last_opt_step >= accumulate: - scaler.unscale_(optimizer) # unscale gradients - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients - scaler.step(optimizer) # optimizer.step - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - last_opt_step = ni - - # Log - if RANK in {-1, 0}: - mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - pbar.set_description(('%11s' * 2 + '%11.4g' * 6) % - (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) - # if callbacks.stop_training: - # return - - # Mosaic plots - if plots: - if ni < 3: - plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg') - if ni == 10: - files = sorted(save_dir.glob('train*.jpg')) - logger.log_images(files, 'Mosaics', epoch) - # end batch ------------------------------------------------------------------------------------------------ - - # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for loggers - scheduler.step() - - if RANK in {-1, 0}: - # mAP - # callbacks.run('on_train_epoch_end', epoch=epoch) - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) - final_epoch = (epoch + 1 == epochs) or stopper.possible_stop - if not noval or final_epoch: # Calculate mAP - results, maps, _ = validate.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - half=amp, - model=ema.ema, - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - plots=False, - callbacks=callbacks, - compute_loss=compute_loss, - mask_downsample_ratio=mask_ratio, - overlap=overlap) - - # Update best mAP - fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - stop = stopper(epoch=epoch, fitness=fi) # early stop check - if fi > best_fitness: - best_fitness = fi - log_vals = list(mloss) + list(results) + lr - # 
callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) - # Log val metrics and media - metrics_dict = dict(zip(KEYS, log_vals)) - logger.log_metrics(metrics_dict, epoch) - - # Save model - if (not nosave) or (final_epoch and not evolve): # if save - ckpt = { - 'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'opt': vars(opt), - 'git': GIT_INFO, # {remote, branch, commit} if a git repo - 'date': datetime.now().isoformat()} - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fi: - torch.save(ckpt, best) - if opt.save_period > 0 and epoch % opt.save_period == 0: - torch.save(ckpt, w / f'epoch{epoch}.pt') - logger.log_model(w / f'epoch{epoch}.pt') - del ckpt - # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) - - # EarlyStopping - if RANK != -1: # if DDP training - broadcast_list = [stop if RANK == 0 else None] - dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks - if RANK != 0: - stop = broadcast_list[0] - if stop: - break # must break all DDP ranks - - # end epoch ---------------------------------------------------------------------------------------------------- - # end training ----------------------------------------------------------------------------------------------------- - if RANK in {-1, 0}: - LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if f is best: - LOGGER.info(f'\nValidating {f}...') - results, _, _ = validate.run( - data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - model=attempt_load(f, device).half(), - iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - save_json=is_coco, - verbose=True, - plots=plots, - callbacks=callbacks, - compute_loss=compute_loss, - mask_downsample_ratio=mask_ratio, - overlap=overlap) # val best model with plots - if is_coco: - # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) - metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) - logger.log_metrics(metrics_dict, epoch) - - # callbacks.run('on_train_end', last, best, epoch, results) - # on train end callback using genericLogger - logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) - if not opt.evolve: - logger.log_model(best, epoch) - if plots: - plot_results_with_masks(file=save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] - files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - logger.log_images(files, 'Results', epoch + 1) - logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1) - torch.cuda.empty_cache() - return results - - -def parse_opt(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 
'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=100, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Instance Segmentation Args - parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to saving memory') - parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP') - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def main(opt, callbacks=Callbacks()): - # Checks - if RANK in {-1, 0}: - print_args(vars(opt)) - check_git_status() - check_requirements() - - # Resume - if opt.resume and 
not opt.evolve: # resume from specified or most recent last.pt - last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) - opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml - opt_data = opt.data # original dataset - if opt_yaml.is_file(): - with open(opt_yaml, errors='ignore') as f: - d = yaml.safe_load(f) - else: - d = torch.load(last, map_location='cpu')['opt'] - opt = argparse.Namespace(**d) # replace - opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate - if is_url(opt_data): - opt.data = check_file(opt_data) # avoid HUB resume auth timeout - else: - opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ - check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks - assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - if opt.evolve: - if opt.project == str(ROOT / 'runs/train-seg'): # if default project name, rename to runs/evolve-seg - opt.project = str(ROOT / 'runs/evolve-seg') - opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume - if opt.name == 'cfg': - opt.name = Path(opt.cfg).stem # use model.yaml as name - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) - - # DDP mode - device = select_device(opt.device, batch_size=opt.batch_size) - if LOCAL_RANK != -1: - msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' - assert not opt.image_weights, f'--image-weights {msg}' - assert not opt.evolve, f'--evolve {msg}' - assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' - assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') - - # Train - if not opt.evolve: - train(opt.hyp, opt, device, callbacks) - - # Evolve hyperparameters (optional) - else: - # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) - meta = { - 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) - 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) - 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 - 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay - 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) - 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum - 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr - 'box': (1, 0.02, 0.2), # box loss gain - 'cls': (1, 0.2, 4.0), # cls loss gain - 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight - 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) - 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight - 'iou_t': (0, 0.1, 0.7), # IoU training threshold - 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold - 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) - 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) - 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) - 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) - 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) - 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) - 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) - 
'scale': (1, 0.0, 0.9), # image scale (+/- gain) - 'shear': (1, 0.0, 10.0), # image shear (+/- deg) - 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 - 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) - 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) - 'mosaic': (1, 0.0, 1.0), # image mixup (probability) - 'mixup': (1, 0.0, 1.0), # image mixup (probability) - 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) - - with open(opt.hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict - if 'anchors' not in hyp: # anchors commented in hyp.yaml - hyp['anchors'] = 3 - if opt.noautoanchor: - del hyp['anchors'], meta['anchors'] - opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch - # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' - if opt.bucket: - # download evolve.csv if exists - subprocess.run([ - 'gsutil', - 'cp', - f'gs://{opt.bucket}/evolve.csv', - str(evolve_csv),]) - - for _ in range(opt.evolve): # generations to evolve - if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate - # Select parent(s) - parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) - n = min(5, len(x)) # number of previous results to consider - x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) - if parent == 'single' or len(x) == 1: - # x = x[random.randint(0, n - 1)] # random selection - x = x[random.choices(range(n), weights=w)[0]] # weighted selection - elif parent == 'weighted': - x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination - - # Mutate - mp, s = 0.8, 0.2 # mutation probability, sigma - npr = np.random - npr.seed(int(time.time())) - g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 - ng = len(meta) - v = np.ones(ng) - while all(v == 1): # mutate until a change occurs (prevent duplicates) - v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) - for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 12] * v[i]) # mutate - - # Constrain to limits - for k, v in meta.items(): - hyp[k] = max(hyp[k], v[1]) # lower limit - hyp[k] = min(hyp[k], v[2]) # upper limit - hyp[k] = round(hyp[k], 5) # significant digits - - # Train mutation - results = train(hyp.copy(), opt, device, callbacks) - callbacks = Callbacks() - # Write mutation results - print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket) - - # Plot results - plot_evolve(evolve_csv) - LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' - f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}') - - -def run(**kwargs): - # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') - opt = parse_opt(True) - for k, v in kwargs.items(): - setattr(opt, k, v) - main(opt) - return opt - - -if __name__ == '__main__': - opt = parse_opt() - main(opt) diff --git a/spaces/xfys/yolov5_tracking/yolov5/utils/aws/userdata.sh b/spaces/xfys/yolov5_tracking/yolov5/utils/aws/userdata.sh deleted file mode 100644 index 5fc1332ac1b0d1794cf8f8c5f6918059ae5dc381..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/yolov5/utils/aws/userdata.sh +++ /dev/null @@ -1,27 
+0,0 @@ -#!/bin/bash -# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html -# This script will run only once on first instance start (for a re-start script see mime.sh) -# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir -# Use >300 GB SSD - -cd home/ubuntu -if [ ! -d yolov5 ]; then - echo "Running first-time script." # install dependencies, download COCO, pull Docker - git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 - cd yolov5 - bash data/scripts/get_coco.sh && echo "COCO done." & - sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & - python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & - wait && echo "All tasks done." # finish background tasks -else - echo "Running re-start script." # resume interrupted runs - i=0 - list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' - while IFS= read -r id; do - ((i++)) - echo "restarting container $i: $id" - sudo docker start $id - # sudo docker exec -it $id python train.py --resume # single-GPU - sudo docker exec -d $id python utils/aws/resume.py # multi-scenario - done <<<"$list" -fi diff --git a/spaces/xfys/yolov5_tracking/yolov5/utils/loggers/comet/hpo.py b/spaces/xfys/yolov5_tracking/yolov5/utils/loggers/comet/hpo.py deleted file mode 100644 index fc49115c13581554bebe1ddddaf3d5e10caaae07..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/yolov5/utils/loggers/comet/hpo.py +++ /dev/null @@ -1,118 +0,0 @@ -import argparse -import json -import logging -import os -import sys -from pathlib import Path - -import comet_ml - -logger = logging.getLogger(__name__) - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - -# Project Configuration -config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') - - -def get_args(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', 
action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') - - # Comet Arguments - parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') - parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') - parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') - parser.add_argument('--comet_optimizer_workers', - type=int, - default=1, - help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} - - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = 
parameters.get('batch_size') - opt.epochs = parameters.get('epochs') - - device = select_device(opt.device, batch_size=opt.batch_size) - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == '__main__': - opt = get_args(known=True) - - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.project = str(opt.project) - - optimizer_id = os.getenv('COMET_OPTIMIZER_ID') - if optimizer_id is None: - with open(opt.comet_optimizer_config) as f: - optimizer_config = json.load(f) - optimizer = comet_ml.Optimizer(optimizer_config) - else: - optimizer = comet_ml.Optimizer(optimizer_id) - - opt.comet_optimizer_id = optimizer.id - status = optimizer.status() - - opt.comet_optimizer_objective = status['spec']['objective'] - opt.comet_optimizer_metric = status['spec']['metric'] - - logger.info('COMET INFO: Starting Hyperparameter Sweep') - for parameter in optimizer.get_parameters(): - run(parameter['parameters'], opt) diff --git a/spaces/xiao2023/DeepDanbooru_string/app.py b/spaces/xiao2023/DeepDanbooru_string/app.py deleted file mode 100644 index 49019837c9207cc68cb37be0342f3bc44fd0decb..0000000000000000000000000000000000000000 --- a/spaces/xiao2023/DeepDanbooru_string/app.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import argparse -import functools -import os -import html -import pathlib -import tarfile - -import deepdanbooru as dd -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image -import tensorflow as tf -import piexif -import piexif.helper - -TITLE = 'DeepDanbooru String' - -TOKEN = os.environ['TOKEN'] -MODEL_REPO = 'CikeyQI/DeepDanbooru_string' -MODEL_FILENAME = 'model-resnet_custom_v3.h5' -LABEL_FILENAME = 'tags.txt' - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--score-slider-step', type=float, default=0.05) - parser.add_argument('--score-threshold', type=float, default=0.5) - parser.add_argument('--theme', type=str, default='dark-grass') - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - return parser.parse_args() - - -def load_sample_image_paths() -> list[pathlib.Path]: - image_dir = pathlib.Path('images') - if not image_dir.exists(): - dataset_repo = 'hysts/sample-images-TADNE' - path = huggingface_hub.hf_hub_download(dataset_repo, - 'images.tar.gz', - repo_type='dataset', - use_auth_token=TOKEN) - with tarfile.open(path) as f: - f.extractall() - return sorted(image_dir.glob('*')) - - -def load_model() -> tf.keras.Model: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - MODEL_FILENAME, - use_auth_token=TOKEN) - model = tf.keras.models.load_model(path) - return model - - -def load_labels() -> list[str]: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - LABEL_FILENAME, - use_auth_token=TOKEN) - with open(path) as f: - labels = [line.strip() for line in f.readlines()] - return labels - -def plaintext_to_html(text): - text = "
<p>" + "<br>
\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>
              " - return text - -def predict(image: PIL.Image.Image, score_threshold: float, - model: tf.keras.Model, labels: list[str]) -> dict[str, float]: - rawimage = image - _, height, width, _ = model.input_shape - image = np.asarray(image) - image = tf.image.resize(image, - size=(height, width), - method=tf.image.ResizeMethod.AREA, - preserve_aspect_ratio=True) - image = image.numpy() - image = dd.image.transform_and_pad_image(image, width, height) - image = image / 255. - probs = model.predict(image[None, ...])[0] - probs = probs.astype(float) - res = dict() - for prob, label in zip(probs.tolist(), labels): - if prob < score_threshold: - continue - res[label] = prob - b = dict(sorted(res.items(),key=lambda item:item[1], reverse=True)) - a = ', '.join(list(b.keys())).replace('_',' ').replace('(','\(').replace(')','\)') - c = ', '.join(list(b.keys())) - - items = rawimage.info - geninfo = '' - - if "exif" in rawimage.info: - exif = piexif.load(rawimage.info["exif"]) - exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'') - try: - exif_comment = piexif.helper.UserComment.load(exif_comment) - except ValueError: - exif_comment = exif_comment.decode('utf8', errors="ignore") - - items['exif comment'] = exif_comment - geninfo = exif_comment - - for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif', - 'loop', 'background', 'timestamp', 'duration']: - items.pop(field, None) - - geninfo = items.get('parameters', geninfo) - - info = f""" -
<p><h4>PNG Info</h4></p>
              -""" - for key, text in items.items(): - info += f""" -
<div>
-<p><b>{plaintext_to_html(str(key))}</b></p>
-<p>{plaintext_to_html(str(text))}</p>
-</div>
              -""".strip()+"\n" - - if len(info) == 0: - message = "Nothing found in the image." - info = f"
<div><p>{message}</p></div>
              " - - return (a,c,res,info) - - -def main(): - args = parse_args() - model = load_model() - labels = load_labels() - - func = functools.partial(predict, model=model, labels=labels) - func = functools.update_wrapper(func, predict) - - gr.Interface( - func, - [ - gr.inputs.Image(type='pil', label='Input'), - gr.inputs.Slider(0, - 1, - step=args.score_slider_step, - default=args.score_threshold, - label='Score Threshold'), - ], - [ - gr.outputs.Textbox(label='Output (string)'), - gr.outputs.Textbox(label='Output (raw string)'), - gr.outputs.Label(label='Output (label)'), - gr.outputs.HTML() - ], - examples=[ - ['miku.jpg',0.5], - ['miku2.jpg',0.5] - ], - title=TITLE, - description=''' -Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer. - -Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru) - -PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - ''', - theme=args.theme, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/xin/PatentSolver/App/bin/FiguresCleaner.py b/spaces/xin/PatentSolver/App/bin/FiguresCleaner.py deleted file mode 100644 index 7831b304e944690535d3d5d6e3c61e12d7513b89..0000000000000000000000000000000000000000 --- a/spaces/xin/PatentSolver/App/bin/FiguresCleaner.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import nltk -import json - -from App.bin import constants - - -class FiguresCleaner(object): - - def __init__(self, sections): - self.sections = sections - - def clean_figures(self): - sections = self.sections - clean_content = [] - with open(constants.ASSETS + "wordAfterNumber", 'r') as l: - after_words = l.read().splitlines() - after_words_patterns = re.compile('|'.join(after_words)) - with open(constants.ASSETS + "wordBeforeNumber", 'r') as l: - before_words = l.read().splitlines() - before_words_patterns = re.compile('|'.join(before_words)) - - #sections = sections.splitlines() - words = nltk.word_tokenize(sections) - tagged_words = nltk.pos_tag(words) - for i in range(len(tagged_words)): - if i < len(tagged_words) - 1: - next_word = tagged_words[i + 1][0] - current_word = tagged_words[i][0] - previous_word = tagged_words[i - 1][0] - currentWordTag = tagged_words[i][1] - if currentWordTag == 'CD' and not re.match(after_words_patterns, - next_word) is not None and not re.match( - before_words_patterns, previous_word) is not None: - if re.search(r'\d', current_word) is not None: - continue - else: - clean_content.append(current_word + " ") - else: - clean_content.append("\n") - - return clean_content \ No newline at end of file diff --git a/spaces/xin/PatentSolver/App/bin/TechnologyFinder.py b/spaces/xin/PatentSolver/App/bin/TechnologyFinder.py deleted file mode 100644 index 4edd348a4d31e266798aa463c0f496fb2c315bd4..0000000000000000000000000000000000000000 --- a/spaces/xin/PatentSolver/App/bin/TechnologyFinder.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -* -import sys -import os -import math -import re - -from App.bin import constants - -from textblob import TextBlob as tb - -class TechnologyFinder(object): - - def __init__(self, corpus): - self.corpus = corpus - - print("Extracting technologies") - - def last_cleansing(self, tech): - tech = str(tech) - tech = 
re.sub(r'\s?\bcomprises\b', '', tech) - return tech - - def get_technologies(self): - - corpus = self.corpus - - technologies = [] - def tf(word, blob): - return (float)(blob.noun_phrases.count(word)) / (float)(len(blob.noun_phrases)) - - def n_containing(word, bloblist): - return sum(1 for blob in bloblist if word in blob.noun_phrases) - - def idf(word, bloblist): - return math.log(len(bloblist) / (float)(1 + n_containing(word, bloblist))) - - def tfidf(word, blob, bloblist): - return tf(word, blob) * idf(word, bloblist) - - stopwords = open(constants.ASSETS+'stopwords', 'r').read().split('\r\n') - bloblist = [] - filenamelist = [] - - for filepath,patent in corpus.items(): - - filename = os.path.basename(os.path.normpath(filepath)) - #name, extension = filename.split('.') - filenamelist.append(filepath) - - filteredtext = [t for t in patent if t.lower() not in stopwords] - filteredcontent = ''.join(filteredtext) - blob = tb(filteredcontent.lower()) - bloblist.append(blob) - - for i, blob in enumerate(bloblist): - filename = [] - technologies.append(filename) - scores = {word: tfidf(word, blob, bloblist) for word in blob.noun_phrases} - sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True) - for word, score in sorted_words[:6]: - word = self.last_cleansing(word) - print("techologies found") - filename.append(word) - - technologies_list = dict(zip(filenamelist, technologies)) - return technologies_list - diff --git a/spaces/yangogo/bingo/next.config.js b/spaces/yangogo/bingo/next.config.js deleted file mode 100644 index 0e6ccd7fbc91d0459eaaff3e968ce0556789c605..0000000000000000000000000000000000000000 --- a/spaces/yangogo/bingo/next.config.js +++ /dev/null @@ -1,38 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - // output: 'export', - // assetPrefix: '.', - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/TrackList/InstrumentName.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/TrackList/InstrumentName.tsx deleted file mode 100644 index 1f2b21b2db17492ad475e364d4df603a10454bc8..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/TrackList/InstrumentName.tsx +++ /dev/null @@ -1,345 +0,0 @@ -import { observer } from "mobx-react-lite" -import { FC } from "react" -import Track from "../../../common/track/Track" -import { Localized } from "../../../components/Localized" - -export const InstrumentName: FC<{ programNumber: number | undefined }> = ({ - programNumber, -}) => { - switch (programNumber) { - case 0: - return ( - - Acoustic Grand Piano - - ) - case 1: - return ( - - Bright Acoustic Piano - - ) - case 2: - return ( - - Electric Grand Piano - - ) - case 3: - return Honky-tonk Piano - case 4: - return Electric Piano 1 - case 5: - return Electric Piano 2 - case 6: - return Harpsichord - case 7: - return Clavinet - case 8: - return Celesta - case 9: - return 
Glockenspiel - case 10: - return Music Box - case 11: - return Vibraphone - case 12: - return Marimba - case 13: - return Xylophone - case 14: - return Tubular Bells - case 15: - return Dulcimer - case 16: - return Drawbar Organ - case 17: - return Percussive Organ - case 18: - return Rock Organ - case 19: - return Church Organ - case 20: - return Reed Organ - case 21: - return Accordion - case 22: - return Harmonica - case 23: - return Tango Accordion - case 24: - return ( - - Acoustic Guitar (nylon) - - ) - case 25: - return ( - - Acoustic Guitar (steel) - - ) - case 26: - return ( - - Electric Guitar (jazz) - - ) - case 27: - return ( - - Electric Guitar (clean) - - ) - case 28: - return ( - - Electric Guitar (muted) - - ) - case 29: - return ( - Overdriven Guitar - ) - case 30: - return ( - Distortion Guitar - ) - case 31: - return Guitar Harmonics - case 32: - return Acoustic Bass - case 33: - return ( - - Electric Bass (finger) - - ) - case 34: - return ( - - Electric Bass (pick) - - ) - case 35: - return Fretless Bass - case 36: - return Slap Bass 1 - case 37: - return Slap Bass 2 - case 38: - return Synth Bass 1 - case 39: - return Synth Bass 2 - case 40: - return Violin - case 41: - return Viola - case 42: - return Cello - case 43: - return Contrabass - case 44: - return Tremolo Strings - case 45: - return ( - Pizzicato Strings - ) - case 46: - return Orchestral Harp - case 47: - return Timpani - case 48: - return ( - String Ensemble 1 - ) - case 49: - return ( - String Ensemble 2 - ) - case 50: - return Synth Strings 1 - case 51: - return Synth Strings 2 - case 52: - return Choir Aahs - case 53: - return Voice Oohs - case 54: - return Synth Choir - case 55: - return Orchestra Hit - case 56: - return Trumpet - case 57: - return Trombone - case 58: - return Tuba - case 59: - return Muted Trumpet - case 60: - return French Horn - case 61: - return Brass Section - case 62: - return Synth Brass 1 - case 63: - return Synth Brass 2 - case 64: - return Soprano Sax - case 65: - return Alto Sax - case 66: - return Tenor Sax - case 67: - return Baritone Sax - case 68: - return Oboe - case 69: - return English Horn - case 70: - return Bassoon - case 71: - return Clarinet - case 72: - return Piccolo - case 73: - return Flute - case 74: - return Recorder - case 75: - return Pan Flute - case 76: - return Blown Bottle - case 77: - return Shakuhachi - case 78: - return Whistle - case 79: - return Ocarina - case 80: - return Lead 1 (square) - case 81: - return ( - Lead 2 (sawtooth) - ) - case 82: - return ( - Lead 3 (calliope) - ) - case 83: - return Lead 4 (chiff) - case 84: - return Lead 5 (charang) - case 85: - return Lead 6 (voice) - case 86: - return Lead 7 (fifths) - case 87: - return ( - - Lead 8 (bass + lead) - - ) - case 88: - return Pad 1 (new age) - case 89: - return Pad 2 (warm) - case 90: - return ( - Pad 3 (polysynth) - ) - case 91: - return Pad 4 (choir) - case 92: - return Pad 5 (bowed) - case 93: - return Pad 6 (metallic) - case 94: - return Pad 7 (halo) - case 95: - return Pad 8 (sweep) - case 96: - return FX 1 (rain) - case 97: - return ( - FX 2 (soundtrack) - ) - case 98: - return FX 3 (crystal) - case 99: - return ( - FX 4 (atmosphere) - ) - case 100: - return ( - FX 5 (brightness) - ) - case 101: - return FX 6 (goblins) - case 102: - return FX 7 (echoes) - case 103: - return FX 8 (sci-fi) - case 104: - return Sitar - case 105: - return Banjo - case 106: - return Shamisen - case 107: - return Koto - case 108: - return Kalimba - case 109: - return Bagpipe - case 110: - return 
Fiddle - case 111: - return Shanai - case 112: - return Tinkle Bell - case 113: - return Agogo - case 114: - return Steel Drums - case 115: - return Woodblock - case 116: - return Taiko Drum - case 117: - return Melodic Tom - case 118: - return Synth Drum - case 119: - return Reverse Cymbal - case 120: - return ( - Guitar Fret Noise - ) - case 121: - return Breath Noise - case 122: - return Seashore - case 123: - return Bird Tweet - case 124: - return Telephone Ring - case 125: - return Helicopter - case 126: - return Applause - case 127: - return Gunshot - } - return <> -} - -export const TrackInstrumentName: FC<{ track: Track }> = observer( - ({ track }) => { - if (track.isRhythmTrack) { - return <>Standard Drum Kit - } - return - }, -) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/led/modeling_tf_led.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/led/modeling_tf_led.py deleted file mode 100644 index 879538bca76bf3712d1438b83b47363449de1e2e..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/led/modeling_tf_led.py +++ /dev/null @@ -1,2518 +0,0 @@ -# coding=utf-8 -# Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" TF 2.0 LED model.""" - - -from __future__ import annotations - -import random -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union - -import numpy as np -import tensorflow as tf - -from ...activations_tf import get_tf_activation -from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions - -# Public API -from ...modeling_tf_utils import ( - TFModelInputType, - TFPreTrainedModel, - get_initializer, - keras_serializable, - unpack_inputs, -) -from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax -from ...utils import ( - ContextManagers, - ModelOutput, - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from .configuration_led import LEDConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "allenai/led-base-16384" -_CONFIG_FOR_DOC = "LEDConfig" - - -LARGE_NEGATIVE = -1e8 - - -# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right -def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): - pad_token_id = tf.cast(pad_token_id, input_ids.dtype) - decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) - start_tokens = tf.fill( - (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) - ) - shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) - # replace possible -100 values in labels by `pad_token_id` - shifted_input_ids = tf.where( - shifted_input_ids == -100, - tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), - shifted_input_ids, - ) - - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) - - return shifted_input_ids - - -# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask -def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): - """ - Make causal mask used for bi-directional self-attention. - """ - bsz = input_ids_shape[0] - tgt_len = input_ids_shape[1] - mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE - mask_cond = tf.range(shape_list(mask)[-1]) - - mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) - - if past_key_values_length > 0: - mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) - - return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) - - -# Copied from transformers.models.bart.modeling_tf_bart._expand_mask -def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. - """ - src_len = shape_list(mask)[1] - tgt_len = tgt_len if tgt_len is not None else src_len - one_cst = tf.constant(1.0) - mask = tf.cast(mask, dtype=one_cst.dtype) - expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) - - return (one_cst - expanded_mask) * LARGE_NEGATIVE - - -class TFLEDLearnedPositionalEmbedding(tf.keras.layers.Embedding): - """ - This module learns positional embeddings up to a fixed maximum size. 
- """ - - def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): - super().__init__(num_embeddings, embedding_dim, **kwargs) - - def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0): - """Input is expected to be of size [bsz x seqlen].""" - seq_len = input_shape[1] - position_ids = tf.range(seq_len, delta=1, name="range") - position_ids += past_key_values_length - - return super().call(tf.cast(position_ids, dtype=tf.int32)) - - -# Copied from transformers.models.longformer.modeling_tf_longformer.TFLongformerSelfAttention with TFLongformer->TFLEDEncoder -class TFLEDEncoderSelfAttention(tf.keras.layers.Layer): - def __init__(self, config, layer_id, **kwargs): - super().__init__(**kwargs) - self.config = config - - if config.hidden_size % config.num_attention_heads != 0: - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " - f"heads ({config.num_attention_heads}" - ) - - self.num_heads = config.num_attention_heads - self.head_dim = int(config.hidden_size / config.num_attention_heads) - self.embed_dim = config.hidden_size - self.query = tf.keras.layers.Dense( - self.embed_dim, - kernel_initializer=get_initializer(config.initializer_range), - name="query", - ) - self.key = tf.keras.layers.Dense( - self.embed_dim, - kernel_initializer=get_initializer(config.initializer_range), - name="key", - ) - self.value = tf.keras.layers.Dense( - self.embed_dim, - kernel_initializer=get_initializer(config.initializer_range), - name="value", - ) - - # separate projection layers for tokens with global attention - self.query_global = tf.keras.layers.Dense( - self.embed_dim, - kernel_initializer=get_initializer(config.initializer_range), - name="query_global", - ) - self.key_global = tf.keras.layers.Dense( - self.embed_dim, - kernel_initializer=get_initializer(config.initializer_range), - name="key_global", - ) - self.value_global = tf.keras.layers.Dense( - self.embed_dim, - kernel_initializer=get_initializer(config.initializer_range), - name="value_global", - ) - self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) - self.global_dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) - self.layer_id = layer_id - attention_window = config.attention_window[self.layer_id] - - assert ( - attention_window % 2 == 0 - ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}" - assert ( - attention_window > 0 - ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}" - - self.one_sided_attn_window_size = attention_window // 2 - - def build(self, input_shape=None): - if not self.built: - with tf.name_scope("query_global"): - self.query_global.build((self.config.hidden_size,)) - with tf.name_scope("key_global"): - self.key_global.build((self.config.hidden_size,)) - with tf.name_scope("value_global"): - self.value_global.build((self.config.hidden_size,)) - super().build(input_shape) - - def call( - self, - inputs, - training=False, - ): - """ - LongformerSelfAttention expects *len(hidden_states)* to be multiple of *attention_window*. Padding to - *attention_window* happens in LongformerModel.forward to avoid redoing the padding on each layer. 
- - The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to: - - - -10000: no attention - - 0: local attention - - +10000: global attention - """ - # retrieve input args - ( - hidden_states, - attention_mask, - layer_head_mask, - is_index_masked, - is_index_global_attn, - is_global_attn, - ) = inputs - - # project hidden states - query_vectors = self.query(hidden_states) - key_vectors = self.key(hidden_states) - value_vectors = self.value(hidden_states) - batch_size, seq_len, embed_dim = shape_list(hidden_states) - - tf.debugging.assert_equal( - embed_dim, - self.embed_dim, - message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}", - ) - - # normalize query - query_vectors /= tf.math.sqrt(tf.cast(self.head_dim, dtype=query_vectors.dtype)) - query_vectors = tf.reshape(query_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) - key_vectors = tf.reshape(key_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) - - # attn_probs = (batch_size, seq_len, num_heads, window*2+1) - attn_scores = self._sliding_chunks_query_key_matmul( - query_vectors, key_vectors, self.one_sided_attn_window_size - ) - - # values to pad for attention probs - remove_from_windowed_attention_mask = attention_mask != 0 - # cast to fp32/fp16 then replace 1's with -inf - float_mask = tf.cast(remove_from_windowed_attention_mask, dtype=query_vectors.dtype) * LARGE_NEGATIVE - - # diagonal mask with zeros everywhere and -inf inplace of padding - diagonal_mask = self._sliding_chunks_query_key_matmul( - tf.ones(shape_list(attention_mask)), - float_mask, - self.one_sided_attn_window_size, - ) - - # pad local attention probs - attn_scores += diagonal_mask - - tf.debugging.assert_equal( - shape_list(attn_scores), - [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], - message=( - f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," - f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}" - ), - ) - - # compute global attn indices required through out forward fn - ( - max_num_global_attn_indices, - is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero, - ) = self._get_global_attn_indices(is_index_global_attn) - - # this function is only relevant for global attention - if is_global_attn: - attn_scores = self._concat_with_global_key_attn_probs( - attn_scores=attn_scores, - query_vectors=query_vectors, - key_vectors=key_vectors, - max_num_global_attn_indices=max_num_global_attn_indices, - is_index_global_attn_nonzero=is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, - ) - - attn_probs = stable_softmax(attn_scores, axis=-1) - - # softmax sometimes inserts NaN if all positions are masked, replace them with 0 - # Make sure to create a mask with the proper shape: - # if is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1] - # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1] - if is_global_attn: - masked_index = tf.tile( - is_index_masked[:, :, None, None], - (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1), - ) - else: - masked_index = tf.tile( - is_index_masked[:, :, None, None], - (1, 1, self.num_heads, 
self.one_sided_attn_window_size * 2 + 1), - ) - attn_probs = tf.where( - masked_index, - tf.zeros(shape_list(masked_index), dtype=attn_probs.dtype), - attn_probs, - ) - - if layer_head_mask is not None: - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) - - attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs - - # apply dropout - attn_probs = self.dropout(attn_probs, training=training) - value_vectors = tf.reshape(value_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) - - # if global attention, compute sum of global and local attn - - if is_global_attn: - attn_output = self._compute_attn_output_with_global_indices( - value_vectors=value_vectors, - attn_probs=attn_probs, - max_num_global_attn_indices=max_num_global_attn_indices, - is_index_global_attn_nonzero=is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, - ) - else: - attn_output = self._sliding_chunks_matmul_attn_probs_value( - attn_probs, value_vectors, self.one_sided_attn_window_size - ) - - tf.debugging.assert_equal( - shape_list(attn_output), [batch_size, seq_len, self.num_heads, self.head_dim], message="Unexpected size" - ) - - attn_output = tf.reshape(attn_output, (batch_size, seq_len, embed_dim)) - - # compute value for global attention and overwrite to attention output - if is_global_attn: - attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden( - attn_output=attn_output, - hidden_states=hidden_states, - max_num_global_attn_indices=max_num_global_attn_indices, - layer_head_mask=layer_head_mask, - is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, - is_index_global_attn_nonzero=is_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, - is_index_masked=is_index_masked, - training=training, - ) - else: - # Leave attn_output unchanged - global_attn_probs = tf.zeros((batch_size, self.num_heads, max_num_global_attn_indices, seq_len)) - - # make sure that local attention probabilities are set to 0 for indices of global attn - # Make sure to create a mask with the proper shape: - # if is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1] - # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1] - if is_global_attn: - masked_global_attn_index = tf.tile( - is_index_global_attn[:, :, None, None], - (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1), - ) - else: - masked_global_attn_index = tf.tile( - is_index_global_attn[:, :, None, None], - (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1), - ) - attn_probs = tf.where( - masked_global_attn_index, - tf.zeros(shape_list(masked_global_attn_index), dtype=attn_probs.dtype), - attn_probs, - ) - - outputs = (attn_output, attn_probs, global_attn_probs) - - return outputs - - def _sliding_chunks_query_key_matmul(self, query, key, window_overlap): - """ - Matrix multiplication of query and key tensors using with a sliding window attention pattern. This - implementation splits the input into overlapping chunks of size 2w (e.g. 
512 for pretrained Longformer) with an - overlap of size window_overlap - """ - batch_size, seq_len, num_heads, head_dim = shape_list(query) - - tf.debugging.assert_equal( - seq_len % (window_overlap * 2), - 0, - message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}", - ) - tf.debugging.assert_equal( - shape_list(query), - shape_list(key), - message=( - f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:" - f" {shape_list(key)}" - ), - ) - - chunks_count = seq_len // window_overlap - 1 - - # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 - query = tf.reshape( - tf.transpose(query, (0, 2, 1, 3)), - (batch_size * num_heads, seq_len, head_dim), - ) - key = tf.reshape(tf.transpose(key, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim)) - chunked_query = self._chunk(query, window_overlap) - chunked_key = self._chunk(key, window_overlap) - - # matrix multiplication - # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim - # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim - # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap - chunked_query = tf.cast(chunked_query, dtype=chunked_key.dtype) - chunked_attention_scores = tf.einsum("bcxd,bcyd->bcxy", chunked_query, chunked_key) # multiply - - # convert diagonals into columns - paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 1], [0, 0]]) - diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(chunked_attention_scores, paddings) - - # allocate space for the overall attention matrix where the chunks are combined. The last dimension - # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to - # window_overlap previous words). The following column is attention score from each word to itself, then - # followed by window_overlap columns for the upper triangle. 
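# (Editor's illustrative note, not part of the original file: a sketch of the layout
# described in the comment above, assuming a hypothetical window_overlap w = 2. Each
# row i of the final [batch, seq_len, heads, 2 * w + 1] score tensor reads as
#   [score(i, i-2), score(i, i-1), score(i, i), score(i, i+1), score(i, i+2)],
# i.e. w columns for previous tokens, one column for the token itself, and w columns
# for following tokens; entries that would fall outside the sequence are later set
# to -inf by _mask_invalid_locations at the end of this function.)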
- - # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions - # - copying the main diagonal and the upper triangle - # TODO: This code is most likely not very efficient and should be improved - diagonal_attn_scores_up_triang = tf.concat( - [ - diagonal_chunked_attention_scores[:, :, :window_overlap, : window_overlap + 1], - diagonal_chunked_attention_scores[:, -1:, window_overlap:, : window_overlap + 1], - ], - axis=1, - ) - - # - copying the lower triangle - diagonal_attn_scores_low_triang = tf.concat( - [ - tf.zeros( - (batch_size * num_heads, 1, window_overlap, window_overlap), - dtype=diagonal_chunked_attention_scores.dtype, - ), - diagonal_chunked_attention_scores[:, :, -(window_overlap + 1) : -1, window_overlap + 1 :], - ], - axis=1, - ) - diagonal_attn_scores_first_chunk = tf.concat( - [ - tf.roll( - diagonal_chunked_attention_scores, - shift=[1, window_overlap], - axis=[2, 3], - )[:, :, :window_overlap, :window_overlap], - tf.zeros( - (batch_size * num_heads, 1, window_overlap, window_overlap), - dtype=diagonal_chunked_attention_scores.dtype, - ), - ], - axis=1, - ) - first_chunk_mask = ( - tf.tile( - tf.range(chunks_count + 1, dtype=tf.int64)[None, :, None, None], - (batch_size * num_heads, 1, window_overlap, window_overlap), - ) - < 1 - ) - diagonal_attn_scores_low_triang = tf.where( - first_chunk_mask, - diagonal_attn_scores_first_chunk, - diagonal_attn_scores_low_triang, - ) - - # merging upper and lower triangle - diagonal_attention_scores = tf.concat( - [diagonal_attn_scores_low_triang, diagonal_attn_scores_up_triang], axis=-1 - ) - - # separate batch_size and num_heads dimensions again - diagonal_attention_scores = tf.transpose( - tf.reshape( - diagonal_attention_scores, - (batch_size, num_heads, seq_len, 2 * window_overlap + 1), - ), - (0, 2, 1, 3), - ) - - diagonal_attention_scores = self._mask_invalid_locations(diagonal_attention_scores, window_overlap) - - return diagonal_attention_scores - - @staticmethod - def _mask_invalid_locations(input_tensor, window_overlap): - # create correct upper triangle bool mask - mask_2d_upper = tf.reverse( - tf.linalg.band_part(tf.ones(shape=(window_overlap, window_overlap + 1)), -1, 0), - axis=[0], - ) - - # pad to full matrix - padding = tf.convert_to_tensor( - [[0, shape_list(input_tensor)[1] - window_overlap], [0, shape_list(input_tensor)[3] - window_overlap - 1]] - ) - - # create lower mask - mask_2d = tf.pad(mask_2d_upper, padding) - - # combine with upper mask - mask_2d = mask_2d + tf.reverse(mask_2d, axis=[0, 1]) - - # broadcast to full matrix - mask_4d = tf.tile(mask_2d[None, :, None, :], (shape_list(input_tensor)[0], 1, 1, 1)) - - # inf tensor used for masking - inf_tensor = -float("inf") * tf.ones_like(input_tensor) - - # mask - input_tensor = tf.where(tf.math.greater(mask_4d, 0), inf_tensor, input_tensor) - - return input_tensor - - def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_overlap): - """ - Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be of the - same shape as `attn_probs` - """ - - batch_size, seq_len, num_heads, head_dim = shape_list(value) - - tf.debugging.assert_equal( - seq_len % (window_overlap * 2), 0, message="Seq_len has to be multiple of 2 * window_overlap" - ) - tf.debugging.assert_equal( - shape_list(attn_probs)[:3], - shape_list(value)[:3], - message="value and attn_probs must have same dims (except head_dim)", - ) - tf.debugging.assert_equal( - shape_list(attn_probs)[3], - 2 * window_overlap + 1, - message="attn_probs last dim has to be 2 * window_overlap + 1", - ) - - chunks_count = seq_len // window_overlap - 1 - - # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap - chunked_attn_probs = tf.reshape( - tf.transpose(attn_probs, (0, 2, 1, 3)), - ( - batch_size * num_heads, - seq_len // window_overlap, - window_overlap, - 2 * window_overlap + 1, - ), - ) - - # group batch_size and num_heads dimensions into one - value = tf.reshape( - tf.transpose(value, (0, 2, 1, 3)), - (batch_size * num_heads, seq_len, head_dim), - ) - - # pad seq_len with w at the beginning of the sequence and another window overlap at the end - paddings = tf.convert_to_tensor([[0, 0], [window_overlap, window_overlap], [0, 0]]) - padded_value = tf.pad(value, paddings, constant_values=-1) - - # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap - frame_size = 3 * window_overlap * head_dim - frame_hop_size = (shape_list(padded_value)[1] * head_dim - frame_size) // chunks_count - chunked_value = tf.signal.frame( - tf.reshape(padded_value, (batch_size * num_heads, -1)), - frame_size, - frame_hop_size, - ) - chunked_value = tf.reshape( - chunked_value, - (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim), - ) - - tf.debugging.assert_equal( - shape_list(chunked_value), - [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim], - message="Chunked value has the wrong shape", - ) - - chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) - context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value) - context = tf.transpose( - tf.reshape(context, (batch_size, num_heads, seq_len, head_dim)), - (0, 2, 1, 3), - ) - - return context - - @staticmethod - def _pad_and_transpose_last_two_dims(hidden_states_padded, paddings): - """pads rows and then flips rows and columns""" - hidden_states_padded = tf.pad( - hidden_states_padded, paddings - ) # padding value is not important because it will be overwritten - batch_size, chunk_size, seq_length, hidden_dim = shape_list(hidden_states_padded) - hidden_states_padded = tf.reshape(hidden_states_padded, (batch_size, chunk_size, hidden_dim, seq_length)) - - return hidden_states_padded - - @staticmethod - def _pad_and_diagonalize(chunked_hidden_states): - """ - shift every row 1 step right, converting columns into diagonals. 
- - Example: - - ```python - chunked_hidden_states: [ - 0.4983, - 2.6918, - -0.0071, - 1.0492, - -1.8348, - 0.7672, - 0.2986, - 0.0285, - -0.7584, - 0.4206, - -0.0405, - 0.1599, - 2.0514, - -1.1600, - 0.5372, - 0.2629, - ] - window_overlap = num_rows = 4 - ``` - - (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 - 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, - -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] - """ - total_num_heads, num_chunks, window_overlap, hidden_dim = shape_list(chunked_hidden_states) - paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0], [0, window_overlap + 1]]) - chunked_hidden_states = tf.pad( - chunked_hidden_states, paddings - ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten - chunked_hidden_states = tf.reshape( - chunked_hidden_states, (total_num_heads, num_chunks, -1) - ) # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap - chunked_hidden_states = chunked_hidden_states[ - :, :, :-window_overlap - ] # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap - chunked_hidden_states = tf.reshape( - chunked_hidden_states, - (total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim), - ) # total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap - chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] - - return chunked_hidden_states - - @staticmethod - def _chunk(hidden_states, window_overlap): - """convert into overlapping chunks. Chunk size = 2w, overlap size = w""" - batch_size, seq_length, hidden_dim = shape_list(hidden_states) - num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1 - - # define frame size and frame stride (similar to convolution) - frame_hop_size = window_overlap * hidden_dim - frame_size = 2 * frame_hop_size - hidden_states = tf.reshape(hidden_states, (batch_size, seq_length * hidden_dim)) - - # chunk with overlap - chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size) - - tf.debugging.assert_equal( - shape_list(chunked_hidden_states), - [batch_size, num_output_chunks, frame_size], - message=( - "Make sure chunking is correctly applied. `Chunked hidden states should have output dimension" - f" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}." 
- ), - ) - - chunked_hidden_states = tf.reshape( - chunked_hidden_states, - (batch_size, num_output_chunks, 2 * window_overlap, hidden_dim), - ) - - return chunked_hidden_states - - @staticmethod - def _get_global_attn_indices(is_index_global_attn): - """compute global attn indices required throughout forward pass""" - # helper variable - num_global_attn_indices = tf.math.count_nonzero(is_index_global_attn, axis=1) - num_global_attn_indices = tf.cast(num_global_attn_indices, dtype=tf.constant(1).dtype) - - # max number of global attn indices in batch - max_num_global_attn_indices = tf.reduce_max(num_global_attn_indices) - - # indices of global attn - is_index_global_attn_nonzero = tf.where(is_index_global_attn) - - # helper variable - is_local_index_global_attn = tf.range(max_num_global_attn_indices) < tf.expand_dims( - num_global_attn_indices, axis=-1 - ) - - # location of the non-padding values within global attention indices - is_local_index_global_attn_nonzero = tf.where(is_local_index_global_attn) - - # location of the padding values within global attention indices - is_local_index_no_global_attn_nonzero = tf.where(tf.math.logical_not(is_local_index_global_attn)) - - return ( - max_num_global_attn_indices, - is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero, - ) - - def _concat_with_global_key_attn_probs( - self, - attn_scores, - key_vectors, - query_vectors, - max_num_global_attn_indices, - is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero, - ): - batch_size = shape_list(key_vectors)[0] - - # select global key vectors - global_key_vectors = tf.gather_nd(key_vectors, is_index_global_attn_nonzero) - - # create only global key vectors - key_vectors_only_global = tf.scatter_nd( - is_local_index_global_attn_nonzero, - global_key_vectors, - shape=( - batch_size, - max_num_global_attn_indices, - self.num_heads, - self.head_dim, - ), - ) - - # (batch_size, seq_len, num_heads, max_num_global_attn_indices) - attn_probs_from_global_key = tf.einsum("blhd,bshd->blhs", query_vectors, key_vectors_only_global) - - # (batch_size, max_num_global_attn_indices, seq_len, num_heads) - attn_probs_from_global_key_trans = tf.transpose(attn_probs_from_global_key, (0, 3, 1, 2)) - mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple( - shape_list(attn_probs_from_global_key_trans)[-2:] - ) - mask = tf.ones(mask_shape) * -10000.0 - mask = tf.cast(mask, dtype=attn_probs_from_global_key_trans.dtype) - - # scatter mask - attn_probs_from_global_key_trans = tf.tensor_scatter_nd_update( - attn_probs_from_global_key_trans, - is_local_index_no_global_attn_nonzero, - mask, - ) - - # (batch_size, seq_len, num_heads, max_num_global_attn_indices) - attn_probs_from_global_key = tf.transpose(attn_probs_from_global_key_trans, (0, 2, 3, 1)) - - # concat to attn_probs - # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) - attn_scores = tf.concat((attn_probs_from_global_key, attn_scores), axis=-1) - - return attn_scores - - def _compute_attn_output_with_global_indices( - self, - value_vectors, - attn_probs, - max_num_global_attn_indices, - is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero, - ): - batch_size = shape_list(attn_probs)[0] - - # cut local attn probs to global only - attn_probs_only_global = attn_probs[:, :, :, :max_num_global_attn_indices] - - # select global value vectors - global_value_vectors = tf.gather_nd(value_vectors, 
is_index_global_attn_nonzero) - - # create only global value vectors - value_vectors_only_global = tf.scatter_nd( - is_local_index_global_attn_nonzero, - global_value_vectors, - shape=( - batch_size, - max_num_global_attn_indices, - self.num_heads, - self.head_dim, - ), - ) - - # compute attn output only global - attn_output_only_global = tf.einsum("blhs,bshd->blhd", attn_probs_only_global, value_vectors_only_global) - - # reshape attn probs - attn_probs_without_global = attn_probs[:, :, :, max_num_global_attn_indices:] - - # compute attn output with global - attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value( - attn_probs_without_global, value_vectors, self.one_sided_attn_window_size - ) - - return attn_output_only_global + attn_output_without_global - - def _compute_global_attn_output_from_hidden( - self, - attn_output, - hidden_states, - max_num_global_attn_indices, - layer_head_mask, - is_local_index_global_attn_nonzero, - is_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero, - is_index_masked, - training, - ): - batch_size, seq_len = shape_list(hidden_states)[:2] - - # prepare global hidden states - global_attn_hidden_states = tf.gather_nd(hidden_states, is_index_global_attn_nonzero) - global_attn_hidden_states = tf.scatter_nd( - is_local_index_global_attn_nonzero, - global_attn_hidden_states, - shape=(batch_size, max_num_global_attn_indices, self.embed_dim), - ) - - # global key, query, value - global_query_vectors_only_global = self.query_global(global_attn_hidden_states) - global_key_vectors = self.key_global(hidden_states) - global_value_vectors = self.value_global(hidden_states) - - # normalize - global_query_vectors_only_global /= tf.math.sqrt( - tf.cast(self.head_dim, dtype=global_query_vectors_only_global.dtype) - ) - global_query_vectors_only_global = self.reshape_and_transpose(global_query_vectors_only_global, batch_size) - global_key_vectors = self.reshape_and_transpose(global_key_vectors, batch_size) - global_value_vectors = self.reshape_and_transpose(global_value_vectors, batch_size) - - # compute attn scores - global_attn_scores = tf.matmul(global_query_vectors_only_global, global_key_vectors, transpose_b=True) - - tf.debugging.assert_equal( - shape_list(global_attn_scores), - [batch_size * self.num_heads, max_num_global_attn_indices, seq_len], - message=( - "global_attn_scores have the wrong size. Size should be" - f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" - f" {shape_list(global_attn_scores)}." 
- ), - ) - - global_attn_scores = tf.reshape( - global_attn_scores, - (batch_size, self.num_heads, max_num_global_attn_indices, seq_len), - ) - global_attn_scores_trans = tf.transpose(global_attn_scores, (0, 2, 1, 3)) - mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple( - shape_list(global_attn_scores_trans)[-2:] - ) - global_attn_mask = tf.ones(mask_shape) * -10000.0 - global_attn_mask = tf.cast(global_attn_mask, dtype=global_attn_scores_trans.dtype) - - # scatter mask - global_attn_scores_trans = tf.tensor_scatter_nd_update( - global_attn_scores_trans, - is_local_index_no_global_attn_nonzero, - global_attn_mask, - ) - global_attn_scores = tf.transpose(global_attn_scores_trans, (0, 2, 1, 3)) - - # mask global attn scores - attn_mask = tf.tile(is_index_masked[:, None, None, :], (1, shape_list(global_attn_scores)[1], 1, 1)) - global_attn_scores = tf.where(attn_mask, -10000.0, global_attn_scores) - global_attn_scores = tf.reshape( - global_attn_scores, - (batch_size * self.num_heads, max_num_global_attn_indices, seq_len), - ) - - # compute global attn probs - global_attn_probs_float = stable_softmax(global_attn_scores, axis=-1) - - # apply layer head masking - if layer_head_mask is not None: - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) - global_attn_probs_float = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( - global_attn_probs_float, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len) - ) - global_attn_probs_float = tf.reshape( - global_attn_probs_float, (batch_size * self.num_heads, max_num_global_attn_indices, seq_len) - ) - - # dropout - global_attn_probs = self.global_dropout(global_attn_probs_float, training=training) - - # global attn output - global_attn_output = tf.matmul(global_attn_probs, global_value_vectors) - - tf.debugging.assert_equal( - shape_list(global_attn_output), - [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], - message=( - "global_attn_output tensor has the wrong size. Size should be" - f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" - f" {shape_list(global_attn_output)}." 
- ), - ) - - global_attn_output = tf.reshape( - global_attn_output, - (batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim), - ) - - # get only non zero global attn output - nonzero_global_attn_output = tf.gather_nd( - tf.transpose(global_attn_output, (0, 2, 1, 3)), - is_local_index_global_attn_nonzero, - ) - nonzero_global_attn_output = tf.reshape( - nonzero_global_attn_output, - (shape_list(is_local_index_global_attn_nonzero)[0], -1), - ) - - # overwrite values with global attention - attn_output = tf.tensor_scatter_nd_update( - attn_output, is_index_global_attn_nonzero, nonzero_global_attn_output - ) - - global_attn_probs = tf.reshape( - global_attn_probs, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len) - ) - - return attn_output, global_attn_probs - - def reshape_and_transpose(self, vector, batch_size): - return tf.reshape( - tf.transpose( - tf.reshape(vector, (batch_size, -1, self.num_heads, self.head_dim)), - (0, 2, 1, 3), - ), - (batch_size * self.num_heads, -1, self.head_dim), - ) - - -class TFLEDEncoderAttention(tf.keras.layers.Layer): - def __init__(self, config, layer_id, **kwargs): - super().__init__(**kwargs) - self.longformer_self_attn = TFLEDEncoderSelfAttention(config, layer_id=layer_id, name="longformer_self_attn") - self.output_dense = tf.keras.layers.Dense(config.d_model, use_bias=True, name="output") - - def call(self, inputs, training=False): - ( - hidden_states, - attention_mask, - layer_head_mask, - is_index_masked, - is_index_global_attn, - is_global_attn, - ) = inputs - - self_outputs = self.longformer_self_attn( - [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn], - training=training, - ) - - attention_output = self.output_dense(self_outputs[0], training=training) - outputs = (attention_output,) + self_outputs[1:] - - return outputs - - -class TFLEDDecoderAttention(tf.keras.layers.Layer): - """Multi-headed attention from "Attention Is All You Need""" - - def __init__( - self, - embed_dim: int, - num_heads: int, - dropout: float = 0.0, - is_decoder: bool = False, - bias: bool = True, - **kwargs, - ): - super().__init__(**kwargs) - self.embed_dim = embed_dim - - self.num_heads = num_heads - self.dropout = tf.keras.layers.Dropout(dropout) - self.head_dim = embed_dim // num_heads - assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" - self.scaling = self.head_dim**-0.5 - self.is_decoder = is_decoder - - self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") - self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") - self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") - self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") - - def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): - return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) - - def call( - self, - hidden_states: tf.Tensor, - key_value_states: tf.Tensor | None = None, - past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, - attention_mask: tf.Tensor | None = None, - layer_head_mask: tf.Tensor | None = None, - training=False, - ) -> Tuple[tf.Tensor, tf.Tensor | None]: - """Input shape: Batch x Time x Channel""" - - # if key_value_states are provided this layer is used as a cross-attention layer - # for the decoder - is_cross_attention = key_value_states is not None - bsz, tgt_len, embed_dim = shape_list(hidden_states) - - # get 
query proj - query_states = self.q_proj(hidden_states) * self.scaling - # get key, value proj - if is_cross_attention and past_key_value is not None: - # reuse k,v, cross_attentions - key_states = past_key_value[0] - value_states = past_key_value[1] - elif is_cross_attention: - # cross_attentions - key_states = self._shape(self.k_proj(key_value_states), -1, bsz) - value_states = self._shape(self.v_proj(key_value_states), -1, bsz) - elif past_key_value is not None: - # reuse k, v, self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - key_states = tf.concat([past_key_value[0], key_states], axis=2) - value_states = tf.concat([past_key_value[1], value_states], axis=2) - else: - # self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - - if self.is_decoder: - # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. - # Further calls to cross_attention layer can then reuse all cross-attention - # key/value_states (first "if" case) - # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of - # all previous decoder key/value_states. Further calls to uni-directional self-attention - # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) - # if encoder bi-directional self-attention `past_key_value` is always `None` - past_key_value = (key_states, value_states) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) - key_states = tf.reshape(key_states, proj_shape) - value_states = tf.reshape(value_states, proj_shape) - - src_len = shape_list(key_states)[1] - attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - - tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], - message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" - ), - ) - - if attention_mask is not None: - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - - attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + tf.cast( - attention_mask, dtype=attn_weights.dtype - ) - attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) - - attn_weights = stable_softmax(attn_weights, axis=-1) - - if layer_head_mask is not None: - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) - - attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( - attn_weights, (bsz, self.num_heads, tgt_len, src_len) - ) - attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) - - attn_probs = self.dropout(attn_weights, training=training) - - attn_output = tf.matmul(attn_probs, value_states) - - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) 
- - attn_output = tf.transpose( - tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) - ) - attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) - - attn_output = self.out_proj(attn_output) - attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) - - return attn_output, attn_weights, past_key_value - - -class TFLEDEncoderLayer(tf.keras.layers.Layer): - def __init__(self, config: LEDConfig, layer_id: int, **kwargs): - super().__init__(**kwargs) - self.embed_dim = config.d_model - self.self_attn = TFLEDEncoderAttention(config, layer_id, name="self_attn") - self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.activation_fn = get_tf_activation(config.activation_function) - self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) - self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") - self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") - self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") - - def call( - self, - hidden_states: tf.Tensor, - attention_mask: tf.Tensor, - layer_head_mask: tf.Tensor, - is_index_masked: tf.Tensor, - is_index_global_attn: tf.Tensor, - is_global_attn: bool, - training=False, - ): - """ - Args: - hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* - attention_mask (`tf.Tensor`): attention mask of size - *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. - layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size - *(config.encoder_attention_heads,)*. 
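After the per-head attention above, the result is merged back from `(bsz * num_heads, tgt_len, head_dim)` to `(bsz, tgt_len, embed_dim)` before the output projection. A small self-contained sketch of that round trip with made-up sizes:

```python
import tensorflow as tf

bsz, num_heads, tgt_len, head_dim = 2, 4, 3, 8
embed_dim = num_heads * head_dim

attn_output = tf.random.normal((bsz * num_heads, tgt_len, head_dim))
# split the fused batch*heads axis, move heads next to head_dim, then merge them
attn_output = tf.reshape(attn_output, (bsz, num_heads, tgt_len, head_dim))
attn_output = tf.transpose(attn_output, (0, 2, 1, 3))   # (bsz, tgt_len, num_heads, head_dim)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
assert attn_output.shape == (bsz, tgt_len, embed_dim)
```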
- """ - residual = hidden_states - layer_outputs = self.self_attn( - [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn], - training=training, - ) - - hidden_states = layer_outputs[0] - - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) - - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.self_attn_layer_norm(hidden_states) - residual = hidden_states - hidden_states = self.activation_fn(self.fc1(hidden_states)) - hidden_states = self.activation_dropout(hidden_states, training=training) - hidden_states = self.fc2(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.final_layer_norm(hidden_states) - - return (hidden_states,) + layer_outputs[1:] - - -class TFLEDDecoderLayer(tf.keras.layers.Layer): - def __init__(self, config: LEDConfig, **kwargs): - super().__init__(**kwargs) - self.embed_dim = config.d_model - self.self_attn = TFLEDDecoderAttention( - embed_dim=self.embed_dim, - num_heads=config.decoder_attention_heads, - dropout=config.attention_dropout, - name="self_attn", - is_decoder=True, - ) - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.activation_fn = get_tf_activation(config.activation_function) - self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) - - self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") - self.encoder_attn = TFLEDDecoderAttention( - self.embed_dim, - config.decoder_attention_heads, - dropout=config.attention_dropout, - name="encoder_attn", - is_decoder=True, - ) - self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") - self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") - self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") - self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") - - def call( - self, - hidden_states, - attention_mask: tf.Tensor | None = None, - encoder_hidden_states: tf.Tensor | None = None, - encoder_attention_mask: tf.Tensor | None = None, - layer_head_mask: tf.Tensor | None = None, - encoder_layer_head_mask: tf.Tensor | None = None, - past_key_value: Tuple[tf.Tensor] | None = None, - training=False, - ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: - """ - Args: - hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* - attention_mask (`tf.Tensor`): attention mask of size - *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. - encoder_hidden_states (`tf.Tensor`): - cross attention input to the layer of shape *(batch, seq_len, embed_dim)* - encoder_attention_mask (`tf.Tensor`): encoder attention mask of size - *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. - layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size - *(config.encoder_attention_heads,)*. - encoder_layer_head_mask (`tf.Tensor`): mask for encoder attention heads in a given layer of - size *(config.encoder_attention_heads,)*. 
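Both sub-blocks of the encoder layer above follow the same post-layer-norm residual pattern: sub-layer, dropout, add the residual, then `LayerNormalization`. A compact sketch of the feed-forward half with hypothetical dimensions; "gelu" stands in for `config.activation_function`, and one dropout layer is reused where the real layer has separate dropout and activation-dropout rates.

```python
import tensorflow as tf

d_model, ffn_dim, dropout_rate = 16, 64, 0.1
fc1 = tf.keras.layers.Dense(ffn_dim, activation="gelu")
fc2 = tf.keras.layers.Dense(d_model)
dropout = tf.keras.layers.Dropout(dropout_rate)
final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5)

hidden_states = tf.random.normal((2, 10, d_model))
residual = hidden_states
hidden_states = fc2(dropout(fc1(hidden_states), training=True))
hidden_states = dropout(hidden_states, training=True)
hidden_states = final_layer_norm(residual + hidden_states)  # residual add, then post-norm
```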
- past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states - """ - residual = hidden_states - - # Self-Attention - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None - # add present self-attn cache to positions 1,2 of present_key_value tuple - hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - past_key_value=self_attn_past_key_value, - attention_mask=attention_mask, - layer_head_mask=layer_head_mask, - ) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.self_attn_layer_norm(hidden_states) - - # Cross-Attention Block - cross_attn_present_key_value = None - cross_attn_weights = None - if encoder_hidden_states is not None: - residual = hidden_states - - # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple - cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None - hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( - hidden_states=hidden_states, - key_value_states=encoder_hidden_states, - attention_mask=encoder_attention_mask, - layer_head_mask=encoder_layer_head_mask, - past_key_value=cross_attn_past_key_value, - ) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.encoder_attn_layer_norm(hidden_states) - - # add cross-attn to positions 3,4 of present_key_value tuple - present_key_value = present_key_value + cross_attn_present_key_value - - # Fully Connected - residual = hidden_states - hidden_states = self.activation_fn(self.fc1(hidden_states)) - hidden_states = self.activation_dropout(hidden_states, training=training) - hidden_states = self.fc2(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.final_layer_norm(hidden_states) - - return ( - hidden_states, - self_attn_weights, - cross_attn_weights, - present_key_value, - ) - - -class TFLEDPreTrainedModel(TFPreTrainedModel): - config_class = LEDConfig - base_model_prefix = "led" - - @property - def input_signature(self): - sig = super().input_signature - sig["global_attention_mask"] = tf.TensorSpec((None, None), tf.int32, name="global_attention_mask") - return sig - - -@dataclass -# Copied from transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput with TFLongformer->TFLEDEncoder -class TFLEDEncoderBaseModelOutput(ModelOutput): - """ - Base class for Longformer's outputs, with potential hidden states, local and global attentions. - - Args: - last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape - `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
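The per-layer cache handled above is a flat 4-tuple: the first two tensors are the decoder self-attention key/value states and the last two are the cross-attention key/value states, which is why the code slices `past_key_value[:2]` and `past_key_value[-2:]`. A small sketch of that layout with placeholder tensors:

```python
import tensorflow as tf

# hypothetical cached states for one decoder layer
self_k = self_v = tf.zeros((2, 12, 5, 64))     # (batch, heads, past_len, head_dim)
cross_k = cross_v = tf.zeros((2, 12, 30, 64))  # keys/values over the encoder sequence

past_key_value = (self_k, self_v, cross_k, cross_v)
self_attn_past_key_value = past_key_value[:2]    # fed to the self-attention block
cross_attn_past_key_value = past_key_value[-2:]  # fed to the cross-attention block

# after both attention calls the layer rebuilds the 4-tuple for the next step
present_key_value = self_attn_past_key_value + cross_attn_past_key_value
```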
- attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + - attention_window + 1)`, where `x` is the number of tokens with global attention mask. - - Local attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token in the sequence to every token with - global attention (first `x` values) and to every token in the attention window (remaining `attention_window - + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the - remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a - token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding - (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. - If the attention window contains a token with global attention, the attention weight at the corresponding - index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global - attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be - accessed from `global_attentions`. - global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` - is the number of tokens with global attention mask. - - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - last_hidden_state: tf.Tensor = None - hidden_states: Tuple[tf.Tensor] | None = None - attentions: Tuple[tf.Tensor] | None = None - global_attentions: Tuple[tf.Tensor] | None = None - - -@dataclass -class TFLEDSeq2SeqModelOutput(ModelOutput): - """ - Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential - decoding. - - Args: - last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the decoder of the model. - - If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, - hidden_size)` is output. - past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): - List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, - sequence_length, embed_size_per_head)`). - - Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be - used (see `past_key_values` input) to speed up sequential decoding. - decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape - `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. 
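The local and global attention shapes documented above can be made concrete with a quick calculation; the sizes below are made up purely for illustration (one globally-attending token, a 512-token window):

```python
# hypothetical sizes, just to make the documented shapes concrete
batch_size, num_heads, sequence_length = 2, 12, 4096
attention_window = 512
x = 1  # number of tokens with global attention (e.g. only the first token)

local_attentions_shape = (batch_size, num_heads, sequence_length, x + attention_window + 1)
global_attentions_shape = (batch_size, num_heads, sequence_length, x)
print(local_attentions_shape)   # (2, 12, 4096, 514)
print(global_attentions_shape)  # (2, 12, 4096, 1)
```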
- decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the - self-attention heads. - cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the - weighted average in the cross-attention heads. - encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Sequence of hidden-states at the output of the last layer of the encoder of the model. - encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape - `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. - encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the - self-attention heads. - encoder_global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` - is the number of tokens with global attention mask. - - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - last_hidden_state: tf.Tensor = None - past_key_values: List[tf.Tensor] | None = None - decoder_hidden_states: Tuple[tf.Tensor] | None = None - decoder_attentions: Tuple[tf.Tensor] | None = None - cross_attentions: Tuple[tf.Tensor] | None = None - encoder_last_hidden_state: tf.Tensor | None = None - encoder_hidden_states: Tuple[tf.Tensor] | None = None - encoder_attentions: Tuple[tf.Tensor] | None = None - encoder_global_attentions: Tuple[tf.Tensor] | None = None - - -@dataclass -class TFLEDSeq2SeqLMOutput(ModelOutput): - """ - Base class for sequence-to-sequence language models outputs. - - Args: - loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): - Language modeling loss. - logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). - past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): - List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, - sequence_length, embed_size_per_head)`). 
- - Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be - used (see `past_key_values` input) to speed up sequential decoding. - decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape - `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. - decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the - self-attention heads. - cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the - weighted average in the cross-attention heads. - encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Sequence of hidden-states at the output of the last layer of the encoder of the model. - encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape - `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. - encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the - self-attention heads. - encoder_global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` - is the number of tokens with global attention mask. - - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - loss: tf.Tensor | None = None - logits: tf.Tensor = None - past_key_values: List[tf.Tensor] | None = None - decoder_hidden_states: Tuple[tf.Tensor] | None = None - decoder_attentions: Tuple[tf.Tensor] | None = None - cross_attentions: Tuple[tf.Tensor] | None = None - encoder_last_hidden_state: tf.Tensor | None = None - encoder_hidden_states: Tuple[tf.Tensor] | None = None - encoder_attentions: Tuple[tf.Tensor] | None = None - encoder_global_attentions: Tuple[tf.Tensor] | None = None - - -LED_START_DOCSTRING = r""" - This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it - as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and - behavior. - - - - TensorFlow models and layers in `transformers` accept two formats as input: - - - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional argument. - - The reason the second format is supported is that Keras methods prefer this format when passing inputs to models - and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just - pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second - format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with - the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first - positional argument: - - - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: - `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - - a dictionary with one or several input Tensors associated to the input names given in the docstring: - `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` - - Note that when creating models and layers with - [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry - about any of this, as you can just pass inputs like you would to any other Python function! - - - - Args: - config ([`LEDConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. -""" - -LED_INPUTS_DOCSTRING = r""" - Args: - input_ids (`tf.Tensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`tf.Tensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): - Indices of decoder input sequence tokens in the vocabulary. - - Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - - LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` - is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). 
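The three call conventions listed above can be exercised directly. A brief sketch, assuming the `allenai/led-base-16384` checkpoint (mentioned later in this file) can be downloaded with TensorFlow weights:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFLEDModel

tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
model = TFLEDModel.from_pretrained("allenai/led-base-16384")
enc = tokenizer("A long document to encode.", return_tensors="tf")

out1 = model(enc["input_ids"])                                   # a single tensor
out2 = model([enc["input_ids"], enc["attention_mask"]])          # a list, in docstring order
out3 = model({"input_ids": enc["input_ids"],
              "attention_mask": enc["attention_mask"]})          # a dict keyed by input names
```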
- decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): - will be made by default and ignore pad tokens. It is not recommended to set this for most use cases. - head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - encoder_outputs (`tf.Tensor`, *optional*): - hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. - of shape `(batch_size, sequence_length, hidden_size)` is a sequence of - past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) - contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `decoder_input_ids` of shape `(batch_size, sequence_length)`. - use_cache (`bool`, *optional*, defaults to `True`): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). Set to `False` during training, `True` during generation - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the - config will be used instead. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. This argument can be used only in eager mode, in graph mode the value in the config will be - used instead. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in - eager mode, in graph mode the value will always be set to True. - training (`bool`, *optional*, defaults to `False`): - Whether or not to use the model in training mode (some modules like dropout modules have different - behaviors between training and evaluation). -""" - - -@keras_serializable -class TFLEDEncoder(tf.keras.layers.Layer): - config_class = LEDConfig - """ - Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is a - [`TFLEDEncoderLayer`]. 
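As the `past_key_values`/`use_cache` description above says, once a cache has been returned only the newest decoder token needs to be fed back in. A hedged sketch of one manual greedy decoding step with the generation head (in practice `model.generate` handles this loop; the checkpoint name is the one used in the usage example later in this file, and downloading it is assumed to work):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFLEDForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

enc = tokenizer("A long article to summarize.", return_tensors="tf")
decoder_input_ids = tf.constant([[model.config.decoder_start_token_id]], dtype=tf.int32)

# first step: full forward pass, the cache is returned because use_cache=True
out = model(input_ids=enc["input_ids"], decoder_input_ids=decoder_input_ids, use_cache=True)
next_token = tf.argmax(out.logits[:, -1, :], axis=-1, output_type=tf.int32)[:, None]

# later steps: pass only the newest token together with the cache
out = model(
    input_ids=enc["input_ids"],
    decoder_input_ids=next_token,
    past_key_values=out.past_key_values,
    use_cache=True,
)
```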
- - Args: - config: LEDConfig - """ - - def __init__(self, config: LEDConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): - super().__init__(**kwargs) - self.config = config - self.dropout = tf.keras.layers.Dropout(config.dropout) - if config.encoder_layerdrop > 0: - logger.warning("Layerdrop is currently disabled in TFLED models.") - self.layerdrop = 0.0 - self.padding_idx = config.pad_token_id - - if isinstance(config.attention_window, int): - assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value" - assert config.attention_window > 0, "`config.attention_window` has to be positive" - config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer - else: - assert len(config.attention_window) == config.num_hidden_layers, ( - "`len(config.attention_window)` should equal `config.num_hidden_layers`. " - f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}" - ) - - self.attention_window = config.attention_window - self.embed_tokens = embed_tokens - self.embed_positions = TFLEDLearnedPositionalEmbedding( - config.max_encoder_position_embeddings, - config.d_model, - name="embed_positions", - ) - self.layers = [TFLEDEncoderLayer(config, i, name=f"layers.{i}") for i in range(config.encoder_layers)] - self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") - - def get_embed_tokens(self): - return self.embed_tokens - - def set_embed_tokens(self, embed_tokens): - self.embed_tokens = embed_tokens - - @unpack_inputs - def call( - self, - input_ids=None, - inputs_embeds=None, - attention_mask=None, - global_attention_mask=None, - head_mask=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, - ): - """ - Args: - input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - head_mask (`tf.Tensor` of shape `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. 
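The encoder constructor above accepts `config.attention_window` either as a single even, positive integer or as one value per layer, and normalises the former into the latter. A minimal stand-alone sketch of that normalisation with hypothetical values:

```python
# hypothetical config values
attention_window = 512      # could also be e.g. [256, 256, 512, 512, 512, 512]
num_hidden_layers = 6

if isinstance(attention_window, int):
    assert attention_window % 2 == 0, "`attention_window` has to be an even value"
    assert attention_window > 0, "`attention_window` has to be positive"
    attention_window = [attention_window] * num_hidden_layers  # one value per layer
else:
    assert len(attention_window) == num_hidden_layers
print(attention_window)  # [512, 512, 512, 512, 512, 512]
```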
- return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = shape_list(input_ids) - # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name - # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope` - # is used with a name ending in `/`, that name replaces the current name scope. - # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0) - context = [] - if hasattr(self.embed_tokens, "load_weight_prefix"): - context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) - with ContextManagers(context): - check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) - inputs_embeds = self.embed_tokens(input_ids) - elif inputs_embeds is not None: - input_shape = shape_list(inputs_embeds)[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if attention_mask is None: - attention_mask = tf.fill(input_shape, 1) - - # merge `global_attention_mask` and `attention_mask` - if global_attention_mask is not None: - attention_mask = attention_mask * tf.cast((global_attention_mask + 1), dtype=attention_mask.dtype) - - padding_len, input_ids, attention_mask, inputs_embeds = self._pad_to_window_size( - input_ids=input_ids, - attention_mask=attention_mask, - inputs_embeds=inputs_embeds, - pad_token_id=self.padding_idx, - ) - - input_shape = shape_list(attention_mask) - # is index masked or global attention - is_index_masked = tf.math.less(tf.cast(attention_mask, tf.int8), 1) - is_index_global_attn = tf.math.greater(tf.cast(attention_mask, tf.int8), 1) - is_global_attn = tf.math.reduce_any(is_index_global_attn) - - embed_pos = self.embed_positions(input_shape) - hidden_states = inputs_embeds + embed_pos - hidden_states = self.layernorm_embedding(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - - # check attention mask and invert - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _expand_mask(attention_mask)[:, 0, 0, :] - attention_mask = attention_mask[:, :, None, None] - - encoder_states = () if output_hidden_states else None - all_attentions = all_global_attentions = () if output_attentions else None - - # check if head_mask has a correct number of layers specified if desired - if head_mask is not None: - tf.debugging.assert_equal( - shape_list(head_mask)[0], - len(self.layers), - message=( - f"The head_mask should be specified for {len(self.layers)} layers, but it is for" - f" {shape_list(head_mask)[0]}." 
- ), - ) - - # encoder layers - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - hidden_states_to_add = self.compute_hidden_states(hidden_states, padding_len) - encoder_states = encoder_states + (hidden_states_to_add,) - # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) - dropout_probability = random.uniform(0, 1) - if training and (dropout_probability < self.layerdrop): # skip the layer - continue - - layer_outputs = encoder_layer( - hidden_states=hidden_states, - attention_mask=attention_mask, - layer_head_mask=head_mask[idx] if head_mask is not None else None, - is_index_masked=is_index_masked, - is_index_global_attn=is_index_global_attn, - is_global_attn=is_global_attn, - ) - - hidden_states = layer_outputs[0] - - if output_attentions: - # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1) - all_attentions = all_attentions + (tf.transpose(layer_outputs[1], (0, 2, 1, 3)),) - - # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn - all_global_attentions = all_global_attentions + (tf.transpose(layer_outputs[2], (0, 1, 3, 2)),) - - # undo padding - # unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1) - hidden_states = self.compute_hidden_states(hidden_states, padding_len) - - # undo padding - if output_attentions: - all_attentions = ( - tuple([state[:, :, :-padding_len, :] for state in all_attentions]) - if padding_len > 0 - else all_attentions - ) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return TFLEDEncoderBaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=encoder_states, - attentions=all_attentions, - global_attentions=all_global_attentions, - ) - - @tf.function - def compute_hidden_states(self, hidden_states, padding_len): - return hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states - - def _pad_to_window_size( - self, - input_ids, - attention_mask, - inputs_embeds, - pad_token_id, - ): - """A helper function to pad tokens and mask to work with implementation of Longformer selfattention.""" - # padding - attention_window = ( - self.attention_window if isinstance(self.attention_window, int) else max(self.attention_window) - ) - - assert attention_window % 2 == 0, f"`attention_window` should be an even value. 
Given {attention_window}" - - input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds) - batch_size, seq_len = input_shape[:2] - padding_len = (attention_window - seq_len % attention_window) % attention_window - - if padding_len > 0: - logger.info( - f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " - f"`config.attention_window`: {attention_window}" - ) - - paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]]) - - if input_ids is not None: - input_ids = tf.pad(input_ids, paddings, constant_values=pad_token_id) - - if inputs_embeds is not None: - if padding_len > 0: - input_ids_padding = tf.fill((batch_size, padding_len), pad_token_id) - inputs_embeds_padding = self.embed_tokens(input_ids_padding) - inputs_embeds = tf.concat([inputs_embeds, inputs_embeds_padding], axis=-2) - - attention_mask = tf.pad(attention_mask, paddings, constant_values=False) # no attention on the padding tokens - - return ( - padding_len, - input_ids, - attention_mask, - inputs_embeds, - ) - - -@keras_serializable -class TFLEDDecoder(tf.keras.layers.Layer): - config_class = LEDConfig - """ - Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFLEDDecoderLayer`] - - Args: - config: LEDConfig - embed_tokens: output embedding - """ - - def __init__(self, config: LEDConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): - super().__init__(**kwargs) - self.config = config - self.padding_idx = config.pad_token_id - self.embed_tokens = embed_tokens - if config.decoder_layerdrop > 0: - logger.warning("Layerdrop is currently disabled in TFLED models.") - self.layerdrop = 0.0 - self.embed_positions = TFLEDLearnedPositionalEmbedding( - config.max_decoder_position_embeddings, - config.d_model, - name="embed_positions", - ) - self.layers = [TFLEDDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] - self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") - - self.dropout = tf.keras.layers.Dropout(config.dropout) - - def set_embed_tokens(self, embed_tokens): - self.embed_tokens = embed_tokens - - @unpack_inputs - def call( - self, - input_ids=None, - inputs_embeds=None, - attention_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - head_mask=None, - encoder_head_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, - ): - r""" - Args: - input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) - attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - [What are attention masks?](../glossary#attention-mask) - encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention - of the decoder. 
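`_pad_to_window_size` above rounds the sequence length up to the next multiple of the (largest) attention window and pads both `input_ids` and the attention mask to that length. A self-contained sketch of the same arithmetic with toy values:

```python
import tensorflow as tf

attention_window = 8
pad_token_id = 1
input_ids = tf.constant([[5, 6, 7, 9, 10]])          # seq_len = 5
attention_mask = tf.ones_like(input_ids)

seq_len = int(input_ids.shape[1])
padding_len = (attention_window - seq_len % attention_window) % attention_window  # -> 3
paddings = [[0, 0], [0, padding_len]]

input_ids = tf.pad(input_ids, paddings, constant_values=pad_token_id)
attention_mask = tf.pad(attention_mask, paddings, constant_values=0)  # no attention on padding
print(input_ids.shape)  # (1, 8)
```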
- encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): - Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values - selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - [What are attention masks?](../glossary#attention-mask) - head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - encoder_head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention - on hidden heads. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up - decoding. If `past_key_values` are used, the user can optionally input only the last - `decoder_input_ids` (those that don't have their past key value states given to this model) of shape - `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. - inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") - elif input_ids is not None: - input_shape = shape_list(input_ids) - elif inputs_embeds is not None: - input_shape = shape_list(inputs_embeds)[:-1] - else: - raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") - - past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 - - # embed positions - positions = self.embed_positions(input_shape, past_key_values_length) - - if inputs_embeds is None: - # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name - # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope` - # is used with a name ending in `/`, that name replaces the current name scope. 
- # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0) - context = [] - if hasattr(self.embed_tokens, "load_weight_prefix"): - context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) - with ContextManagers(context): - check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) - inputs_embeds = self.embed_tokens(input_ids) - - hidden_states = inputs_embeds - - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - if input_shape[-1] > 1: - combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) - else: - combined_attention_mask = _expand_mask( - tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] - ) - - if attention_mask is not None and input_shape[-1] > 1: - combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) - - if encoder_hidden_states is not None and encoder_attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) - - hidden_states = self.layernorm_embedding(hidden_states + positions) - hidden_states = self.dropout(hidden_states, training=training) - - # decoder layers - all_hidden_states = () - all_self_attns = () - all_cross_attentions = () - present_key_values = () - - # check if head_mask has a correct number of layers specified if desired - if head_mask is not None: - tf.debugging.assert_equal( - shape_list(head_mask)[0], - len(self.layers), - message=( - f"The head_mask should be specified for {len(self.layers)} layers, but it is for" - f" {shape_list(head_mask)[0]}." - ), - ) - - for idx, decoder_layer in enumerate(self.layers): - # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) - if output_hidden_states: - all_hidden_states += (hidden_states,) - dropout_probability = random.uniform(0, 1) - - if training and (dropout_probability < self.layerdrop): - continue - - past_key_value = past_key_values[idx] if past_key_values is not None else None - - hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( - hidden_states, - attention_mask=combined_attention_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - layer_head_mask=head_mask[idx] if head_mask is not None else None, - encoder_layer_head_mask=encoder_head_mask[idx] if encoder_head_mask is not None else None, - past_key_value=past_key_value, - ) - - if use_cache: - present_key_values += (present_key_value,) - - if output_attentions: - all_self_attns += (layer_self_attn,) - all_cross_attentions += (layer_cross_attn,) - - if output_hidden_states: - all_hidden_states += (hidden_states,) - else: - all_hidden_states = None - - all_self_attns = all_self_attns if output_attentions else None - all_cross_attentions = all_cross_attentions if output_attentions else None - - present_key_values = present_key_values if use_cache else None - - if not return_dict: - return tuple( - v - for v in [hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attentions] - if v is not None - ) - else: - return TFBaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=present_key_values, - hidden_states=all_hidden_states, - attentions=all_self_attns, - cross_attentions=all_cross_attentions, - ) - - -@keras_serializable -class 
TFLEDMainLayer(tf.keras.layers.Layer): - config_class = LEDConfig - - def __init__(self, config: LEDConfig, **kwargs): - super().__init__(**kwargs) - self.config = config - self.shared = tf.keras.layers.Embedding( - input_dim=config.vocab_size, - output_dim=config.d_model, - embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std), - name="led.shared", - ) - # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) - self.shared.load_weight_prefix = "led.shared" - - self.encoder = TFLEDEncoder(config, self.shared, name="encoder") - self.decoder = TFLEDDecoder(config, self.shared, name="decoder") - - def get_input_embeddings(self): - return self.shared - - def set_input_embeddings(self, new_embeddings): - self.shared = new_embeddings - self.encoder.embed_tokens = self.shared - self.decoder.embed_tokens = self.shared - - @unpack_inputs - def call( - self, - input_ids=None, - attention_mask=None, - decoder_input_ids=None, - decoder_attention_mask=None, - head_mask=None, - decoder_head_mask=None, - encoder_outputs: Optional[Union[Tuple, TFLEDEncoderBaseModelOutput]] = None, - global_attention_mask=None, - past_key_values=None, - inputs_embeds=None, - decoder_inputs_embeds=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, - **kwargs, - ): - if decoder_input_ids is None and decoder_inputs_embeds is None: - use_cache = False - - if encoder_outputs is None: - encoder_outputs = self.encoder( - input_ids=input_ids, - attention_mask=attention_mask, - global_attention_mask=global_attention_mask, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - # If the user passed a tuple for encoder_outputs, we wrap it in a TFLEDEncoderBaseModelOutput when return_dict=True - elif return_dict and not isinstance(encoder_outputs, TFLEDEncoderBaseModelOutput): - encoder_outputs = TFLEDEncoderBaseModelOutput( - last_hidden_state=encoder_outputs[0], - hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, - attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, - ) - # If the user passed a TFLEDEncoderBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False - elif not return_dict and not isinstance(encoder_outputs, tuple): - encoder_outputs = encoder_outputs.to_tuple() - - decoder_outputs = self.decoder( - decoder_input_ids, - attention_mask=decoder_attention_mask, - encoder_hidden_states=encoder_outputs[0], - encoder_attention_mask=attention_mask, - head_mask=decoder_head_mask, - encoder_head_mask=head_mask, - past_key_values=past_key_values, - inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - - if not return_dict: - return decoder_outputs + encoder_outputs - - return TFLEDSeq2SeqModelOutput( - last_hidden_state=decoder_outputs.last_hidden_state, - past_key_values=decoder_outputs.past_key_values, - decoder_hidden_states=decoder_outputs.hidden_states, - decoder_attentions=decoder_outputs.attentions, - cross_attentions=decoder_outputs.cross_attentions, - encoder_last_hidden_state=encoder_outputs.last_hidden_state, - encoder_hidden_states=encoder_outputs.hidden_states, - encoder_attentions=encoder_outputs.attentions, - 
encoder_global_attentions=encoder_outputs.global_attentions, - ) - - -@add_start_docstrings( - "The bare LED Model outputting raw hidden-states without any specific head on top.", - LED_START_DOCSTRING, -) -class TFLEDModel(TFLEDPreTrainedModel): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.led = TFLEDMainLayer(config, name="led") - - def get_encoder(self): - return self.led.encoder - - def get_decoder(self): - return self.led.decoder - - @unpack_inputs - @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFLEDSeq2SeqModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: tf.Tensor | None = None, - decoder_input_ids: tf.Tensor | None = None, - decoder_attention_mask: tf.Tensor | None = None, - head_mask: tf.Tensor | None = None, - decoder_head_mask: tf.Tensor | None = None, - encoder_outputs: tf.Tensor | None = None, - global_attention_mask: tf.Tensor | None = None, - past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, - inputs_embeds: tf.Tensor | None = None, - decoder_inputs_embeds: tf.Tensor | None = None, - use_cache: bool | None = None, - output_attentions: bool | None = None, - output_hidden_states: bool | None = None, - return_dict: bool | None = None, - training: bool = False, - **kwargs, - ) -> Tuple[tf.Tensor] | TFLEDSeq2SeqModelOutput: - outputs = self.led( - input_ids=input_ids, - attention_mask=attention_mask, - decoder_input_ids=decoder_input_ids, - decoder_attention_mask=decoder_attention_mask, - encoder_outputs=encoder_outputs, - global_attention_mask=global_attention_mask, - head_mask=head_mask, - decoder_head_mask=decoder_head_mask, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - decoder_inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - - return outputs - - def serving_output(self, output): - pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None - dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None - dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None - cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None - enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None - enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None - enc_g_attns = tf.convert_to_tensor(output.encoder_global_attentions) if self.config.output_attentions else None - - return TFLEDSeq2SeqModelOutput( - last_hidden_state=output.last_hidden_state, - past_key_values=pkv, - decoder_hidden_states=dec_hs, - decoder_attentions=dec_attns, - cross_attentions=cross_attns, - encoder_last_hidden_state=output.encoder_last_hidden_state, - encoder_hidden_states=enc_hs, - encoder_attentions=enc_attns, - encoder_global_attentions=enc_g_attns, - ) - - -# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer -class BiasLayer(tf.keras.layers.Layer): - """ - Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, - so all weights have to be registered in a layer. 
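The main layer above creates one shared `Embedding` and hands the same instance to both the encoder and the decoder, so `set_input_embeddings` only has to update a few references to retie the weights. A minimal stand-alone sketch of that wiring using toy layer classes (not the real encoder/decoder):

```python
import tensorflow as tf

class TinyEncoder(tf.keras.layers.Layer):
    def __init__(self, embed_tokens, **kwargs):
        super().__init__(**kwargs)
        self.embed_tokens = embed_tokens

class TinyDecoder(tf.keras.layers.Layer):
    def __init__(self, embed_tokens, **kwargs):
        super().__init__(**kwargs)
        self.embed_tokens = embed_tokens

shared = tf.keras.layers.Embedding(input_dim=100, output_dim=16)
encoder, decoder = TinyEncoder(shared), TinyDecoder(shared)
assert encoder.embed_tokens is decoder.embed_tokens  # one weight matrix for both

# set_input_embeddings: swap the table everywhere it is referenced
new_embeddings = tf.keras.layers.Embedding(input_dim=100, output_dim=16)
encoder.embed_tokens = new_embeddings
decoder.embed_tokens = new_embeddings
```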
- """ - - def __init__(self, shape, initializer, trainable, name, **kwargs): - super().__init__(name=name, **kwargs) - # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of - # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: - # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 - self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) - - def call(self, x): - return x + self.bias - - -@add_start_docstrings( - "The LED Model with a language modeling head. Can be used for summarization.", - LED_START_DOCSTRING, -) -class TFLEDForConditionalGeneration(TFLEDPreTrainedModel): - _keys_to_ignore_on_load_unexpected = [ - r"led.encoder.embed_tokens.weight", - r"led.decoder.embed_tokens.weight", - ] - - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.led = TFLEDMainLayer(config, name="led") - self.use_cache = config.use_cache - # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.bias_layer = BiasLayer( - name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False - ) - - # TODO (Joao): investigate why LED has numerical issues in XLA generate - self.supports_xla_generation = False - - def get_decoder(self): - return self.led.decoder - - def get_encoder(self): - return self.led.encoder - - def get_bias(self): - return {"final_logits_bias": self.bias_layer.bias} - - def set_bias(self, value): - # Replaces the existing layers containing bias for correct (de)serialization. - vocab_size = value["final_logits_bias"].shape[-1] - self.bias_layer = BiasLayer( - name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False - ) - self.bias_layer.bias.assign(value["final_logits_bias"]) - - def get_output_embeddings(self): - return self.get_input_embeddings() - - def set_output_embeddings(self, value): - self.set_input_embeddings(value) - - @unpack_inputs - @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=TFLEDSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - decoder_input_ids: np.ndarray | tf.Tensor | None = None, - decoder_attention_mask: np.ndarray | tf.Tensor | None = None, - head_mask: np.ndarray | tf.Tensor | None = None, - decoder_head_mask: np.ndarray | tf.Tensor | None = None, - encoder_outputs: TFLEDEncoderBaseModelOutput | None = None, - global_attention_mask: np.ndarray | tf.Tensor | None = None, - past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None, - use_cache: bool | None = None, - output_attentions: bool | None = None, - output_hidden_states: bool | None = None, - return_dict: bool | None = None, - labels: tf.Tensor | None = None, - training: bool = False, - ) -> Tuple[tf.Tensor] | TFLEDSeq2SeqLMOutput: - """ - Returns: - - Examples: - - ```python - >>> from transformers import AutoTokenizer, TFLEDForConditionalGeneration - >>> import tensorflow as tf - - >>> mname = "allenai/led-base-16384" - >>> tokenizer = AutoTokenizer.from_pretrained(mname) - >>> TXT = "My friends are but they eat too many carbs." 
- >>> model = TFLEDForConditionalGeneration.from_pretrained(mname) - >>> batch = tokenizer([TXT], return_tensors="tf") - >>> logits = model(inputs=batch.input_ids).logits - >>> probs = tf.nn.softmax(logits[0]) - >>> # probs[5] is associated with the mask token - ```""" - - if labels is not None: - use_cache = False - if decoder_input_ids is None and decoder_inputs_embeds is None: - decoder_input_ids = shift_tokens_right( - labels, self.config.pad_token_id, self.config.decoder_start_token_id - ) - - outputs = self.led( - input_ids, - attention_mask=attention_mask, - decoder_input_ids=decoder_input_ids, - decoder_attention_mask=decoder_attention_mask, - encoder_outputs=encoder_outputs, - global_attention_mask=global_attention_mask, - head_mask=head_mask, - decoder_head_mask=decoder_head_mask, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - decoder_inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - lm_logits = tf.matmul(outputs[0], self.led.shared.weights, transpose_b=True) - lm_logits = self.bias_layer(lm_logits) - masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) - - if not return_dict: - output = (lm_logits,) + outputs[1:] - return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - return TFLEDSeq2SeqLMOutput( - loss=masked_lm_loss, - logits=lm_logits, - past_key_values=outputs.past_key_values, # index 1 of d outputs - decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs - decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs - cross_attentions=outputs.cross_attentions, # index 4 of d outputs - encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs - encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out - encoder_attentions=outputs.encoder_attentions, # 2 of e out - encoder_global_attentions=outputs.encoder_global_attentions, - ) - - def serving_output(self, output): - pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None - dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None - dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None - cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None - enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None - enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None - enc_g_attns = tf.convert_to_tensor(output.encoder_global_attentions) if self.config.output_attentions else None - - return TFLEDSeq2SeqLMOutput( - logits=output.logits, - past_key_values=pkv, - decoder_hidden_states=dec_hs, - decoder_attentions=dec_attns, - cross_attentions=cross_attns, - encoder_last_hidden_state=output.encoder_last_hidden_state, - encoder_hidden_states=enc_hs, - encoder_attentions=enc_attns, - encoder_global_attentions=enc_g_attns, - ) - - def prepare_inputs_for_generation( - self, - decoder_input_ids, - past_key_values=None, - attention_mask=None, - head_mask=None, - decoder_head_mask=None, - use_cache=None, - encoder_outputs=None, - **kwargs, - ): - # cut decoder_input_ids if past is used - if past_key_values is not None: - decoder_input_ids = decoder_input_ids[:, -1:] - - return { - "input_ids": None, 
# encoder_outputs is defined. input_ids not needed - "encoder_outputs": encoder_outputs, - "past_key_values": past_key_values, - "decoder_input_ids": decoder_input_ids, - "attention_mask": attention_mask, - "head_mask": head_mask, - "decoder_head_mask": decoder_head_mask, - "use_cache": use_cache, # change this to avoid caching (presumably for debugging) - } - - def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): - return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) - - def hf_compute_loss(self, labels, logits): - """CrossEntropyLoss that ignores pad tokens""" - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( - from_logits=True, reduction=tf.keras.losses.Reduction.NONE - ) - if self.config.tf_legacy_loss: - melted_labels = tf.reshape(labels, (-1,)) - active_loss = tf.not_equal(melted_labels, self.config.pad_token_id) - reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) - labels = tf.boolean_mask(melted_labels, active_loss) - return loss_fn(labels, reduced_logits) - - # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway - unmasked_loss = loss_fn(tf.nn.relu(labels), logits) - # make sure only non-padding labels affect the loss - loss_mask = tf.cast(labels != self.config.pad_token_id, dtype=unmasked_loss.dtype) - masked_loss = unmasked_loss * loss_mask - reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) - return tf.reshape(reduced_masked_loss, (1,)) diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vencoder/ContentVec256L9.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/vencoder/ContentVec256L9.py deleted file mode 100644 index b0089c789cd87cfd3b1badb2fc45cb1b88041eab..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vencoder/ContentVec256L9.py +++ /dev/null @@ -1,35 +0,0 @@ -from vencoder.encoder import SpeechEncoder -import torch -from fairseq import checkpoint_utils - -class ContentVec256L9(SpeechEncoder): - def __init__(self,vec_path = "pretrain/checkpoint_best_legacy_500.pt",device=None): - print("load model(s) from {}".format(vec_path)) - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [vec_path], - suffix="", - ) - self.hidden_dim = 256 - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - self.model = models[0].to(self.dev) - self.model.eval() - - def encoder(self, wav): - feats = wav - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - inputs = { - "source": feats.to(wav.device), - "padding_mask": padding_mask.to(wav.device), - "output_layer": 9, # layer 9 - } - with torch.no_grad(): - logits = self.model.extract_features(**inputs) - feats = self.model.final_proj(logits[0]) - return feats.transpose(1, 2) diff --git a/spaces/yuhangzang/ContextDet-Demo/util/__init__.py b/spaces/yuhangzang/ContextDet-Demo/util/__init__.py deleted file mode 100644 index 4ebdc90b7f3ac2ed5a085066dcf20722b90cbc77..0000000000000000000000000000000000000000 --- a/spaces/yuhangzang/ContextDet-Demo/util/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# ------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. 
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# ------------------------------------------------------------------------ diff --git a/spaces/zfz/img-to-music/README.md b/spaces/zfz/img-to-music/README.md deleted file mode 100644 index ff1948d1b95ee1f8d7a3396aefb285c729d18687..0000000000000000000000000000000000000000 --- a/spaces/zfz/img-to-music/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Img To Music -emoji: 🌅🎶 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.16.0 -app_file: app.py -pinned: true -duplicated_from: fffiloni/img-to-music ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/inc.js b/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/inc.js deleted file mode 100644 index aa4d83ab4c289548a50cd9c9c83e95b856a8da2f..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/inc.js +++ /dev/null @@ -1,15 +0,0 @@ -const SemVer = require('../classes/semver') - -const inc = (version, release, options, identifier) => { - if (typeof (options) === 'string') { - identifier = options - options = undefined - } - - try { - return new SemVer(version, options).inc(release, identifier).version - } catch (er) { - return null - } -} -module.exports = inc diff --git a/spaces/zwq2018/Data-Copilot/tool.py b/spaces/zwq2018/Data-Copilot/tool.py deleted file mode 100644 index 09815dad8c728a053c98c4229c52cadd9a3744af..0000000000000000000000000000000000000000 --- a/spaces/zwq2018/Data-Copilot/tool.py +++ /dev/null @@ -1,1930 +0,0 @@ -import tushare as ts -import matplotlib.pyplot as plt -import pandas as pd -import os -import random -from matplotlib.ticker import MaxNLocator -#from prettytable import PrettyTable -#from blessed import Terminal -import time -from datetime import datetime, timedelta -import numpy as np -import mplfinance as mpf - -from typing import Optional -import matplotlib.font_manager as fm -from matplotlib.lines import Line2D -from typing import Union, Any -from sklearn.linear_model import LinearRegression - - -# plt.rcParams['font.sans-serif'] = ['Arial Unicode MS'] -# plt.rcParams['axes.unicode_minus'] = False - - -font_path = './fonts/SimHei.ttf' -font_prop = fm.FontProperties(fname=font_path) - - -tushare_token = os.getenv('TUSHARE_TOKEN') -pro = ts.pro_api(tushare_token) - -# def last_month_end(date_str:str=''): -# date_obj = datetime.strptime(date_str, '%Y%m%d') -# current_month = date_obj.month -# current_year = date_obj.year -# -# if current_month == 1: -# last_month = 12 -# last_year = current_year - 1 -# else: -# last_month = current_month - 1 -# last_year = current_year -# -# if date_obj.month != (date_obj + timedelta(days=1)).month: -# last_month_end_date = date_obj -# else: -# last_day_of_last_month = (date_obj.replace(day=1) - timedelta(days=1)).day -# last_month_end_date = datetime(last_year, last_month, last_day_of_last_month) -# -# return last_month_end_date.strftime('%Y%m%d') - - - -def get_last_year_date(date_str: str = '') -> str: - """ - This function takes a date string in the format 
YYYYMMDD and returns the date string one year prior to the input date. - - Args: - - date_str: string, the input date in the format YYYYMMDD - - Returns: - - string, the date one year prior to the input date in the format YYYYMMDD - """ - dt = datetime.strptime(date_str, '%Y%m%d') - # To calculate the date one year ago - one_year_ago = dt - timedelta(days=365) - - # To format the date as a string - one_year_ago_str = one_year_ago.strftime('%Y%m%d') - - return one_year_ago_str - - -def get_adj_factor(stock_code: str = '', start_date: str = '', end_date: str = '') -> pd.DataFrame: - # Get stock price adjustment factors. Retrieve the stock price adjustment factors for a single stock's entire historical data or for all stocks on a single trading day. - # The input includes the stock code, start date, end date, and trading date, all in string format with the date in the YYYYMMDD format - # The return value is a dataframe containing the stock code, trading date, and adjustment factor - # ts_code str 股票代码 - # adj_factor float 复权因子 - """ - This function retrieves the adjusted stock prices for a given stock code and date range. - - Args: - - stock_code: string, the stock code to retrieve data for - - start_date: string, the start date in the format YYYYMMDD - - end_date: string, the end date in the format YYYYMMDD - - Returns: - - dataframe, a dataframe containing the stock code, trade date, and adjusted factor - - This will retrieve the adjusted stock prices for the stock with code '000001.SZ' between the dates '20220101' and '20220501'. - """ - df = pro.adj_factor(**{ - "ts_code": stock_code, - "trade_date": "", - "start_date": start_date, - "end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "ts_code", - "trade_date", - "adj_factor" - ]) - - return df - -def get_stock_code(stock_name: str) -> str: - # Retrieve the stock code of a given stock name. If we call get_stock_code('贵州茅台'), it will return '600519.SH'. - - - df = pd.read_csv('tushare_stock_basic_20230421210721.csv') - try: - code = df.loc[df.name==stock_name].ts_code.iloc[0] - return code - except: - return None - - - - -def get_stock_name_from_code(stock_code: str) -> str: - """ - Reads a local file to retrieve the stock name from a given stock code. - - Args: - - stock_code (str): The code of the stock. - - Returns: - - str: The stock name of the given stock code. - """ - # For example,if we call get_stock_name_from_code('600519.SH'), it will return '贵州茅台'. - - - df = pd.read_csv('tushare_stock_basic_20230421210721.csv') - name = df.loc[df.ts_code == stock_code].name.iloc[0] - - return name - -def get_stock_prices_data(stock_name: str='', start_date: str='', end_date: str='', freq:str='daily') -> pd.DataFrame: - """ - Retrieves the daily/weekly/monthly price data for a given stock code during a specific time period. get_stock_prices_data('贵州茅台','20200120','20220222','daily') - - Args: - - stock_name (str) - - start_date (str): The start date in the format 'YYYYMMDD'. - - end_date (str): The end date in 'YYYYMMDD'. - - freq (str): The frequency of the price data, can be 'daily', 'weekly', or 'monthly'. - - Returns: - - pd.DataFrame: A dataframe that contains the daily/weekly/monthly data. 
The output columns contain stock_code, trade_date, open, high, low, close, pre_close(昨天收盘价), change(涨跌额), pct_chg(涨跌幅),vol(成交量),amount(成交额) - """ - - stock_code = get_stock_code(stock_name) - - if freq == 'daily': - stock_data = pro.daily(**{ - "ts_code": stock_code, - "trade_date": '', - "start_date": start_date, - "end_date": end_date, - "offset": "", - "limit": "" - }, fields=[ - "ts_code", - "trade_date", - "open", - "high", - "low", - "close", - "pre_close", - "change", - "pct_chg", - "vol", - "amount" - ]) - - elif freq == 'weekly': - stock_data = pro.weekly(**{ - "ts_code": stock_code, - "trade_date": '', - "start_date": start_date, - "end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "ts_code", - "trade_date", - "close", - "open", - "high", - "low", - "pre_close", - "change", - "pct_chg", - "vol", - "amount" - ]) - elif freq == 'monthly': - stock_data = pro.monthly(**{ - "ts_code": stock_code, - "trade_date": '', - "start_date": start_date, - "end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "ts_code", - "trade_date", - "close", - "open", - "high", - "low", - "pre_close", - "change", - "pct_chg", - "vol", - "amount" - ]) - - - adj_f = get_adj_factor(stock_code, start_date, end_date) - stock_data = pd.merge(stock_data, adj_f, on=['ts_code', 'trade_date']) - # Multiply the values of open, high, low, and close by their corresponding adjustment factors. - # To obtain the adjusted close price - stock_data[['open', 'high', 'low', 'close']] *= stock_data['adj_factor'].values.reshape(-1, 1) - - #stock_data.rename(columns={'vol': 'volume'}, inplace=True) - df = pd.read_csv('tushare_stock_basic_20230421210721.csv') - stock_data_merged = pd.merge(stock_data, df, on='ts_code') - stock_data_merged.rename(columns={'ts_code': 'stock_code'}, inplace=True) - stock_data_merged.rename(columns={'name': 'stock_name'}, inplace=True) - stock_data_merged = stock_data_merged.sort_values(by='trade_date', ascending=True) # To sort the DataFrame by date in ascending order - return stock_data_merged - - - -def get_stock_technical_data(stock_name: str, start_date: str, end_date: str) -> pd.DataFrame: - """ - Retrieves the daily technical data of a stock including macd turnover rate, volume, PE ratio, etc. Those technical indicators are usually plotted as subplots in a k-line chart. 
- - Args: - stock_name (str): - start_date (str): Start date "YYYYMMDD" - end_date (str): End date "YYYYMMDD" - - Returns: - pd.DataFrame: A DataFrame containing the technical data of the stock, - including various indicators such as ts_code, trade_date, close, macd_dif, macd_dea, macd, kdj_k, kdj_d, kdj_j, rsi_6, rsi_12, boll_upper, boll_mid, boll_lower, cci, turnover_rate, turnover_rate_f, volume_ratio, pe_ttm(市盈率), pb(市净率), ps_ttm, dv_ttm, total_share, float_share, free_share, total_mv, circ_mv - - """ - - # Technical factors - stock_code = get_stock_code(stock_name) - stock_data1 = pro.stk_factor(**{ - "ts_code": stock_code, - "start_date": start_date, - "end_date": end_date, - "trade_date": '', - "limit": "", - "offset": "" - }, fields=[ - "ts_code", - "trade_date", - "close", - "macd_dif", - "macd_dea", - "macd", - "kdj_k", - "kdj_d", - "kdj_j", - "rsi_6", - "rsi_12", - "rsi_24", - "boll_upper", - "boll_mid", - "boll_lower", - "cci" - ]) - # Trading factors - stock_data2 = pro.daily_basic(**{ - "ts_code": stock_code, - "trade_date": '', - "start_date": start_date, - "end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "ts_code", # - "trade_date", - "turnover_rate", - "turnover_rate_f", - "volume_ratio", - "pe_ttm", - "pb", - "ps_ttm", - "dv_ttm", - "total_share", - "float_share", - "free_share", - "total_mv", - "circ_mv" - ]) - - # - stock_data = pd.merge(stock_data1, stock_data2, on=['ts_code', 'trade_date']) - df = pd.read_csv('tushare_stock_basic_20230421210721.csv') - stock_data_merged = pd.merge(stock_data, df, on='ts_code') - stock_data_merged = stock_data_merged.sort_values(by='trade_date', ascending=True) - - stock_data_merged.drop(['symbol'], axis=1, inplace=True) - - stock_data_merged.rename(columns={'ts_code': 'stock_code'}, inplace=True) - stock_data_merged.rename(columns={'name': 'stock_name'}, inplace=True) - - return stock_data_merged - - -def plot_stock_data(stock_data: pd.DataFrame, ax: Optional[plt.Axes] = None, figure_type: str = 'line', title_name: str ='') -> plt.Axes: - - """ - This function plots stock data. - - Args: - - stock_data: pandas DataFrame, the stock data to plot. The DataFrame should contain three columns: - - Column 1: trade date in 'YYYYMMDD' - - Column 2: Stock name or code (string format) - - Column 3: Index value (numeric format) - The DataFrame can be time series data or cross-sectional data. If it is time-series data, the first column represents different trade time, the second column represents the same name. For cross-sectional data, the first column is the same, the second column contains different stocks. 
- - - ax: matplotlib Axes object, the axes to plot the data on - - figure_type: the type of figure (either 'line' or 'bar') - - title_name - - Returns: - - matplotlib Axes object, the axes containing the plot - """ - - index_name = stock_data.columns[2] - name_list = stock_data.iloc[:,1] - date_list = stock_data.iloc[:,0] - if name_list.nunique() == 1 and date_list.nunique() != 1: - # Time Series Data - unchanged_var = name_list.iloc[0] # stock name - x_dim = date_list # tradingdate - x_name = stock_data.columns[0] - - elif name_list.nunique() != 1 and date_list.nunique() == 1: - # Cross-sectional Data - unchanged_var = date_list.iloc[0] # tradingdate - x_dim = name_list # stock name - x_name = stock_data.columns[1] - - data_size = x_dim.shape[0] - - - - start_x_dim, end_x_dim = x_dim.iloc[0], x_dim.iloc[-1] - - start_y = stock_data.iloc[0, 2] - end_y = stock_data.iloc[-1, 2] - - - def generate_random_color(): - r = random.randint(0, 255)/ 255.0 - g = random.randint(0, 100)/ 255.0 - b = random.randint(0, 255)/ 255.0 - return (r, g, b) - - color = generate_random_color() - if ax is None: - _, ax = plt.subplots() - - if figure_type =='line': - # - - ax.plot(x_dim, stock_data.iloc[:, 2], label = unchanged_var+'_' + index_name, color=color,linewidth=3) - # - plt.scatter(x_dim, stock_data.iloc[:, 2], color=color,s=3) # Add markers to the data points - - # - #ax.scatter(x_dim, stock_data.iloc[:, 2],label = unchanged_var+'_' + index_name, color=color, s=3) - # - - ax.annotate(unchanged_var + ':' + str(round(start_y, 2)) + ' @' + start_x_dim, xy=(start_x_dim, start_y), - xytext=(start_x_dim, start_y), - textcoords='data', fontsize=14,color=color, horizontalalignment='right',fontproperties=font_prop) - - ax.annotate(unchanged_var + ':' + str(round(end_y, 2)) +' @' + end_x_dim, xy=(end_x_dim, end_y), - xytext=(end_x_dim, end_y), - textcoords='data', fontsize=14, color=color, horizontalalignment='left',fontproperties=font_prop) - - - elif figure_type == 'bar': - ax.bar(x_dim, stock_data.iloc[:, 2], label = unchanged_var + '_' + index_name, width=0.3, color=color) - ax.annotate(unchanged_var + ':' + str(round(start_y, 2)) + ' @' + start_x_dim, xy=(start_x_dim, start_y), - xytext=(start_x_dim, start_y), - textcoords='data', fontsize=14, color=color, horizontalalignment='right',fontproperties=font_prop) - - ax.annotate(unchanged_var + ':' + str(round(end_y, 2)) + ' @' + end_x_dim, xy=(end_x_dim, end_y), - xytext=(end_x_dim, end_y), - textcoords='data', fontsize=14, color=color, horizontalalignment='left',fontproperties=font_prop) - - plt.xticks(x_dim,rotation=45) # - ax.xaxis.set_major_locator(MaxNLocator( integer=True, prune=None, nbins=100)) # - - - plt.xlabel(x_name, fontproperties=font_prop,fontsize=18) - plt.ylabel(f'{index_name}', fontproperties=font_prop,fontsize=16) - ax.set_title(title_name , fontproperties=font_prop,fontsize=16) - plt.legend(prop=font_prop) # 显示图例 - fig = plt.gcf() - fig.set_size_inches(18, 12) - - return ax - - -def query_fund_Manager(Manager_name: str) -> pd.DataFrame: - # 代码fund_code,公告日期ann_date,基金经理名字name,性别gender,出生年份birth_year,学历edu,国籍nationality,开始管理日期begin_date,结束日期end_date,简历resume - """ - Retrieves information about a fund manager. - - Args: - Manager_name (str): The name of the fund manager. - - Returns: - df (DataFrame): A DataFrame containing the fund manager's information, including the fund codes, announcement dates, - manager's name, gender, birth year, education, nationality, start and end dates of managing funds, - and the manager's resume. 
- """ - - df = pro.fund_manager(**{ - "ts_code": "", - "ann_date": "", - "name": Manager_name, - "offset": "", - "limit": "" - }, fields=[ - "ts_code", - "ann_date", - "name", - "gender", - "birth_year", - "edu", - "nationality", - "begin_date", - "end_date", - "resume" - ]) - # - df.rename(columns={'ts_code': 'fund_code'}, inplace=True) - # To query the fund name based on the fund code and store it in a new column called fund_name, while removing the rows where the fund name is not found - df['fund_name'] = df['fund_code'].apply(lambda x: query_fund_name_or_code('', x)) - df.dropna(subset=['fund_name'], inplace=True) - df.rename(columns={'name': 'manager_name'}, inplace=True) - # - df_out = df[['fund_name','fund_code','ann_date','manager_name','begin_date','end_date']] - - return df_out - - -# def save_stock_prices_to_csv(stock_prices: pd.DataFrame, stock_name: str, file_path: str) -> None: -# -# """ -# Saves the price data of a specific stock symbol during a specific time period to a local CSV file. -# -# Args: -# - stock_prices (pd.DataFrame): A pandas dataframe that contains the daily price data for the given stock symbol during the specified time period. -# - stock_name (str): The name of the stock. -# - file_path (str): The file path where the CSV file will be saved. -# -# Returns: -# - None: The function only saves the CSV file to the specified file path. -# """ -# # The function checks if the directory to save the CSV file exists and creates it if it does not exist. -# # The function then saves the price data of the specified stock symbol during the specified time period to a local CSV file with the name {stock_name}_price_data.csv in the specified file path. -# -# -# if not os.path.exists(file_path): -# os.makedirs(file_path) -# -# -# file_path = f"{file_path}{stock_name}_stock_prices.csv" -# stock_prices.to_csv(file_path, index_label='Date') -# print(f"Stock prices for {stock_name} saved to {file_path}") - - -def calculate_stock_index(stock_data: pd.DataFrame, index:str='close') -> pd.DataFrame: - """ - Calculate a specific index of a stock based on its price information. - - Args: - stock_data (pd.DataFrame): DataFrame containing the stock's price information. - index (str, optional): The index to calculate. The available options depend on the column names in the - input stock price data. Additionally, there are two special indices: 'candle_K' and 'Cumulative_Earnings_Rate'. - - Returns: - DataFrame containing the corresponding index data of the stock. In general, it includes three columns: 'trade_date', 'name', and the corresponding index value. - Besides, if index is 'candle_K', the function returns the DataFrame containing 'trade_date', 'Open', 'High', 'Low', 'Close', 'Volume','name' column. - If index is a technical index such as 'macd' or a trading index likes 'pe_ttm', the function returns the DataFrame with corresponding columns. - """ - - - if 'stock_name' not in stock_data.columns and 'index_name' in stock_data.columns: - stock_data.rename(columns={'index_name': 'stock_name'}, inplace=True) - # - index = index.lower() - if index=='Cumulative_Earnings_Rate' or index =='Cumulative_Earnings_Rate'.lower() : - stock_data[index] = (1 + stock_data['pct_chg'] / 100.).cumprod() - 1. - stock_data[index] = stock_data[index] * 100. 
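        # The cumulative earnings rate computed above compounds the daily percentage changes
        # rather than summing them: e.g. two days with pct_chg of 1.0 and -0.5 give
        # (1.01 * 0.995 - 1) * 100 = 0.495 percent, not 0.5.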
- if 'stock_name' in stock_data.columns : - selected_index = stock_data[['trade_date', 'stock_name', index]].copy() - # - if 'fund_name' in stock_data.columns: - selected_index = stock_data[['trade_date', 'fund_name', index]].copy() - return selected_index - - elif index == 'candle_K' or index == 'candle_K'.lower(): - #tech_df = tech_df.drop(['name', 'symbol', 'industry', 'area','market','list_date','ts_code','close'], axis=1) - # Merge two DataFrames based on the 'trade_date' column. - - stock_data = stock_data.rename( - columns={'open': 'Open', 'high': 'High', 'low': 'Low', 'close': 'Close', - 'vol': 'Volume'}) - selected_index = stock_data[['trade_date', 'Open', 'High', 'Low', 'Close', 'Volume','stock_name']].copy() - return selected_index - - elif index =='macd': - selected_index = stock_data[['trade_date','macd','macd_dea','macd_dif']].copy() - return selected_index - - elif index =='rsi': - selected_index = stock_data[['trade_date','rsi_6','rsi_12']].copy() - return selected_index - - elif index =='boll': - selected_index = stock_data[['trade_date', 'boll_upper', 'boll_lower','boll_mid']].copy() - return selected_index - - elif index =='kdj': - selected_index = stock_data[['trade_date', 'kdj_k', 'kdj_d','kdj_j']].copy() - return selected_index - - elif index =='cci': - selected_index = stock_data[['trade_date', 'cci']].copy() - return selected_index - - elif index == '换手率': - selected_index = stock_data[['trade_date', 'turnover_rate','turnover_rate_f']].copy() - return selected_index - - elif index == '市值': - selected_index = stock_data[['trade_date', 'total_mv','circ_mv']].copy() - return selected_index - - - elif index in stock_data.columns: - stock_data = stock_data - - if 'stock_name' in stock_data.columns : - selected_index = stock_data[['trade_date', 'stock_name', index]].copy() - - if 'fund_name' in stock_data.columns: - selected_index = stock_data[['trade_date', 'fund_name', index]].copy() - # Except for candlestick chart and technical indicators, the remaining outputs consist of three columns: date, name, and indicator. - return selected_index - - - -def rank_index_cross_section(stock_data: pd.DataFrame, Top_k: int = -1, ascending: bool = False) -> pd.DataFrame: - """ - Sort the cross-sectional data based on the given index. - - Args: - stock_data : DataFrame containing the cross-sectional data. It should have three columns, and the last column represents the variable to be sorted. - Top_k : The number of data points to retain after sorting. (Default: -1, which retains all data points) - ascending: Whether to sort the data in ascending order or not. (Default: False) - - Returns: - stock_data_selected : DataFrame containing the sorted data. It has the same structure as the input DataFrame. 
- """ - - index = stock_data.columns[-1] - stock_data = stock_data.sort_values(by=index, ascending=ascending) - #stock_data_selected = stock_data[['trade_date','stock_name', index]].copy() - stock_data_selected = stock_data[:Top_k] - stock_data_selected = stock_data_selected.drop_duplicates(subset=['stock_name'], keep='first') - return stock_data_selected - - -def get_company_info(stock_name: str='') -> pd.DataFrame: - # ts_code: str 股票代码, exchange:str 交易所代码SSE上交所 SZSE深交所, chairman:str 法人代表, manager:str 总经理, secretary:str 董秘 # reg_capital:float 注册资本, setup_date:str 注册日期, province:str 所在省份 ,city:str 所在城市 - # introduction:str 公司介绍, website:str 公司主页 , email:str 电子邮件, office:str 办公室 # ann_date: str 公告日期, business_scope:str 经营范围, employees:int 员工人数, main_business:str 主要业务及产品 - """ - This function retrieves company information including stock code, exchange, chairman, manager, secretary, - registered capital, setup date, province, city, website, email, employees, business scope, main business, - introduction, office, and announcement date. - - Args: - - stock_name (str): The name of the stock. - - Returns: - - pd.DataFrame: A DataFrame that contains the company information. - """ - - stock_code = get_stock_code(stock_name) - df = pro.stock_company(**{ - "ts_code": stock_code,"exchange": "","status": "", "limit": "","offset": "" - }, fields=[ - "ts_code","exchange","chairman", "manager","secretary", "reg_capital","setup_date", "province","city", - "website", "email","employees","business_scope","main_business","introduction","office", "ann_date" - ]) - - - en_to_cn = { - 'ts_code': '股票代码', - 'exchange': '交易所代码', - 'chairman': '法人代表', - 'manager': '总经理', - 'secretary': '董秘', - 'reg_capital': '注册资本', - 'setup_date': '注册日期', - 'province': '所在省份', - 'city': '所在城市', - 'introduction': '公司介绍', - 'website': '公司主页', - 'email': '电子邮件', - 'office': '办公室', - 'ann_date': '公告日期', - 'business_scope': '经营范围', - 'employees': '员工人数', - 'main_business': '主要业务及产品' - } - - df.rename(columns=en_to_cn, inplace=True) - df.insert(0, '股票名称', stock_name) - # for column in df.columns: - # print(f"[{column}]: {df[column].values[0]}") - - - return df - - - - - -# def get_Financial_data(stock_code: str, report_date: str, financial_index: str = '' ) -> pd.DataFrame: -# # report_date的格式为"YYYYMMDD",包括"yyyy0331"为一季报,"yyyy0630"为半年报,"yyyy0930"为三季报,"yyyy1231"为年报 -# # index包含: # current_ratio 流动比率 # quick_ratio 速动比率 # netprofit_margin 销售净利率 # grossprofit_margin 销售毛利率 # roe 净资产收益率 # roe_dt 净资产收益率(扣除非经常损益) -# # roa 总资产报酬率 # debt_to_assets 资产负债率 # roa_yearly 年化总资产净利率 # q_dtprofit 扣除非经常损益后的单季度净利润 # q_eps 每股收益(单季度) -# # q_netprofit_margin 销售净利率(单季度) # q_gsprofit_margin 销售毛利率(单季度) # basic_eps_yoy 基本每股收益同比增长率(%) # netprofit_yoy 归属母公司股东的净利润同比增长率(%) # q_netprofit_yoy 归属母公司股东的净利润同比增长率(%)(单季度) # q_netprofit_qoq 归属母公司股东的净利润环比增长率(%)(单季度) # equity_yoy 净资产同比增长率 -# """ -# Retrieves financial data for a specific stock within a given date range. -# -# Args: -# stock_code (str): The stock code or symbol of the company for which financial data is requested. -# report_date (str): The report date in the format "YYYYMMDD" . -# financial_index (str, optional): The financial indicator to be queried. If not specified, all available financial -# indicators will be included. -# -# Returns: -# pd.DataFrame: A DataFrame containing the financial data for the specified stock and date range. The DataFrame -# consists of the following columns: "stock_name", -# "trade_date" (reporting period), and the requested financial indicator(s). 
-# -# """ -# stock_data = pro.fina_indicator(**{ -# "ts_code": stock_code, -# "ann_date": "", -# "start_date": '', -# "end_date": '', -# "period": report_date, -# "update_flag": "1", -# "limit": "", -# "offset": "" -# }, fields=["ts_code","end_date", financial_index]) -# -# stock_name = get_stock_name_from_code(stock_code) -# stock_data['stock_name'] = stock_name -# stock_data = stock_data.sort_values(by='end_date', ascending=True) # 按照日期升序排列 -# # 把end_data列改名为trade_date -# stock_data.rename(columns={'end_date': 'trade_date'}, inplace=True) -# stock_financial_data = stock_data[['stock_name', 'trade_date', financial_index]] -# return stock_financial_data - - -def get_Financial_data_from_time_range(stock_name:str, start_date:str, end_date:str, financial_index:str='') -> pd.DataFrame: - # start_date='20190101',end_date='20221231',financial_index='roe', The returned data consists of the ROE values for the entire three-year period from 2019 to 2022. - # To query quarterly or annual financial report data for a specific moment, "yyyy0331"为一季报,"yyyy0630"为半年报,"yyyy0930"为三季报,"yyyy1231"为年报,例如get_Financial_data_from_time_range("600519.SH", "20190331", "20190331", "roe") means to query the return on equity (ROE) data from the first quarter of 2019, - # # current_ratio 流动比率 # quick_ratio 速动比率 # netprofit_margin 销售净利率 # grossprofit_margin 销售毛利率 # roe 净资产收益率 # roe_dt 净资产收益率(扣除非经常损益) - # roa 总资产报酬率 # debt_to_assets 资产负债率 # roa_yearly 年化总资产净利率 # q_dtprofit 扣除非经常损益后的单季度净利润 # q_eps 每股收益(单季度) - # q_netprofit_margin 销售净利率(单季度) # q_gsprofit_margin 销售毛利率(单季度) # basic_eps_yoy 基本每股收益同比增长率(%) # netprofit_yoy 归属母公司股东的净利润同比增长率(%) # q_netprofit_yoy 归属母公司股东的净利润同比增长率(%)(单季度) # q_netprofit_qoq 归属母公司股东的净利润环比增长率(%)(单季度) # equity_yoy 净资产同比增长率 - """ - Retrieves the financial data for a given stock within a specified date range. - - Args: - stock_name (str): The stock code. - start_date (str): The start date of the data range in the format "YYYYMMDD". - end_date (str): The end date of the data range in the format "YYYYMMDD". - financial_index (str, optional): The financial indicator to be queried. - - Returns: - pd.DataFrame: A DataFrame containin financial data for the specified stock and date range. - -""" - stock_code = get_stock_code(stock_name) - stock_data = pro.fina_indicator(**{ - "ts_code": stock_code, - "ann_date": "", - "start_date": start_date, - "end_date": end_date, - "period": '', - "update_flag": "1", - "limit": "", - "offset": "" - }, fields=["ts_code", "end_date", financial_index]) - - #stock_name = get_stock_name_from_code(stock_code) - stock_data['stock_name'] = stock_name - stock_data = stock_data.sort_values(by='end_date', ascending=True) # 按照日期升序排列 - # 把end_data列改名为trade_date - stock_data.rename(columns={'end_date': 'trade_date'}, inplace=True) - stock_financial_data = stock_data[['stock_name', 'trade_date', financial_index]] - return stock_financial_data - - -def get_GDP_data(start_quarter:str='', end_quarter:str='', index:str='gdp_yoy') -> pd.DataFrame: - # The available indicators for query include the following 9 categories: # gdp GDP累计值(亿元)# gdp_yoy 当季同比增速(%)# pi 第一产业累计值(亿元)# pi_yoy 第一产业同比增速(%)# si 第二产业累计值(亿元)# si_yoy 第二产业同比增速(%)# ti 第三产业累计值(亿元) # ti_yoy 第三产业同比增速(%) - """ - Retrieves GDP data for the chosen index and specified time period. - - Args: - - start_quarter (str): The start quarter of the query, in YYYYMMDD format. - - end_quarter (str): The end quarter, in YYYYMMDD format. - - index (str): The specific GDP index to retrieve. Default is `gdp_yoy`. 
- - Returns: - - pd.DataFrame: A pandas DataFrame with three columns: `quarter`, `country`, and the selected `index`. - """ - - # The output is a DataFrame with three columns: - # the first column represents the quarter (quarter), the second column represents the country (country), and the third column represents the index (index). - df = pro.cn_gdp(**{ - "q":'', - "start_q": start_quarter, - "end_q": end_quarter, - "limit": "", - "offset": "" - }, fields=[ - "quarter", - "gdp", - "gdp_yoy", - "pi", - "pi_yoy", - "si", - "si_yoy", - "ti", - "ti_yoy" - ]) - df = df.sort_values(by='quarter', ascending=True) # - df['country'] = 'China' - df = df[['quarter', 'country', index]].copy() - - - return df - -def get_cpi_ppi_currency_supply_data(start_month: str = '', end_month: str = '', type: str = 'cpi', index: str = '') -> pd.DataFrame: - # The query types (type) include three categories: CPI, PPI, and currency supply. Each type corresponds to different indices. - # Specifically, CPI has 12 indices, PPI has 30 indices, and currency supply has 9 indices. - # The output is a DataFrame table with three columns: the first column represents the month (month), the second column represents the country (country), and the third column represents the index (index). - - # type='cpi',monthly CPI data include the following 12 categories: - # nt_val 全国当月值 # nt_yoy 全国同比(%)# nt_mom 全国环比(%)# nt_accu 全国累计值# town_val 城市当月值# town_yoy 城市同比(%)# town_mom 城市环比(%)# town_accu 城市累计值# cnt_val 农村当月值# cnt_yoy 农村同比(%)# cnt_mom 农村环比(%)# cnt_accu 农村累计值 - - # type = 'ppi', monthly PPI data include the following 30 categories: - # ppi_yoy PPI:全部工业品:当月同比 - # ppi_mp_yoy PPI:生产资料:当月同比 - # ppi_mp_qm_yoy PPI:生产资料:采掘业:当月同比 - # ppi_mp_rm_yoy PPI:生产资料:原料业:当月同比 - # ppi_mp_p_yoy PPI:生产资料:加工业:当月同比 - # ppi_cg_yoy PPI:生活资料:当月同比 - # ppi_cg_f_yoy PPI:生活资料:食品类:当月同比 - # ppi_cg_c_yoy PPI:生活资料:衣着类:当月同比 - # ppi_cg_adu_yoy PPI:生活资料:一般日用品类:当月同比 - # ppi_cg_dcg_yoy PPI:生活资料:耐用消费品类:当月同比 - # ppi_mom PPI:全部工业品:环比 - # ppi_mp_mom PPI:生产资料:环比 - # ppi_mp_qm_mom PPI:生产资料:采掘业:环比 - # ppi_mp_rm_mom PPI:生产资料:原料业:环比 - # ppi_mp_p_mom PPI:生产资料:加工业:环比 - # ppi_cg_mom PPI:生活资料:环比 - # ppi_cg_f_mom PPI:生活资料:食品类:环比 - # ppi_cg_c_mom PPI:生活资料:衣着类:环比 - # ppi_cg_adu_mom PPI:生活资料:一般日用品类:环比 - # ppi_cg_dcg_mom PPI:生活资料:耐用消费品类:环比 - # ppi_accu PPI:全部工业品:累计同比 - # ppi_mp_accu PPI:生产资料:累计同比 - # ppi_mp_qm_accu PPI:生产资料:采掘业:累计同比 - # ppi_mp_rm_accu PPI:生产资料:原料业:累计同比 - # ppi_mp_p_accu PPI:生产资料:加工业:累计同比 - # ppi_cg_accu PPI:生活资料:累计同比 - # ppi_cg_f_accu PPI:生活资料:食品类:累计同比 - # ppi_cg_c_accu PPI:生活资料:衣着类:累计同比 - # ppi_cg_adu_accu PPI:生活资料:一般日用品类:累计同比 - # ppi_cg_dcg_accu PPI:生活资料:耐用消费品类:累计同比 - - # type = 'currency_supply', monthly currency supply data include the following 9 categories: - # m0 M0(亿元)# m0_yoy M0同比(%)# m0_mom M0环比(%)# m1 M1(亿元)# m1_yoy M1同比(%)# m1_mom M1环比(%)# m2 M2(亿元)# m2_yoy M2同比(%)# m2_mom M2环比(%) - - """ - This function is used to retrieve China's monthly CPI (Consumer Price Index), PPI (Producer Price Index), - and monetary supply data published by the National Bureau of Statistics, - and return a DataFrame table containing month, country, and index values. - The function parameters include start month, end month, query type, and query index. - For query indexes that are not within the query range, the default index for the corresponding type is returned. - - Args: - - start_month (str): start month of the query, in the format of YYYYMMDD. 
- - end_month (str):end month in YYYYMMDD - - type (str): required parameter, query type, including three types: cpi, ppi, and currency_supply. - - index (str): optional parameter, query index, the specific index depends on the query type. - If the query index is not within the range, the default index for the corresponding type is returned. - - Returns: - - pd.DataFrame: DataFrame type, including three columns: month, country, and index value. - """ - - if type == 'cpi': - - df = pro.cn_cpi(**{ - "m": '', - "start_m": start_month, - "end_m": end_month, - "limit": "", - "offset": "" - }, fields=[ - "month", "nt_val","nt_yoy", "nt_mom","nt_accu", "town_val", "town_yoy", "town_mom", - "town_accu", "cnt_val", "cnt_yoy", "cnt_mom", "cnt_accu"]) - # If the index is not within the aforementioned range, the index is set as "nt_yoy". - if index not in df.columns: - index = 'nt_yoy' - - - elif type == 'ppi': - df = pro.cn_ppi(**{ - "m": '', - "start_m": start_month, - "end_m": end_month, - "limit": "", - "offset": "" - }, fields=[ - "month", "ppi_yoy", "ppi_mp_yoy", "ppi_mp_qm_yoy", "ppi_mp_rm_yoy", "ppi_mp_p_yoy", "ppi_cg_yoy", - "ppi_cg_f_yoy", "ppi_cg_c_yoy", "ppi_cg_adu_yoy", "ppi_cg_dcg_yoy", - "ppi_mom", "ppi_mp_mom", "ppi_mp_qm_mom", "ppi_mp_rm_mom", "ppi_mp_p_mom", "ppi_cg_mom", "ppi_cg_f_mom", - "ppi_cg_c_mom", "ppi_cg_adu_mom", "ppi_cg_dcg_mom", - "ppi_accu", "ppi_mp_accu", "ppi_mp_qm_accu", "ppi_mp_rm_accu", "ppi_mp_p_accu", "ppi_cg_accu", - "ppi_cg_f_accu", "ppi_cg_c_accu", "ppi_cg_adu_accu", "ppi_cg_dcg_accu" - ]) - if index not in df.columns: - index = 'ppi_yoy' - - elif type == 'currency_supply': - df = pro.cn_m(**{ - "m": '', - "start_m": start_month, - "end_m": end_month, - "limit": "", - "offset": "" - }, fields=[ - "month", "m0", "m0_yoy","m0_mom", "m1", - "m1_yoy", "m1_mom", "m2", "m2_yoy", "m2_mom"]) - if index not in df.columns: - index = 'm2_yoy' - - - df = df.sort_values(by='month', ascending=True) # - df['country'] = 'China' - df = df[['month', 'country', index]].copy() - return df - -def predict_next_value(df: pd.DataFrame, pred_index: str = 'nt_yoy', pred_num:int = 1. ) -> pd.DataFrame: - """ - Predict the next n values of a specific column in the DataFrame using linear regression. - - Parameters: - df (pandas.DataFrame): The input DataFrame. - pred_index (str): The name of the column to predict. - pred_num (int): The number of future values to predict. - - Returns: - pandas.DataFrame: The DataFrame with the predicted values appended to the specified column - and other columns filled as pred+index. - """ - input_array = df[pred_index].values - - # Convert the input array into the desired format. - x = np.array(range(len(input_array))).reshape(-1, 1) - y = input_array.reshape(-1, 1) - - # Train a linear regression model. - model = LinearRegression() - model.fit(x, y) - - # Predict the future n values. 
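    # The regression above was fitted on integer time indices 0..n-1, so the lines below simply
    # evaluate the fitted line at indices n..n+pred_num-1, i.e. a straight-line extrapolation of
    # the recent trend rather than a full time-series forecast. As an illustrative (hypothetical)
    # call, predict_next_value(df, 'nt_yoy', 2) would append two rows whose 'nt_yoy' column holds
    # the extrapolated values and whose other columns are filled with 'pred1'/'pred2'.
    # Caveat: DataFrame.append (used further below) was removed in pandas 2.0; pd.concat is the
    # equivalent on current pandas releases.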
- next_indices = np.array(range(len(input_array), len(input_array) + pred_num)).reshape(-1, 1) - predicted_values = model.predict(next_indices).flatten() - - for i, value in enumerate(predicted_values, 1): - row_data = {pred_index: value} - for other_col in df.columns: - if other_col != pred_index: - row_data[other_col] = 'pred' + str(i) - df = df.append(row_data, ignore_index=True) - - # Return the updated DataFrame - return df - - - - - - -def get_latest_new_from_web(src: str = 'sina') -> pd.DataFrame: - - # 新浪财经 sina 获取新浪财经实时资讯 - # 同花顺 10jqka 同花顺财经新闻 - # 东方财富 eastmoney 东方财富财经新闻 - # 云财经 yuncaijing 云财经新闻 - """ - Retrieves the latest news data from major news websites, including Sina Finance, 10jqka, Eastmoney, and Yuncaijing. - - Args: - src (str): The name of the news website. Default is 'sina'. Optional parameters include: 'sina' for Sina Finance, - '10jqka' for 10jqka, 'eastmoney' for Eastmoney, and 'yuncaijing' for Yuncaijing. - - Returns: - pd.DataFrame: A DataFrame containing the news data, including two columns for date/time and content. - """ - - df = pro.news(**{ - "start_date": '', - "end_date": '', - "src": src, - "limit": "", - "offset": "" - }, fields=[ - "datetime", - "content", - ]) - df = df.apply(lambda x: '[' + x.name + ']' + ': ' + x.astype(str)) - return df - - -# def show_dynamic_table(df: pd.DataFrame) -> None: -# ''' -# This function displays a dynamic table in the terminal window, where each row of the input DataFrame is shown one by one. -# Arguments: -# df: A Pandas DataFrame containing the data to be displayed in the dynamic table. -# -# Returns: None. This function does not return anything. -# -# ''' -# -# return df -# # table = PrettyTable(df.columns.tolist(),align='l') -# -# # 将 DataFrame 的数据添加到表格中 -# # for row in df.itertuples(index=False): -# # table.add_row(row) -# -# # 初始化终端 -# # term = Terminal() -# # -# # # 在终端窗口中滚动显示表格 -# # with term.fullscreen(): -# # with term.cbreak(): -# # print(term.clear()) -# # with term.location(0, 0): -# # # 将表格分解为多行,并遍历每一行 -# # lines = str(table).split('\n') -# # for i, line in enumerate(lines): -# # with term.location(0, i): -# # print(line) -# # time.sleep(1) -# # -# # while True: -# # # 读取输入 -# # key = term.inkey(timeout=0.1) -# # -# # # 如果收到q键,则退出 -# # if key.lower() == 'q': -# # break - - -def get_index_constituent(index_name: str = '', start_date:str ='', end_date:str ='') -> pd.DataFrame: - """ - Query the constituent stocks of basic index (中证500) or a specified SW (申万) industry index - - args: - index_name: the name of the index. - start_date: the start date in "YYYYMMDD". - end_date: the end date in "YYYYMMDD". - - return: - A pandas DataFrame containing the following columns: - index_code - index_name - stock_code: the code of the constituent stock. - stock_name: the name of the constituent stock. - weight: the weight of the constituent stock. 
- """ - - if '申万' in index_name: - if '申万一级行业' in index_name: - # index_name取后面的名字 - index_name = index_name[6:] - df1 = pd.read_csv('SW2021_industry_L1.csv') - index_code = df1[df1['industry_name'] == index_name]['index_code'].iloc[0] - elif '申万二级行业' in index_name: - index_name = index_name[6:] - df1 = pd.read_csv('SW2021_industry_L2.csv') - index_code = df1[df1['industry_name'] == index_name]['index_code'].iloc[0] - elif '申万三级行业' in index_name: - index_name = index_name[6:] - df1 = pd.read_csv('SW2021_industry_L3.csv') - index_code = df1[df1['industry_name'] == index_name]['index_code'].iloc[0] - - print('The industry code for ', index_name, ' is: ', index_code) - - # 拉取数据 - df = pro.index_member(**{ - "index_code": index_code , #'851251.SI' - "is_new": "", - "ts_code": "", - "limit": "", - "offset": "" - }, fields=[ - "index_code", - "con_code", - "in_date", - "out_date", - "is_new", - "index_name", - "con_name" - ]) - # - # For each stock, filter the start_date and end_date that are between in_date and out_date. - df = df[(df['in_date'] <= start_date)] - df = df[(df['out_date'] >= end_date) | (df['out_date'].isnull())] - - - - df.rename(columns={'con_code': 'stock_code'}, inplace=True) - - df.rename(columns={'con_name': 'stock_name'}, inplace=True) - # - df['weight'] = np.nan - - df = df[['index_code', "index_name", 'stock_code', 'stock_name','weight']] - - else: # 宽基指数 - df1 = pro.index_basic(**{ - "ts_code": "", - "market": "", - "publisher": "", - "category": "", - "name": index_name, - "limit": "", - "offset": "" - }, fields=[ - "ts_code", - "name", - ]) - - index_code = df1["ts_code"][0] - print(f'index_code for basic index {index_name} is {index_code}') - - - # Step 2: Retrieve the constituents of an index based on the index code and given date. - df = pro.index_weight(**{ - "index_code": index_code, - "trade_date": '', - "start_date": start_date, - "end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "index_code", - "con_code", - "trade_date", - "weight" - ]) - # df = df.sort_values(by='trade_date', ascending=True) # - df['index_name'] = index_name - last_day = df['trade_date'][0] - # for the last trading day - df = df[df['trade_date'] == last_day] - df_stock = pd.read_csv('tushare_stock_basic_20230421210721.csv') - # Merge based on the stock code. - df = pd.merge(df, df_stock, how='left', left_on='con_code', right_on='ts_code') - # df.rename(columns={'name_y': 'name'}, inplace=True) - df = df.drop(columns=['symbol', 'area', 'con_code']) - df.sort_values(by='weight', ascending=False, inplace=True) - df.rename(columns={'name': 'stock_name'}, inplace=True) - df.rename(columns={'ts_code': 'stock_code'}, inplace=True) - df.dropna(axis=0, how='any', inplace=True) - # - df = df[['index_code', "index_name", 'stock_code', 'stock_name', 'weight']] - - return df - -# Determine whether the given name is a stock or a fund., -def is_fund(ts_name: str = '') -> bool: - # call get_stock_code()和query_fund_name_or_code() - if get_stock_code(ts_name) is not None and query_fund_name_or_code(ts_name) is None: - return False - elif get_stock_code(ts_name) is None and query_fund_name_or_code(ts_name) is not None: - return True - - - - -def calculate_earning_between_two_time(stock_name: str = '', start_date: str = '', end_date: str = '', index: str = 'close') -> float: - """ - Calculates the rate of return for a specified stock/fund between two dates. 
- - Args: - stock_name: stock_name or fund_name - start_date - end_date - index (str): The index used to calculate the stock return, including 'open' and 'close'. - - Returns: - float: The rate of return for the specified stock between the two dates. - """ - if is_fund(stock_name): - fund_code = query_fund_name_or_code(stock_name) - stock_data = query_fund_data(fund_code, start_date, end_date) - if index =='': - index = 'adj_nav' - else: - stock_data = get_stock_prices_data(stock_name, start_date, end_date,'daily') - try: - end_price = stock_data.iloc[-1][index] - start_price = stock_data.iloc[0][index] - earning = cal_dt(end_price, start_price) - # earning = round((end_price - start_price) / start_price * 100, 2) - except: - print(ts_code,start_date,end_date) - print('##################### 该股票没有数据 #####################') - return None - # percent = earning * 100 - # percent_str = '{:.2f}%'.format(percent) - - return earning - - -def loop_rank(df: pd.DataFrame, func: callable, *args, **kwargs) -> pd.DataFrame: - """ - It iteratively applies the given function to each row and get a result using function. It then stores the calculated result in 'new_feature' column. - - Args: - df: DataFrame with a single column - func : The function to be applied to each row: func(row, *args, **kwargs) - *args: Additional positional arguments for `func` function. - **kwargs: Additional keyword arguments for `func` function. - - Returns: - pd.DataFrame: A output DataFrame with three columns: the constant column, input column, and new_feature column. - The DataFrame is sorted based on the new_feature column in descending order. - - """ - df['new_feature'] = None - loop_var = df.columns[0] - for _, row in df.iterrows(): - res = None - var = row[loop_var] # - - if var is not None: - if loop_var == 'stock_name': - stock_name = var - elif loop_var == 'stock_code': - stock_name = get_stock_name_from_code(var) - elif loop_var == 'fund_name': - stock_name = var - elif loop_var == 'fund_code': - stock_name = query_fund_name_or_code('',var) - time.sleep(0.4) - try: - res = func(stock_name, *args, **kwargs) # - except: - raise ValueError('#####################Error for func#####################') - # res represents the result obtained for the variable. For example, if the variable is a stock name, res could be the return rate of that stock over a certain period or a specific feature value of that stock. Therefore, res should be a continuous value. - # If the format of res is a float, then it can be used directly. However, if res is in DataFrame format, you can retrieve the value corresponding to the index. - if isinstance(res, pd.DataFrame) and not res.empty: - # - try: - res = round(res.loc[:,args[-1]][0], 2) - df.loc[df[loop_var] == var, 'new_feature'] = res - except: - raise ValueError('##################### Error ######################') - elif isinstance(res, float): # - res = res - df.loc[df[loop_var] == var, 'new_feature'] = res - print(var, res) - - - # Remove the rows where the new_feature column is empty. - df = df.dropna(subset=['new_feature']) - stock_data = df.sort_values(by='new_feature', ascending=False) - # - stock_data.insert(0, 'unchanged', loop_var) - stock_data = stock_data.loc[:,[stock_data.columns[0], loop_var, 'new_feature']] - - return stock_data - -def output_mean_median_col(data: pd.DataFrame, col: str = 'new_feature') -> float: - # It calculates the mean and median value for the specified column. 
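    # Despite the '-> float' annotation, the body below returns a (mean, median) tuple, with
    # both values rounded to two decimals.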
- - mean = round(data[col].mean(), 2) - median = round(data[col].median(), 2) - # - #print(title, mean) - return (mean, median) - - -# def output_median_col(data: pd.DataFrame, col: str, title_name: str = '') -> float: -# # It calculates the median value for the specified column and returns the median as a float value. -# -# median = round(data[col].median(), 2) -# #print(title_name, median) -# -# return median - - -def output_weighted_mean_col(data: pd.DataFrame, col: str, weight_col: pd.Series) -> float: - - """ - Calculates the weighted mean of a column and returns the result as a float. - - Args: - data (pd.DataFrame): The input cross-sectional or time-series data containing the feature columns. - col (str): The name of the feature column to calculate the weighted mean for. - weight_col (pd.Series): The weights used for the calculation, as a pandas Series. - - Returns: - float: The weighted mean of the specified feature column. - """ - - weighted_mean = round(np.average(data[col], weights = weight_col)/100., 2) - return weighted_mean - - - -def get_index_data(index_name: str = '', start_date: str = '', end_date: str = '', freq: str = 'daily') -> pd.DataFrame: - """ - This function retrieves daily, weekly, or monthly data for a given stock index. - - Arguments: - - index_name: Name of the index - - start_date: Start date in 'YYYYMMDD' - - end_date: End date in 'YYYYMMDD' - - freq: Frequency 'daily', 'weekly', or 'monthly' - - Returns: - A DataFrame containing the following columns: - trade_date, ts_code, close, open, high, low, pre_close: Previous day's closing price, change(涨跌额), pct_chg(涨跌幅), vol(成交量), amount(成交额), name: Index Name - """ - df1 = pro.index_basic(**{ - "ts_code": "", - "market": "", - "publisher": "", - "category": "", - "name": index_name, - "limit": "", - "offset": "" - }, fields=[ - "ts_code", - "name", - ]) - - index_code = df1["ts_code"][0] - print(f'index_code for index {index_name} is {index_code}') - # - if freq == 'daily': - df = pro.index_daily(**{ - "ts_code": index_code, - "trade_date": '', - "start_date": start_date, - "end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "trade_date", - "ts_code", - "close", - "open", - "high", - "low", - "pre_close", - "change", - "pct_chg", - "vol", - "amount" - ]) - elif freq == 'weekly': - df = pro.index_weekly(**{ - "ts_code": index_code, - "trade_date": '', - "start_date": start_date, - "end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "trade_date", - "ts_code", - "close", - "open", - "high", - "low", - "pre_close", - "change", - "pct_chg", - "vol", - "amount" - ]) - elif freq == 'monthly': - df = pro.index_monthly(**{ - "ts_code": index_code, - "trade_date": '', - "start_date": start_date, - "end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "trade_date", - "ts_code", - "close", - "open", - "high", - "low", - "pre_close", - "change", - "pct_chg", - "vol", - "amount" - ]) - - df = df.sort_values(by='trade_date', ascending=True) # - df['index_name'] = index_name - return df - - - - - -def get_north_south_money(start_date: str = '', end_date: str = '', trade_date: str = '') -> pd.DataFrame: - # - # trade_date: 交易日期 - # ggt_ss: 港股通(上海) - # ggt_sz: 港股通(深圳) - # hgt: 沪股通(亿元) - # sgt: 深股通(亿元) - # north_money: 北向资金(亿元)= hgt + sgt - # south_money: 南向资金(亿元)= ggt_ss + ggt_sz - # name: 固定为'A-H',代表A股和H股 - # accumulate_north_money: 累计北向资金流入 - # accumulate_south_money: 累计南向资金流入 - - - month_df = pro.moneyflow_hsgt(**{ - "trade_date": trade_date, - "start_date": start_date, - 
"end_date": end_date, - "limit": "", - "offset": "" - }, fields=[ - "trade_date", - "ggt_ss", - "ggt_sz", - "hgt", - "sgt", - "north_money", - "south_money" - ]) - - month_df[['ggt_ss','ggt_sz','hgt','sgt','north_money','south_money']] = month_df[['ggt_ss','ggt_sz','hgt','sgt','north_money','south_money']]/100.0 - month_df = month_df.sort_values(by='trade_date', ascending=True) # - month_df['stock_name'] = 'A-H' - month_df['accumulate_north_money'] = month_df['north_money'].cumsum() - month_df['accumulate_south_money'] = month_df['south_money'].cumsum() - return month_df - - - -def plot_k_line(stock_data: pd.DataFrame, title: str = '') -> None: - """ - Plots a K-line chart of stock price and volume. - - Args: - stock_data : A pandas DataFrame containing the stock price information, in which each row - represents a daily record. The DataFrame must contain the 'trade_date','open', 'close', 'high', 'low','volume', 'name' columns, which is used for k-line and volume. - 如果dataframe中还含有'macd','kdj', 'rsi', 'cci', 'boll','pe_ttm','turnover_rate'等列,则在k线图下方绘制这些指标的子图. - title : The title of the K-line chart. - - Returns: - None - """ - - # - stock_data['trade_date'] = pd.to_datetime(stock_data['trade_date'], format='%Y%m%d') - stock_data.set_index('trade_date', inplace=True) - # - custom_style = mpf.make_marketcolors(up='r', down='k', inherit=True) - china_style = mpf.make_mpf_style(marketcolors=custom_style) - - # MACD - # stock_data['macd1'] = stock_data['Close'].ewm(span=12).mean() - stock_data['Close'].ewm(span=26).mean() - # stock_data['macd_signal1'] = stock_data['macd'].ewm(span=9).mean() - - # - #mpf.plot(stock_data, type='candle', volume=True, title=title, mav=(5, 10, 20), style = china_style, addplot = macd) - add_plot = [] - # The index column is located after the name column in the last few columns. - # Retrieve the column names after the 'name' column. - index_list = stock_data.columns[stock_data.columns.get_loc('stock_name')+1:] - - index_df = stock_data[index_list] - - color_list = ['green','blue','red','yellow','black','purple','orange','pink','brown','gray'] - custom_lines = [] - for i in range(len(index_list)): - # If the column names contain 'boll', set panel to 0. Otherwise, set panel to 2. 
- if 'boll' in index_list[i]: - sub_plot = mpf.make_addplot(index_df[index_list[i]], panel=0, ylabel=index_list[i], color=color_list[i], type='line', secondary_y=True) - elif index_list[i] =='macd': - sub_plot = mpf.make_addplot(index_df[index_list[i]], panel=2, ylabel=index_list[i], color=color_list[i], type='bar', secondary_y=False) - - else: - sub_plot = mpf.make_addplot(index_df[index_list[i]], panel=2, ylabel=index_list[i], color=color_list[i], type='line', secondary_y=False) - - custom_line = Line2D([0], [0], color=color_list[i], lw=1, linestyle='dashed') - - - add_plot.append(sub_plot) - custom_lines.append(custom_line) - - mav_colors = ['red', 'green', 'blue'] - - fig, axes = mpf.plot(stock_data, type='candle', volume=True, title=title, mav=(5, 10, 20), mavcolors=mav_colors, style=china_style, addplot=add_plot, returnfig=True) - - - mav_labels = ['5-day MA', '10-day MA', '20-day MA'] - # - legend_lines = [plt.Line2D([0], [0], color=color, lw=2) for color in mav_colors] - - # - axes[0].legend(legend_lines, mav_labels) - - if len(index_list) ==1: - label = index_list[0] - elif len(index_list) > 1: - label_list = [i.split('_')[0] for i in index_list] - # - label = list(set(label_list))[0] - - if len(index_list) >= 1: - if 'boll' in label: - axes[0].legend(custom_lines, index_list, loc='lower right') - - elif len(index_list) > 1: - axes[-2].set_ylabel(label) - axes[-2].legend(custom_lines, index_list, loc='lower right') - - # - fig.set_size_inches(20, 16) - # - for ax in axes: - ax.grid(True) - - #fig.show() - return axes - - -def cal_dt(num_at_time_2: float = 0.0, num_at_time_1: float = 0.0) -> float: - """ - This function calculates the percentage change of a metric from one time to another. - - Args: - - num_at_time_2: the metric value at time 2 (end time) - - num_at_time_1: the metric value at time 1 (start time) - - Returns: - - float: the percentage change of the metric from time 1 to time 2 - - """ - if num_at_time_1 == 0: - num_at_time_1 = 0.0000000001 - return round((num_at_time_2 - num_at_time_1) / num_at_time_1, 4) - - -def query_fund_info(fund_code: str = '') -> pd.DataFrame: - # - # fund_code str Y 基金代码 # fund_name str Y 简称 # management str Y 管理人 # custodian str Y 托管人 # fund_type str Y 投资类型 # found_date str Y 成立日期 # due_date str Y 到期日期 # list_date str Y 上市时间 # issue_date str Y 发行日期 # delist_date str Y 退市日期 # issue_amount float Y 发行份额(亿) # m_fee float Y 管理费 # c_fee float Y 托管费 - # duration_year float Y 存续期 # p_value float Y 面值 # min_amount float Y 起点金额(万元) # benchmark str Y 业绩比较基准 # status str Y 存续状态D摘牌 I发行 L已上市 # invest_type str Y 投资风格 # type str Y 基金类型 # purc_startdate str Y 日常申购起始日 # redm_startdate str Y 日常赎回起始日 # market str Y E场内O场外 - """ - Retrieves information about a fund based on the fund code. - - Args: - fund_code (str, optional): Fund code. Defaults to ''. - - Returns: - df (DataFrame): A DataFrame containing various information about the fund, including fund code, fund name, - management company, custodian company, investment type, establishment date, maturity date, - listing date, issuance date, delisting date, issue amount, management fee, custodian fee, - fund duration, face value, minimum investment amount, benchmark, fund status, investment style, - fund type, start date for daily purchases, start date for daily redemptions, and market type. - The column 'ts_code' is renamed to 'fund_code', and 'name' is renamed to 'fund_name' in the DataFrame. 
- """ - df = pro.fund_basic(**{ - "ts_code": fund_code, - "market": "", - "update_flag": "", - "offset": "", - "limit": "", - "status": "", - "name": "" - }, fields=[ - "ts_code", - "name", - "management", - "custodian", - "fund_type", - "found_date", - "due_date", - "list_date", - "issue_date", - "delist_date", - "issue_amount", - "m_fee", - "c_fee", - "duration_year", - "p_value", - "min_amount", - "benchmark", - "status", - "invest_type", - "type", - "purc_startdate", - "redm_startdate", - "market" - ]) - # - df.rename(columns={'ts_code': 'fund_code'}, inplace=True) - df.rename(columns={'name': 'fund_name'}, inplace=True) - return df - -def query_fund_data(fund_code: str = '', start_date: str = '', end_date: str = '') -> pd.DataFrame: - # - # ts_code str Y TS代码 # ann_date str Y 公告日期 # nav_date str Y 净值日期 # unit_nav float Y 单位净值 # accum_nav float Y 累计净值 - # accum_div float Y 累计分红 # net_asset float Y 资产净值 # total_netasset float Y 合计资产净值 # adj_nav float Y 复权单位净值 pct_chg 每日涨跌幅 - """ - Retrieves fund data based on the fund code, start date, and end date. - - Args: - fund_code (str, optional): Fund code. Defaults to ''. - start_date (str, optional): Start date in YYYYMMDD format. Defaults to ''. - end_date (str, optional): End date in YYYYMMDD format. Defaults to ''. - - Returns: - df (DataFrame): A DataFrame containing fund data, including TS code, announcement date, net asset value date, - unit net asset value, accumulated net asset value, accumulated dividends, net asset value, - total net asset value, adjusted unit net asset value, and fund name. The 'ts_code' column is renamed - to 'fund_code', 'nav_date' is renamed to 'trade_date', and the DataFrame is sorted by the trade date - in ascending order. If the fund code does not exist, None is returned. - """ - df = pro.fund_nav(**{ - "ts_code": fund_code, - "nav_date": "", - "offset": "", - "limit": "", - "market": "", - "start_date": start_date, - "end_date": end_date - }, fields=[ - "ts_code", - "ann_date", - "nav_date", - "unit_nav", - "accum_nav", - "accum_div", - "net_asset", - "total_netasset", - "adj_nav", - "update_flag" - ]) - try: - fund_name= query_fund_name_or_code(fund_code=fund_code) - df['fund_name'] = fund_name - # - df.rename(columns={'ts_code': 'fund_code'}, inplace=True) - df.rename(columns={'nav_date': 'trade_date'}, inplace=True) - df.sort_values(by='trade_date', ascending=True, inplace=True) - except: - print(fund_code,'基金代码不存在') - return None - # - df['pct_chg'] = df['adj_nav'].pct_change() - # - df.loc[0, 'pct_chg'] = 0.0 - - - return df - -def query_fund_name_or_code(fund_name: str = '', fund_code: str = '') -> str: - # - """ - Retrieves the fund code based on the fund name or Retrieves the fund name based on the fund code. - - Args: - fund_name (str, optional): Fund name. Defaults to ''. - fund_code (str, optional): Fund code. Defaults to ''. - - Returns: - code or name: Fund code if fund_name is provided and fund_code is empty. Fund name if fund_code is provided and fund_name is empty. - """ - - - #df = pd.read_csv('./tushare_fund_basic_20230508193747.csv') - # Query the fund code based on the fund name. 
- if fund_name != '' and fund_code == '': - # - df = pd.read_csv('./tushare_fund_basic_all.csv') - # - # df = pro.fund_basic(**{ - # "ts_code": "", - # "market": "", - # "update_flag": "", - # "offset": "", - # "limit": "", - # "status": "", - # "name": fund_name - # }, fields=[ - # "ts_code", - # "name" - # ]) - try: - # - code = df[df['name'] == fund_name]['ts_code'].values[0] - except: - #print(fund_name,'基金名称不存在') - return None - return code - # Query the fund name based on the fund code. - if fund_code != '' and fund_name == '': - df = pd.read_csv('./tushare_fund_basic_all.csv') - try: - name = df[df['ts_code'] == fund_code]['name'].values[0] - except: - #print(fund_code,'基金代码不存在') - return None - return name - - - -def print_save_table(df: pd.DataFrame, title_name: str, save:bool = False ,file_path: str = './output/') -> None: - """ - It prints the dataframe as a formatted table using the PrettyTable library and saves it to a CSV file at the specified file path. - - Args: - - df: the dataframe to be printed and saved to a CSV file - - title_name: the name of the table to be printed and saved - - save: whether to save the table to a CSV file - - file_path: the file path where the CSV file should be saved. - - Returns: None - """ - - # 创建表格table.max_width = 20 - - # table = PrettyTable(df.columns.tolist()) - # table.align = 'l' - # table.max_width = 40 - # - # # - # for row in df.itertuples(index=False): - # table.add_row(row) - - #print(table) - - - if not os.path.exists(file_path): - os.makedirs(file_path) - - if file_path is not None and save == True: - file_path = file_path + title_name + '.csv' - df.to_csv(file_path, index=False) - return df - - - -# -def merge_indicator_for_same_stock(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame: - """ - Merges two DataFrames (two indicators of the same stock) based on common names for same stock. Data from two different stocks cannot be merged - - Args: - df1: DataFrame contains some indicators for stock A. - df2: DataFrame contains other indicators for stock A. - - Returns: - pd.DataFrame: The merged DataFrame contains two different indicators. - """ - if len(set(df1.columns).intersection(set(df2.columns))) > 0: - # If there are identical column names, merge the two DataFrames based on the matching column names. - # - common_cols = list(set(df1.columns).intersection(set(df2.columns))) - # - df = pd.merge(df1, df2, on=common_cols) - return df - else: - # - raise ValueError('The two dataframes have no columns in common.') - -def select_value_by_column(df1:pd.DataFrame, col_name: str = '', row_index: int = -1) -> Union[pd.DataFrame, Any]: - """ - Selects a specific column or a specific value within a DataFrame. - - Args: - df1: The input DataFrame. - col_name: The name of the column to be selected. - row_index: The index of the row to be selected. - - Returns: - Union[pd.DataFrame, Any]. 
row_index=-1: df1[col_name].to_frame() or df1[col_name][row_index] - """ - if row_index == -1: - # - return df1[col_name].to_frame() - else: - # - return df1[col_name][row_index] - - - -if __name__ == "__main__": - stock_name='成都银行' - stock_name2='五粮液' - stock_name3 = '宁德时代' - start = '20230104' - end = '20230504' - fund_name = "华商优势行业" #'易方达蓝筹精选' - - start_quarter = '201001' - end_quarter = '202303' - title_name ='上证50成分股收益率' - ax = None - res = is_fund('易方达蓝筹精选') - #_, ax = plt.subplots() - # code = query_fund_name_or_code('华商优势行业') - # ------------step1 数据查询层 获取股票代码 - # start_last_year = get_last_year_date(start) - # end_last_year = get_last_year_date(end) - stock_code = get_stock_code(stock_name) - # name = get_stock_name_from_code(stock_code) - # print(name) - # print(stock_code) - # stock_code2 = get_stock_code(stock_name2) - # stock_code3 = get_stock_code(stock_name3) - # stock_technical_data = get_Financial_data(stock_code, start, end) - # macrodata = get_ppi_data('', start_quarter, end_quarter, 'ppi_yoy') - # index_daily = get_index_data('沪深300',start,end,'daily') - # index_daily2 = get_index_data('中证500',start,end,'daily') - # index_daily3 = get_index_data('中证1000',start,end,'daily') - # index_daily4 = get_index_data('创业板指',start,end,'daily') - #stock_data = get_index_constituent('上证50','20230101','20230508') - # money = get_north_south_money('20230425', '20230426') - # stock_data = get_stock_prices_data(stock_code, start, end) - - # stock_data = get_stock_monthly_prices_data("","", "",'20230331') - # stock_data = get_stock_prices_data('', start, end, 'daily') - # fund_df = query_fund_Manager('周海栋') - # - # fund_code = select_value_by_column(fund_df, 'fund_code', -1) - # res_earning = loop_rank(fund_code, calculate_earning_between_two_time, start, end, 'adj_nav') - # print(res_earning) - #fund_code = query_fund_name_or_code(fund_name,'') - - - #fund_data = query_fund_data(fund_code, start, end) - #df_daily = get_daily_trading_data(stock_code,'20200101', '20230526') - # stock_data2 = get_stock_prices_data(stock_code2, start, end,'daily') - # stock_data3 = get_stock_prices_data(stock_code3, start, end,'daily') - - # dynamic_new = get_latest_new_from_web('sina') - #stock_df = get_sw_industry_stock('城商行Ⅱ','L2') - # df_macro = get_cpi_ppi_currency_supply_data('200101','202304','cpi','nt_yoy') - # df_macro = get_cpi_ppi_currency_supply_data('200101','202304','ppi','ppi_yoy') - # df_macro = get_cpi_ppi_currency_supply_data('200101','202304','currency_supply','m2_yoy') - # df_gdp = get_GDP_data('2001Q1','2023Q1','gdp_yoy') - # df_gdp = predict_next_value(df_gdp, 'gdp_yoy', 4) - #company_df = get_company_info('贵州茅台') - #print_save_table(company_df, '贵州茅台公司信息') - #fin_df = get_Financial_data_from_time_range(stock_code, '20200101', '20230526','roe') - - #tech_df = get_stock_technical_data(stock_code, start, end) - - - - # ----------------------------------step2 数据处理层 在截面或者时序数据------------------------------------------------------- - # 提取相应指标, 数据处理, 排序,提取,求差,加工.., - # fund_info = query_fund_info('005827.OF') - # value = select_value_by_column(fund_info, 'fund_name', 0) - #fund_index = calculate_stock_index(fund_data,'adj_nav') - #stock_index = rank_index_cross_section(stock_data, 'pct_chg', -1, False) - #stock_index = calculate_stock_index(stock_data, 'pct_chg') - #stock_index_each_day = calculate_stock_index(money, 'north_money') - #stock_index = calculate_stock_index(fin_df, 'roe') - # stock_index2 = calculate_stock_index(stock_data2, 'Cumulative_Earnings_Rate') - # stock_index3 = 
calculate_stock_index(stock_data3, 'Cumulative_Earnings_Rate') - # stock_index4 = calculate_stock_index(index_daily4, 'Cumulative_Earnings_Rate') - # stock_index2 = calculate_stock_index(stock_data2, 'Cumulative_Earnings_Rate') - #stock_index = calculate_stock_index(stock_data1, 'close') - #stock_index2 = calculate_stock_index(tech_df, 'macd') - #stock_index1 = calculate_stock_index(stock_data, 'candle_K') - #stock_index2 = calculate_stock_index(df_daily, 'pe_ttm') - #merge_df = merge_data(stock_index1, stock_index2) - #res_earning = loop_rank(stock_data, 'stock_name', calculate_earning_between_two_time, start, end) - # index_profit_yoy = loop_rank(stock_data, 'stock_name', get_Financial_data, start, end, 'profit_dedt') - # index_profit_yoy = loop_rank(stock_data, 'stock_name', get_Financial_data, start, end, 'netprofit_yoy') - - #res_earning_top_n = rank_index_cross_section(stock_index, 10, False) - #index_profit_yoy_last = loop_rank(stock_data, 'stock_name', get_Financial_data, start_last_year, end_last_year, 'profit_dedt') - # profit_yoy = calculate_stock_index(stock_technical_data, 'dt_netprofit_yoy') - # accumulate_north_month = calculate_stock_index(money, 'accumulate_south_money') - # accumulate_north_month = calculate_stock_index(res_earning, 'accumulate_south_money') - # stock_code = get_stock_code(stock_name) - # fin_df1 = get_Financial_data_from_time_range(stock_code, '20150101', '20230526', 'roa') - # fin_df2 = get_Financial_data_from_time_range(stock_code, '20150101', '20230526', 'roa') - # ax = plot_stock_data(fin_df1, ax, 'line', title_name) - # ax = plot_stock_data(fin_df2, ax, 'line', title_name) - #stock_data = get_index_constituent('上证50','20220105', '20230505') - # stock_data = get_index_constituent('申万二级行业城商行Ⅱ','20220105', '20220505') - # #stock_list = select_value_by_column(stock_data, 'stock_name', -1) - # - # index_profit_yoy = loop_rank(stock_list, get_Financial_data, start, 'netprofit_yoy') - # median = output_median_col(index_profit_yoy, 'new_feature') - # ax = plot_stock_data(index_profit_yoy, ax, 'bar', '上证50的最近季度归母净利润同比增长率') - - - - - - - # ----------------------------------step3 可视化层:文字,图片,表格等多种模态数据输出------------------------------------------------------- - #ax = plot_stock_data(stock_index, ax, 'line', title_name) - #ax = plot_stock_data(stock_index_each_day, ax, 'bar', title_name) - #print_save_table(fund_info, title_name) - - #_, sum_new = output_mean_sum_col(index_profit_yoy,'new_feature') - #_, sum_old = output_mean_sum_col(index_profit_yoy_last,'new_feature') - - - #print('科创50成分股的最近季度归母净利润同比增长率中位数%:', median) - #dt = cal_dt(sum_new, sum_old) - #print('上证50成分股的最近季度归母净利润同比增长率:',dt) - - #plot_k_line(merge_df, title_name) - # ax = plot_stock_data(index_profit_yoy, ax, 'bar', '上证50成分股的最近季度归母净利润同比增长率') - #ax = plot_stock_data(accumulate_north_month, ax, 'line', '2023年1月至4月南向资金累计流向') - - # ax2 = plot_stock_data(stock_index2, ax1, 'line', '贵州茅台VS五粮液近十年收益率对比图') - # ax = plot_stock_data(stock_index, ax,'line', title_name) - # ax = plot_stock_data(stock_index2, ax,'line', title_name) - # ax = plot_stock_data(stock_index3, ax,'line', title_name) - # ax = plot_stock_data(stock_index4, ax,'line', title_name) - - #ax = plot_stock_data(df_gdp, ax, 'line','2010-2022年国内每季度gdp增速同比') - print_save_table(df_gdp,'GDP预测',True) - - # show_dynamic_table(dynamic_new) - - - # ax = plot_stock_data(res_earning, None, 'bar', '张坤管理各个基金收益率') - # stock_data = get_index_constituent('上证50', '20230101', '20230508') - # stock_list = select_value_by_column(stock_data, 'stock_name', -1) - # 
res_earning = loop_rank(stock_list, calculate_earning_between_two_time, start, end) - # res_earning_top_n = rank_index_cross_section(res_earnng, 10, False) - # ax = plot_stock_data(res_earning_top_n, ax, 'bar', title_name) - - # stock_data = get_index_constituent('上证50', '20230101', '20230508') - # stock_list = select_value_by_column(stock_data, 'stock_name', -1) - # res_earning = loop_rank(stock_list, calculate_earning_between_two_time, '20230101', '20230508') - # res_earning_top_n = rank_index_cross_section(res_earning, 10, False) - # ax = plot_stock_data(res_earning_top_n, ax, 'bar', title_name) - - # fund_code = query_fund_name_or_code(fund_name, '') - # fund_data = query_fund_data(fund_code, start, end) - # fund_index = calculate_stock_index(fund_data, 'adj_nav') - # ax = plot_stock_data(fund_index, ax, 'line', title_name) - # fund_df = query_fund_Manager('张坤') - # fund_code = select_value_by_column(fund_df, 'fund_code', -1) - # res_earning = loop_rank(fund_code, calculate_earning_between_two_time, start, end, 'adj_nav') - # ax = plot_stock_data(res_earning, None, 'bar', '张坤管理各个基金收益率') - # company_df = get_company_info('贵州茅台') - # print_save_table(company_df,'gzmt', False) - - - - - if ax is not None: - plt.grid() - plt.show() - - - -# xxx基金经理管理的几只基金中,收益率最高的那只基金的规模是多少----找基金经理search,按收益率排序rank,找到收益率最高的那个select,显示基金信息 show -# 食品饮料行业中所有股票近十年涨幅最大的股票的信息----找行业search(行业分类--找到行业代码,根据行业代码找到股票成分), 收益率排序rank,找到涨幅最大的那个select,显示股票信息show - - - - - - - - - - - - diff --git a/spaces/zxy666/bingo-chatai666/README.md b/spaces/zxy666/bingo-chatai666/README.md deleted file mode 100644 index d65eafbc8431818f738e8e086455fa6159f101bb..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/README.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: bingo -emoji: 📉 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
              - -# Bingo - -Bingo, a New Bing that you can access smoothly. - -A faithful re-creation of the main features of the New Bing web UI; it works from mainland China, is compatible with most Microsoft Bing AI features, and can be self-hosted. - -![GitHub stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![GitHub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - 
              - -## Demo site - -https://bing.github1s.tk - - - -[![img](./docs/images/demo.png)](https://bing.github1s.tk) - -## Features - -- Fully rewritten with Next.js, closely reproducing the New Bing web UI; the experience is essentially the same as Bing AI. -- Supports Docker builds for quick and easy deployment and access. -- Cookies can be configured globally and shared by everyone. -- Supports continuous voice conversations. - -## Roadmap - - - [x] wss forwarding - - [x] One-click deployment - - [x] Better mobile layout - - [x] Image generation - - [x] Voice input (voice commands supported; currently only desktop Edge and Chrome) - - [x] Voice output (needs to be enabled manually) - - [x] Image input - - [x] Custom domains - - [ ] Chat history - - [ ] Dark mode - - [ ] Built-in prompts - - [ ] Offline access - - [ ] Internationalization - -## One-click deployment -You can also deploy your own New Bing AI to 🤗 Hugging Face with one click. - -### Deploy to Hugging Face -1. Click this badge -[![Deploy to HuggingFace](https://img.shields.io/badge/%E7%82%B9%E5%87%BB%E9%83%A8%E7%BD%B2-%F0%9F%A4%97-fff)](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic); the default configuration can be left as is. - -2. After deployment finishes, open "Settings" > "Site domain", copy the HF domain information, and share it with others. - -> Hugging Face does not support binding your own domain, but there are workarounds: -> 1. Option 1: use Cloudflare Workers, see [Use Cloudflare Workers for a custom domain](#use-cloudflare-workers-for-a-custom-domain) -> 2. Option 2: use GitHub Pages plus an iframe, see [How to bind a domain](https://github.com/weaigc/bingo/issues/4) - -### Use Cloudflare Workers for a custom domain - -> Core code: [worker.js](./cloudflare/worker.js) - -- [Sign up for a Cloudflare account](https://dash.cloudflare.com/sign-up) - -- Add a new site. You need your own domain, and its `Name Server` records must be delegated to Cloudflare (search the web for details). - -- Open "Workers" from the left-hand menu and click "Create a Worker". - -- Create the Worker service, copy the full contents of [worker.js](./cloudflare/worker.js) into it, adjust it according to the comments, then save and deploy. - -- Configure your custom access domain under Triggers. - -### Deploy to other platforms -
              - -Other platforms are currently being blocked by New Bing, so you will run into many problems with them; they are no longer recommended, but you can look into them yourself if needed. - - -#### Deploy to Netlify -[![Deploy to Netlify Button](https://www.netlify.com/img/deploy/button.svg)](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo) - -#### Deploy to Vercel -If you are a paid Vercel user, you can use the link below to deploy to Vercel with one click. The free tier has an [API timeout limit](https://vercel.com/docs/concepts/limits/overview) and is not recommended. - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example) - -#### Deploy to Render - -[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/weaigc/bingo) -
              - -## Environment and dependencies - -- Node.js >= 18 -- Bing AI [credentials](#how-to-get-bing_header) - -## Installation and usage - -> Since Microsoft is currently blocking quite aggressively, [deploying to Hugging Face](#deploy-to-hugging-face) is the recommended option. - -* Run with Node - -```bash -git clone https://github.com/weaigc/bingo.git -npm i # pnpm i is recommended -npm run build -npm run start -``` - -* Run with Docker -```bash -docker pull weaigc/bingo -docker run --rm -it -p 7860:7860 weaigc/bingo -# or -docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo -``` - -## How to get BING_HEADER -> Setting BING_HEADER means sharing your own account with everyone who uses this service. If you do not need login-free image generation, setting this variable is not recommended. - -Open https://www.bing.com and sign in, then visit https://www.bing.com/turing/captcha/challenge , pass the human verification, and then - -![BING HEADER](./docs/images/curl.png) - -> The copied content should look like the example below. Once you have confirmed the format is correct, open https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 , paste it in, click "Convert to BING_HEADER and copy", and paste the result from the clipboard. (You can also verify it on that page first.) - -The reference formats are shown below. Note that the format saved from the browser starts with `curl`, while the `BING_HEADER` configured on the server is in `base64`; the two are not interchangeable. -
              -正常格式/网页端保存的格式(格式仅供参考) - -``` -curl 'https://www.bing.com/turing/captcha/challenge' \ - -H 'authority: www.bing.com' \ - -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \ - -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \ - -H 'cache-control: max-age=0' \ - -H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; _RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0uhgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; 
USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \ - -H 'dnt: 1' \ - -H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \ - -H 'sec-ch-ua-arch: "x86"' \ - -H 'sec-ch-ua-bitness: "64"' \ - -H 'sec-ch-ua-full-version: "116.0.1938.29"' \ - -H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \ - -H 'sec-ch-ua-mobile: ?0' \ - -H 'sec-ch-ua-model: ""' \ - -H 'sec-ch-ua-platform: "Windows"' \ - -H 'sec-ch-ua-platform-version: "15.0.0"' \ - -H 'sec-fetch-dest: document' \ - -H 'sec-fetch-mode: navigate' \ - -H 'sec-fetch-site: none' \ - -H 'sec-fetch-user: ?1' \ - -H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \ - -H 'sec-ms-gec-version: 1-116.0.1938.29' \ - -H 'upgrade-insecure-requests: 1' \ - -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \ - -H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \ - -H 'x-edge-shopping-flag: 1' \ - --compressed -``` -
              - -
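If you would rather do the conversion locally instead of using the web page above, a minimal sketch is shown below. It assumes that `BING_HEADER` is simply the Base64 encoding of the copied curl text, and the file name `bing_header.curl` is a hypothetical placeholder; when in doubt, treat the converter page above as the reference.

```python
import base64

# Read the full `curl ...` command copied from the browser (hypothetical file name).
with open("bing_header.curl", encoding="utf-8") as f:
    curl_text = f.read().strip()

# Assumption: BING_HEADER is just the Base64 of the raw curl command text.
bing_header = base64.b64encode(curl_text.encode("utf-8")).decode("ascii")
print(bing_header)  # use this value as the BING_HEADER environment variable
```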
              -转成base64之后的格式(BING_HEADER只能使用 base64 之后的格式) - -``` -Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptSTBmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3lt
RFJ5ODdrZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NTd1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1VSURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRvY3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgICAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA== -``` -
              - - -## Acknowledgements - - Thanks to [EdgeGPT](https://github.com/acheong08/EdgeGPT) for the approach used to proxy the API. - - Thanks to [Vercel AI](https://github.com/vercel-labs/ai-chatbot) for the base scaffolding, and to [ChatHub](https://github.com/chathub-dev/chathub) and [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) for parts of the code. - - -## Questions and discussion - - - -## License - -MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE). - 