parquet-converter committed
Commit 3deef71 · 1 Parent(s): 3325bf0

Update parquet files (step 3 of 296)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1368565466ki/Satdia/commons.py +0 -172
  2. spaces/17TheWord/RealESRGAN/Training.md +0 -100
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe After Effects CS6 Free Download with Crack 64 Bit Kickass Learn the Secrets of Professional Video Editing and Motion Graphics.md +0 -110
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Game Shark Ps2 V6 Iso717 The Best Way to Cheat in PS2 Games.md +0 -162
  5. spaces/1gistliPinn/ChatGPT4/Examples/Dotfuscator Professional Edition 5.0 Cracked.md +0 -6
  6. spaces/1gistliPinn/ChatGPT4/Examples/Download UPD Sda Youth Song Book.md +0 -30
  7. spaces/1gistliPinn/ChatGPT4/Examples/Filme Noi Cu Subtitrare In Romana Download Free.md +0 -26
  8. spaces/1line/AutoGPT/autogpt/commands/twitter.py +0 -26
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Beast Quest MOD APK The Ultimate Adventure Game with Infinite Resources in 2023.md +0 -100
  10. spaces/1phancelerku/anime-remove-background/Enjoy the Best Vegas Casino Experience with Lucky Play Casino - Download Now!.md +0 -98
  11. spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_and_k_diffusion_objects.py +0 -33
  12. spaces/1vash/demo-flask-docker-template/static/style.css +0 -45
  13. spaces/232labs/VToonify/vtoonify/model/raft/core/utils/augmentor.py +0 -246
  14. spaces/2ndelement/voicevox/test/test_mora_list.py +0 -20
  15. spaces/4Taps/SadTalker/src/utils/text2speech.py +0 -12
  16. spaces/AICODER009/food_detection/model.py +0 -36
  17. spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/train_util.py +0 -178
  18. spaces/AIxPha/Real-CUGAN/upcunet_v3.py +0 -714
  19. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/cbhg.py +0 -121
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/live2d.d.ts +0 -11
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/chess/RandomSymobl.js +0 -38
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Factory.js +0 -16
  23. spaces/AjulorC/question_answering_bot_deployed_with_Gradio/app.py +0 -39
  24. spaces/Akmyradov/TurkmenTTSweSTT/vits/modules.py +0 -390
  25. spaces/AlexWang/lama/bin/mask_example.py +0 -14
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/text_inversion.md +0 -277
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet.py +0 -1002
  28. spaces/AnimaLab/bias-test-gpt-pairs/README.md +0 -14
  29. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/training.py +0 -737
  30. spaces/AnnasBlackHat/Image-Similarity/src/model/simlarity_model.py +0 -9
  31. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/bbox.py +0 -72
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/windows.py +0 -195
  33. spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolox_l.py +0 -15
  34. spaces/Audio-AGI/AudioSep/data/audiotext_dataset.py +0 -91
  35. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/__init__.py +0 -19
  36. spaces/Bart92/RVC_HF/lib/infer_pack/commons.py +0 -166
  37. spaces/BetterAPI/BetterChat/svelte.config.js +0 -26
  38. spaces/BetterAPI/BetterChat_new/src/lib/actions/snapScrollToBottom.ts +0 -54
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distro/distro.py +0 -1399
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/config.py +0 -377
  41. spaces/CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md +0 -12
  42. spaces/CVPR/WALT/mmdet/models/dense_heads/cascade_rpn_head.py +0 -784
  43. spaces/CVPR/drawings-to-human/static/index.html +0 -209
  44. spaces/CVPR/regionclip-demo/detectron2/evaluation/__init__.py +0 -13
  45. spaces/Caoyunkang/Segment-Any-Anomaly/utils/metrics.py +0 -219
  46. spaces/CarlDennis/HYTTS/text/mandarin.py +0 -170
  47. spaces/CikeyQI/meme-api/meme_generator/memes/beat_head/__init__.py +0 -47
  48. spaces/Cong723/gpt-academic-public/crazy_functions/高级功能函数模板.py +0 -29
  49. spaces/CosmoAI/ChitChat/README.md +0 -13
  50. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/collect_env.py +0 -14
spaces/1368565466ki/Satdia/commons.py DELETED
@@ -1,172 +0,0 @@
- import math
- import torch
- from torch.nn import functional as F
- import torch.jit
-
-
- def script_method(fn, _rcb=None):
-     return fn
-
-
- def script(obj, optimize=True, _frames_up=0, _rcb=None):
-     return obj
-
-
- torch.jit.script_method = script_method
- torch.jit.script = script
-
-
- def init_weights(m, mean=0.0, std=0.01):
-     classname = m.__class__.__name__
-     if classname.find("Conv") != -1:
-         m.weight.data.normal_(mean, std)
-
-
- def get_padding(kernel_size, dilation=1):
-     return int((kernel_size*dilation - dilation)/2)
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def intersperse(lst, item):
-     result = [item] * (len(lst) * 2 + 1)
-     result[1::2] = lst
-     return result
-
-
- def kl_divergence(m_p, logs_p, m_q, logs_q):
-     """KL(P||Q)"""
-     kl = (logs_q - logs_p) - 0.5
-     kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
-     return kl
-
-
- def rand_gumbel(shape):
-     """Sample from the Gumbel distribution, protect from overflows."""
-     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-     return -torch.log(-torch.log(uniform_samples))
-
-
- def rand_gumbel_like(x):
-     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-     return g
-
-
- def slice_segments(x, ids_str, segment_size=4):
-     ret = torch.zeros_like(x[:, :, :segment_size])
-     for i in range(x.size(0)):
-         idx_str = ids_str[i]
-         idx_end = idx_str + segment_size
-         ret[i] = x[i, :, idx_str:idx_end]
-     return ret
-
-
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
-     b, d, t = x.size()
-     if x_lengths is None:
-         x_lengths = t
-     ids_str_max = x_lengths - segment_size + 1
-     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-     ret = slice_segments(x, ids_str, segment_size)
-     return ret, ids_str
-
-
- def get_timing_signal_1d(
-         length, channels, min_timescale=1.0, max_timescale=1.0e4):
-     position = torch.arange(length, dtype=torch.float)
-     num_timescales = channels // 2
-     log_timescale_increment = (
-         math.log(float(max_timescale) / float(min_timescale)) /
-         (num_timescales - 1))
-     inv_timescales = min_timescale * torch.exp(
-         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
-     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-     signal = F.pad(signal, [0, 0, 0, channels % 2])
-     signal = signal.view(1, channels, length)
-     return signal
-
-
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return x + signal.to(dtype=x.dtype, device=x.device)
-
-
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
- def subsequent_mask(length):
-     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-     return mask
-
-
- @torch.jit.script
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-     n_channels_int = n_channels[0]
-     in_act = input_a + input_b
-     t_act = torch.tanh(in_act[:, :n_channels_int, :])
-     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-     acts = t_act * s_act
-     return acts
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def shift_1d(x):
-     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-     return x
-
-
- def sequence_mask(length, max_length=None):
-     if max_length is None:
-         max_length = length.max()
-     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-     return x.unsqueeze(0) < length.unsqueeze(1)
-
-
- def generate_path(duration, mask):
-     """
-     duration: [b, 1, t_x]
-     mask: [b, 1, t_y, t_x]
-     """
-     device = duration.device
-
-     b, _, t_y, t_x = mask.shape
-     cum_duration = torch.cumsum(duration, -1)
-
-     cum_duration_flat = cum_duration.view(b * t_x)
-     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-     path = path.view(b, t_x, t_y)
-     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-     path = path.unsqueeze(1).transpose(2,3) * mask
-     return path
-
-
- def clip_grad_value_(parameters, clip_value, norm_type=2):
-     if isinstance(parameters, torch.Tensor):
-         parameters = [parameters]
-     parameters = list(filter(lambda p: p.grad is not None, parameters))
-     norm_type = float(norm_type)
-     if clip_value is not None:
-         clip_value = float(clip_value)
-
-     total_norm = 0
-     for p in parameters:
-         param_norm = p.grad.data.norm(norm_type)
-         total_norm += param_norm.item() ** norm_type
-         if clip_value is not None:
-             p.grad.data.clamp_(min=-clip_value, max=clip_value)
-     total_norm = total_norm ** (1. / norm_type)
-     return total_norm
spaces/17TheWord/RealESRGAN/Training.md DELETED
@@ -1,100 +0,0 @@
- # :computer: How to Train Real-ESRGAN
-
- The training codes have been released. <br>
- Note that the codes have been heavily refactored, so there may be some bugs or performance drops. You are welcome to report issues, and I will also retrain the models.
-
- ## Overview
-
- The training has been divided into two stages. These two stages have the same data synthesis process and training pipeline, except for the loss functions. Specifically,
-
- 1. We first train Real-ESRNet with L1 loss from the pre-trained model ESRGAN.
- 1. We then use the trained Real-ESRNet model as an initialization of the generator, and train Real-ESRGAN with a combination of L1 loss, perceptual loss and GAN loss.
-
- ## Dataset Preparation
-
- We use DF2K (DIV2K and Flickr2K) + OST datasets for our training. Only HR images are required. <br>
- You can download them from:
-
- 1. DIV2K: http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip
- 2. Flickr2K: https://cv.snu.ac.kr/research/EDSR/Flickr2K.tar
- 3. OST: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/datasets/OST_dataset.zip
-
- For the DF2K dataset, we use a multi-scale strategy, *i.e.*, we downsample HR images to obtain several Ground-Truth images with different scales.
-
- We then crop DF2K images into sub-images for faster IO and processing.
-
- You need to prepare a txt file containing the image paths. The following are some examples from `meta_info_DF2Kmultiscale+OST_sub.txt` (as different users may have different sub-image partitions, this file may not suit your purpose and you need to prepare your own txt file):
-
- ```txt
- DF2K_HR_sub/000001_s001.png
- DF2K_HR_sub/000001_s002.png
- DF2K_HR_sub/000001_s003.png
- ...
- ```
-
- ## Train Real-ESRNet
-
- 1. Download the pre-trained model [ESRGAN](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth) into `experiments/pretrained_models`.
-    ```bash
-    wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth -P experiments/pretrained_models
-    ```
- 1. Modify the content in the option file `options/train_realesrnet_x4plus.yml` accordingly:
-    ```yml
-    train:
-        name: DF2K+OST
-        type: RealESRGANDataset
-        dataroot_gt: datasets/DF2K  # modify to the root path of your folder
-        meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt  # modify to your own generated meta info txt
-        io_backend:
-            type: disk
-    ```
- 1. If you want to perform validation during training, uncomment those lines and modify them accordingly:
-    ```yml
-    # Uncomment these for validation
-    # val:
-    #     name: validation
-    #     type: PairedImageDataset
-    #     dataroot_gt: path_to_gt
-    #     dataroot_lq: path_to_lq
-    #     io_backend:
-    #         type: disk
-
-    ...
-
-    # Uncomment these for validation
-    # validation settings
-    # val:
-    #     val_freq: !!float 5e3
-    #     save_img: True
-
-    #     metrics:
-    #         psnr: # metric name, can be arbitrary
-    #             type: calculate_psnr
-    #             crop_border: 4
-    #             test_y_channel: false
-    ```
- 1. Before the formal training, you may run in `--debug` mode to check whether everything is OK. We use four GPUs for training:
-    ```bash
-    CUDA_VISIBLE_DEVICES=0,1,2,3 \
-    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --debug
-    ```
- 1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume training if necessary.
-    ```bash
-    CUDA_VISIBLE_DEVICES=0,1,2,3 \
-    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --auto_resume
-    ```
-
- ## Train Real-ESRGAN
-
- 1. After training Real-ESRNet, you now have the file `experiments/train_RealESRNetx4plus_1000k_B12G4_fromESRGAN/model/net_g_1000000.pth`. If you need to specify a different pre-trained path, modify the `pretrain_network_g` value in the option file `train_realesrgan_x4plus.yml`.
- 1. Modify the option file `train_realesrgan_x4plus.yml` accordingly. Most modifications are similar to those listed above.
- 1. Before the formal training, you may run in `--debug` mode to check whether everything is OK. We use four GPUs for training:
-    ```bash
-    CUDA_VISIBLE_DEVICES=0,1,2,3 \
-    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --debug
-    ```
- 1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume training if necessary.
-    ```bash
-    CUDA_VISIBLE_DEVICES=0,1,2,3 \
-    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --auto_resume
-    ```
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe After Effects CS6 Free Download with Crack 64 Bit Kickass Learn the Secrets of Professional Video Editing and Motion Graphics.md DELETED
@@ -1,110 +0,0 @@
- <br />
- <h1>Adobe After Effects CS6 Free Download with Crack 64 Bit Kickass</h1>
- <p>If you are looking for a powerful and versatile software to create stunning motion graphics and visual effects for your videos, you might want to check out Adobe After Effects CS6. This software is widely used by professionals and amateurs alike to produce high-quality videos for various purposes, such as film, TV, web, and social media. In this article, we will show you what Adobe After Effects CS6 is, what features it has, why you need it, and how to download it for free with crack 64 bit kickass.</p>
- <h2>What is Adobe After Effects CS6?</h2>
- <p>Adobe After Effects CS6 is a software that allows you to create and edit motion graphics and visual effects using a timeline-based interface. You can use it to animate text, images, shapes, masks, and layers in 2D or 3D space. You can also apply various effects and presets to enhance your animations and add realism. You can also import and export files from other Adobe products, such as Photoshop, Illustrator, Premiere Pro, and Media Encoder.</p>
- <h2>adobe after effects cs6 free download with crack 64 bit kickass</h2><br /><p><b><b>Download</b> &#9734;&#9734;&#9734; <a href="https://byltly.com/2uKyOW">https://byltly.com/2uKyOW</a></b></p><br /><br />
- <h3>Features of Adobe After Effects CS6</h3>
- <p>Adobe After Effects CS6 has many features that make it a powerful and versatile software for video editing. Here are some of the main features that you can enjoy:</p>
- <h4>Motion Graphics and Visual Effects</h4>
- <p>You can create stunning motion graphics and visual effects using Adobe After Effects CS6. You can use the built-in tools and effects to animate text, images, shapes, masks, and layers in 2D or 3D space. You can also use expressions and scripts to control your animations more precisely. You can also use keyframes and motion paths to define the movement of your elements. You can also use the graph editor to fine-tune the speed and timing of your animations.</p>
- <h4>3D Camera Tracker and Ray-Traced 3D Renderer</h4>
- <p>You can also create realistic 3D scenes using Adobe After Effects CS6. You can use the 3D camera tracker to analyze your footage and generate a 3D camera that matches the movement of your original camera. This way, you can add new elements to your scene that follow the same perspective and depth as your original footage. You can also use the ray-traced 3D renderer to create 3D shapes and text with realistic shadows, reflections, and depth of field.</p>
- <h4>Global Performance Cache and Rolling Shutter Repair</h4>
- <p>You can also enjoy faster performance and better quality using Adobe After Effects CS6. You can use the global performance cache to save your previews in the background so that you don't have to wait for them to render again when you make changes. This way, you can work more efficiently and smoothly. You can also use the rolling shutter repair effect to fix the distortion caused by rolling shutter cameras. This way, you can improve the quality of your footage.</p>
- <p>how to get adobe after effects cs6 for free with crack 64 bit<br />
- adobe after effects cs6 full version free download 64 bit with crack<br />
- adobe after effects cs6 64 bit crack download kickass<br />
- adobe after effects cs6 torrent download 64 bit with crack<br />
- adobe after effects cs6 free download full version for windows 10 64 bit with crack<br />
- adobe after effects cs6 portable free download 64 bit with crack<br />
- adobe after effects cs6 serial number 64 bit free download with crack<br />
- adobe after effects cs6 keygen free download 64 bit with crack<br />
- adobe after effects cs6 patch free download 64 bit with crack<br />
- adobe after effects cs6 activation code free download 64 bit with crack<br />
- adobe after effects cs6 license key free download 64 bit with crack<br />
- adobe after effects cs6 offline installer free download 64 bit with crack<br />
- adobe after effects cs6 setup free download 64 bit with crack<br />
- adobe after effects cs6 highly compressed free download 64 bit with crack<br />
- adobe after effects cs6 rar file free download 64 bit with crack<br />
- adobe after effects cs6 zip file free download 64 bit with crack<br />
- adobe after effects cs6 iso file free download 64 bit with crack<br />
- adobe after effects cs6 direct link free download 64 bit with crack<br />
- adobe after effects cs6 google drive free download 64 bit with crack<br />
- adobe after effects cs6 mega link free download 64 bit with crack<br />
- adobe after effects cs6 mediafire link free download 64 bit with crack<br />
- adobe after effects cs6 dropbox link free download 64 bit with crack<br />
- adobe after effects cs6 one drive link free download 64 bit with crack<br />
- adobe after effects cs6 zippyshare link free download 64 bit with crack<br />
- adobe after effects cs6 uptobox link free download 64 bit with crack<br />
- adobe after effects cs6 openload link free download 64 bit with crack<br />
- adobe after effects cs6 streamable link free download 64 bit with crack<br />
- adobe after effects cs6 youtube link free download 64 bit with crack<br />
- adobe after effects cs6 vimeo link free download 64 bit with crack<br />
- adobe after effects cs6 dailymotion link free download 64 bit with crack<br />
- adobe after effects cs6 tutorial pdf free download 64 bit with crack<br />
- adobe after effects cs6 plugins pack free download 64 bit with crack<br />
- adobe after effects cs6 templates pack free download 64 bit with crack<br />
- adobe after effects cs6 presets pack free download 64 bit with crack<br />
- adobe after effects cs6 scripts pack free download 64 bit with crack<br />
- adobe after effects cs6 expressions pack free download 64 bit with crack<br />
- adobe after effects cs6 fonts pack free download 64 bit with crack<br />
- adobe after effects cs6 transitions pack free download 64 bit with crack<br />
- adobe after effects cs6 animations pack free download 64 bit with crack<br />
- adobe after effects cs6 motion graphics pack free download 64 bit with crack<br />
- adobe after effects cs6 visual effects pack free download 64 bit with crack<br />
- adobe after effects cs6 text effects pack free download 64 bit with crack<br />
- adobe after effects cs6 sound effects pack free download 64 bit with crack<br />
- adobe after effects cs6 video editing software free download 64 bit with crack<br />
- best alternative to adobe after effects cs6 for windows pc 64 bit without cracking or downloading anything</p>
- <h4>Variable Mask Feathering and Shape Layer Extrusion</h4>
- <p>You can also create more advanced masks and shapes using Adobe After Effects CS6. You can use the variable mask feathering tool to adjust the feathering of your masks along different points on the mask edge. This way, you can create more realistic masks that blend well with your background. You can also use the shape layer extrusion tool to extrude your shape layers into 3D objects with bevels and materials. This way, you can create more complex shapes that add depth to your scene.</p>
- <h3>Why You Need Adobe After Effects CS6?</h3>
- <p>Adobe After Effects CS6 is a software that you need if you want to create stunning videos for various purposes. Here are some of the reasons why you need it:</p>
- <h4>Create Stunning Videos for Various Purposes</h4>
- <p>You can use Adobe After Effects CS6 to create stunning videos for various purposes, such as film, TV, web, and social media. You can use it to create cinematic titles, intros, transitions, lower thirds, logos, credits, and more. You can also use it to create visual effects such as explosions, fire, smoke, rain, snow, lightning, etc. You can also use it to create motion graphics such as infographics, charts, graphs, maps, etc.</p>
- <h4>Enhance Your Creativity and Productivity</h4>
- <p>You can also use Adobe After Effects CS6 to enhance your creativity and productivity. You can use it to experiment with different ideas and styles without worrying about rendering time or quality. You can also use it to customize your workspace according to your preferences and workflow. You can also use it to collaborate with other artists using the Creative Cloud services.</p>
- <h4>Work with Other Adobe Products Seamlessly</h4>
- <p>You can also use Adobe After Effects CS6 to work with other Adobe products seamlessly. You can import and export files from other Adobe products such as Photoshop, Illustrator, Premiere Pro, Media Encoder etc. without losing quality or compatibility. You can also use dynamic link to update changes between applications without rendering or exporting.</p>
- <h3>How to Download Adobe After Effects CS6 with Crack 64 Bit Kickass?</h3>
- <p>If you want to download Adobe After Effects CS6 with crack 64 bit kickass for free, you need to follow these steps:</p>
- <h4>Step 1: Download the Torrent File from Kickass</h4>
- <p>The first step is to download the torrent file from kickass website. To do this, you need to have a torrent client installed on your computer such as uTorrent or BitTorrent. Then you need to go to kickass website (https://katcr.to/) and search for "Adobe After Effects CS6". Then you need to find a torrent file that has good seeds and peers (the more the better) and click on "Download Torrent". Then you need to save the torrent file on your computer.</p>
- <h4>Step 2: Install Adobe After Effects CS6 on Your Computer</h4>
- <p>The second step is to install Adobe After Effects CS6 on your computer using the torrent file that you downloaded in step 1. To do this, you need to open the torrent file using your torrent client (uTorrent or BitTorrent) and start downloading the files inside it. Then you need to wait until the download is complete (it may take some time depending on your internet speed). Then you need to open the folder where the files are downloaded (usually in Downloads) and run the setup.exe file as administrator. Then you need to follow the instructions on the screen until the installation is complete.</p>
- <h4>Step 3: Apply the Crack File to Activate the Software</h4>
- <p>The third step is to apply the crack file that came with the torrent file that you downloaded in step 1. To do this, you need to open the folder where the crack file is located (usually in Downloads) and copy it (Ctrl+C). Then you need to go to the folder where Adobe After Effects CS6 is installed (usually in C:\Program Files\Adobe\Adobe After Effects CS6) and paste it (Ctrl+V). Then you need to replace the original file when prompted (click Yes). Then you need to run Adobe After Effects CS6 as administrator (right-click on its icon > Run as administrator). Then you should see a message saying "Adobe Application Manager has been patched successfully". Then you need to close Adobe After Effects CS6.</p>
- <h3>Conclusion</h3>
- <p>In conclusion, Adobe After Effects CS6 is a powerful and versatile software that allows you to create stunning motion graphics and visual effects for your videos. You can enjoy its many features such as motion graphics and visual effects, 3D camera tracker and ray-traced 3D renderer, global performance cache and rolling shutter repair, variable mask feathering and shape layer extrusion, and more. You can also use it for various purposes such as film, TV, web, and social media. You can also enhance your creativity and productivity, and work with other Adobe products seamlessly. You can download it for free with crack 64 bit kickass by following these steps: 1. Download the torrent file from kickass website using a torrent client such as uTorrent or BitTorrent. 2. Install Adobe After Effects CS6 on your computer using the torrent file that you downloaded. 3. Apply the crack file that came with the torrent file to activate the software by copying and pasting it in the installation folder and replacing the original file. By doing this, you can enjoy Adobe After Effects CS6 for free and create amazing videos for your projects.</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about Adobe After Effects CS6:</p>
- <table>
- <tr>
- <th>Question</th>
- <th>Answer</th>
- </tr>
- <tr>
- <td>Is Adobe After Effects CS6 compatible with Windows 10?</td>
- <td>Yes, Adobe After Effects CS6 is compatible with Windows 10. However, you may need to update your drivers and software to ensure optimal performance.</td>
- </tr>
- <tr>
- <td>Is Adobe After Effects CS6 safe to download?</td>
- <td>Yes, Adobe After Effects CS6 is safe to download if you use a reliable torrent website and a trusted torrent client. However, you should always scan your files with an antivirus software before opening them to avoid any malware or viruses.</td>
- </tr>
- <tr>
- <td>Is Adobe After Effects CS6 legal to use?</td>
- <td>No, Adobe After Effects CS6 is not legal to use if you download it for free with crack 64 bit kickass. This is because you are violating the terms and conditions of Adobe and infringing their intellectual property rights. You may face legal consequences if you are caught using it without a valid license.</td>
- </tr>
- <tr>
- <td>What are the system requirements for Adobe After Effects CS6?</td>
- <td>The minimum system requirements for Adobe After Effects CS6 are: - Intel Core 2 Duo or AMD Phenom II processor; 64-bit support required - Microsoft Windows 7 with Service Pack 1 (64 bit), Windows 8 (64 bit), Windows 8.1 (64 bit), or Windows 10 (64 bit) - 4 GB of RAM (8 GB recommended) - 5 GB of available hard-disk space; additional free space required during installation (cannot install on removable flash storage devices) - Additional disk space for disk cache (10 GB recommended) - 1280 x 900 display - OpenGL 2.0–capable system - QuickTime 7.6.6 software required for QuickTime features - Optional: Adobe-certified GPU card for GPU-accelerated ray-traced 3D renderer </td>
- </tr>
- <tr>
- <td>How can I learn Adobe After Effects CS6?</td>
- <td>You can learn Adobe After Effects CS6 by watching online tutorials, reading books and blogs, taking courses and classes, or practicing on your own projects. You can also join online communities and forums where you can ask questions and get feedback from other users.</td>
- </tr>
- </table>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Game Shark Ps2 V6 Iso717 The Best Way to Cheat in PS2 Games.md DELETED
@@ -1,162 +0,0 @@
-
- <h1>What is Game Shark Ps2 V6 Iso717?</h1>
- <p>If you are a fan of PlayStation 2 games, you might have heard of Game Shark Ps2 V6 Iso717. This is a cheat device or software that allows you to modify or enhance your gaming experience by unlocking hidden features, codes, or cheats for your PS2 games. With Game Shark Ps2 V6 Iso717, you can access unlimited lives, ammo, health, money, weapons, items, and more in your favorite PS2 games.</p>
- <p>Game Shark Ps2 V6 Iso717 is an ISO file that you can download and burn onto a CD or DVD. You can then insert the disc into your PS2 console and run the software. The software will scan your memory card and detect the games that you have saved. You can then select the game that you want to play and choose from a list of cheats that are available for that game. You can also create your own custom cheats by using the code generator feature.</p>
- <h2>Game Shark Ps2 V6 Iso717</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://byltly.com/2uKz0Y">https://byltly.com/2uKz0Y</a></b></p><br /><br />
- <h2>How to use Game Shark Ps2 V6 Iso717?</h2>
- <p>Using Game Shark Ps2 V6 Iso717 is easy and simple. Here are the steps that you need to follow:</p>
- <ol>
- <li>Download Game Shark Ps2 V6 Iso717 from one of the sources that we will mention later in this article.</li>
- <li>Burn the ISO file onto a CD or DVD using a software like Nero or ImgBurn.</li>
- <li>Insert the disc into your PS2 console and turn it on.</li>
- <li>The software will load automatically and display a menu with various options.</li>
- <li>Select "Start Game" and then choose "With Codes" or "Without Codes".</li>
- <li>The software will scan your memory card and show you a list of games that you have saved.</li>
- <li>Select the game that you want to play and press X.</li>
- <li>The software will show you a list of cheats that are available for that game.</li>
- <li>Select the cheats that you want to activate and press X.</li>
- <li>Press Start to begin playing the game with the cheats enabled.</li>
- </ol>
- <p>You can also create your own custom cheats by using the code generator feature. To do this, follow these steps:</p>
- <ol>
- <li>Select "Expert Mode" from the main menu.</li>
- <li>Select "Code Generator" from the sub-menu.</li>
- <li>Select the game that you want to create cheats for and press X.</li>
- <li>The software will show you a list of values that correspond to different aspects of the game, such as health, ammo, money, etc.</li>
- <li>Select the value that you want to modify and press X.</li>
- <li>The software will show you a list of possible codes that can change that value.</li>
- <li>Select the code that you want to use and press X.</li>
- <li>The software will ask you to name your cheat and save it on your memory card.</li>
- <li>You can then activate your custom cheat by selecting it from the list of cheats for that game.</li>
- </ol>
- <h3>What are the benefits of using Game Shark Ps2 V6 Iso717?</h3>
- <p>There are many benefits of using Game Shark Ps2 V6 Iso717 for your PS2 games. Some of them are:</p>
- <ul>
- <li>You can enjoy playing your PS2 games without any limitations or restrictions.</li>
- <li>You can unlock hidden features, modes, levels, characters, or items that are normally inaccessible in your PS2 games.</li>
- <li>You can enhance your gaming experience by increasing your performance, skills, abilities, or stats in your PS2 games.</li>
- <li>You can experiment with different combinations of cheats and codes to create new and fun scenarios in your PS2 games.</li>
- <li>You can save time and effort by skipping difficult or boring parts of your PS2 games.</li>
- </ul>
- <h4>What are some of the drawbacks of using Game Shark Ps2 V6 Iso717?</h4>
- <p>While using Game Shark Ps2 V6 Iso717 can be fun and exciting, there are also some drawbacks or risks that you should be aware of. Some of them are:</p>
- <p>Game Shark Ps2 V6 Iso717 download<br />
- Game Shark Ps2 V6 Iso717 cheats<br />
- Game Shark Ps2 V6 Iso717 codes<br />
- Game Shark Ps2 V6 Iso717 iso file<br />
- Game Shark Ps2 V6 Iso717 emulator<br />
- Game Shark Ps2 V6 Iso717 torrent<br />
- Game Shark Ps2 V6 Iso717 free<br />
- Game Shark Ps2 V6 Iso717 online<br />
- Game Shark Ps2 V6 Iso717 rom<br />
- Game Shark Ps2 V6 Iso717 disc<br />
- Game Shark Ps2 V6 Iso717 manual<br />
- Game Shark Ps2 V6 Iso717 review<br />
- Game Shark Ps2 V6 Iso717 gameplay<br />
- Game Shark Ps2 V6 Iso717 video<br />
- Game Shark Ps2 V6 Iso717 youtube<br />
- Game Shark Ps2 V6 Iso717 update<br />
- Game Shark Ps2 V6 Iso717 patch<br />
- Game Shark Ps2 V6 Iso717 crack<br />
- Game Shark Ps2 V6 Iso717 serial key<br />
- Game Shark Ps2 V6 Iso717 activation code<br />
- Game Shark Ps2 V6 Iso717 mod<br />
- Game Shark Ps2 V6 Iso717 hack<br />
- Game Shark Ps2 V6 Iso717 trainer<br />
- Game Shark Ps2 V6 Iso717 save file<br />
- Game Shark Ps2 V6 Iso717 memory card<br />
- Game Shark Ps2 V6 Iso717 usb stick<br />
- Game Shark Ps2 V6 Iso717 adapter<br />
- Game Shark Ps2 V6 Iso717 cable<br />
- Game Shark Ps2 V6 Iso717 controller<br />
- Game Shark Ps2 V6 Iso717 case<br />
- Game Shark Ps2 V6 Iso717 cover art<br />
- Game Shark Ps2 V6 Iso717 box art<br />
- Game Shark Ps2 V6 Iso717 wallpaper<br />
- Game Shark Ps2 V6 Iso717 theme song<br />
- Game Shark Ps2 V6 Iso717 soundtrack<br />
- Game Shark Ps2 V6 Iso717 tips and tricks<br />
- Game Shark Ps2 V6 Iso717 guide and walkthrough<br />
- Game Shark Ps2 V6 Iso717 faq and forum<br />
- Game Shark Ps2 V6 Iso717 best settings and configuration<br />
- Game Shark Ps2 V6 Iso717 compatibility and requirements<br />
- Game Shark Ps2 V6 Iso717 price and availability<br />
- Game Shark Ps2 V6 Iso717 buy and sell<br />
- Game Shark Ps2 V6 Iso717 trade and exchange<br />
- Game Shark Ps2 V6 Iso717 warranty and guarantee<br />
- Game Shark Ps2 V6 Iso717 customer service and support<br />
- Game Shark Ps2 V6 Iso717 feedback and testimonials<br />
- Game Shark Ps2 V6 Iso717 alternatives and competitors<br />
- Game Shark Ps2 V6 Iso717 pros and cons<br />
- Game Shark Ps2 V6 Iso717 benefits and features</p>
- <ul>
- <li>You might lose interest or challenge in playing your PS2 games if you use too many cheats or codes.</li>
- <li>You might encounter glitches, errors, bugs, or crashes in your PS2 games if you use incompatible or faulty cheats or codes.</li>
- <li>You might damage your PS2 console or memory card if you use low-quality or corrupted discs or files.</li>
- <li>You might violate the terms and conditions of your PS2 games or console if you use unauthorized or illegal cheats or codes.</li>
- </ul>
- <h2>Where can you download Game Shark Ps2 V6 Iso717?</h2>
- <p>There are many sources where you can download Game Shark Ps2 V6 Iso717 online. However, not all of them are reliable or safe. Some of them might contain viruses, malware, spyware, adware, or other harmful programs that can harm your computer or device. Some of them might also contain fake, incomplete, outdated, or corrupted files that can damage your PS2 console or memory card. Therefore, you should be careful and selective when choosing where to download Game Shark Ps2 V6 Iso717 from. Here is a table of some of the best sources where you can download Game Shark Ps2 V6 Iso717 from:</p>
- <table style="border-collapse: collapse; width: 100%;">
- <tbody>
- <tr style="height: 23px;">
- <td style="width: 33.3333%; height: 23px; text-align: center;"><strong>Source</strong></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;"><strong>Pros</strong></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;"><strong>Cons</strong></td>
- </tr>
- <tr style="height: 23px;">
- <td style="width: 33.3333%; height: 23px; text-align: center;">CoolROM.com</td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- One of the most popular and trusted sites for downloading ROMs and ISOs.<br />- Has a large collection of PS2 games and cheat devices.<br />- Provides detailed information and screenshots for each file.<br />- Allows users to rate and review each file.<br />- Has a fast and easy download process.<br /></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- Some files might require additional software or tools to extract or burn.<br />- Some files might have broken links or missing parts.<br />- Some files might be region-locked or incompatible with certain consoles.<br /></td>
- </tr>
- <tr style="height: 23px;">
- <td style="width: 33.3333%; height: 23px; text-align: center;">OpenSea.io</td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- A platform for buying and selling digital collectibles and NFTs.<br />- Has a collection of Game Shark Ps2 V6 Iso717 NFTs that are verified and authentic.<br />- Provides a secure and transparent transaction process.<br />- Allows users to bid and negotiate prices.<br />- Has a user-friendly and interactive interface.<br /></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- Requires users to have a cryptocurrency wallet and account.<br />- Charges fees for each transaction.<br />- Has a limited supply and availability of Game Shark Ps2 V6 Iso717 NFTs.<br /></td>
- </tr>
- <tr style="height: 23px;">
- <td style="width: 33.3333%; height: 23px; text-align: center;">Netlify.app</td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- A platform for hosting and deploying websites and web applications.<br />- Has a collection of Game Shark Ps2 V6 Iso717 files that are hosted and shared by other users.<br />- Provides a fast and reliable download speed.<br />- Allows users to preview and test the files before downloading.<br />- Has a simple and minimalist design.<br /></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- Some files might be unverified or unsafe.<br />- Some files might be outdated or incompatible with certain consoles.<br />- Some files might have low quality or resolution.<br /></td>
- </tr>
- </tbody>
- </table>
- <h3>How to verify the authenticity and safety of the download?</h3>
- <p>Before you download Game Shark Ps2 V6 Iso717 from any source, you should verify the authenticity and safety of the file. This will help you avoid downloading fake, incomplete, corrupted, or infected files that can harm your computer or device. Here are some tips on how to verify the authenticity and safety of the download:</p>
- <ul>
- <li>Check the file size, format, and name. The file size should be around 700 MB, the format should be ISO, and the name should be Game Shark Ps2 V6 Iso717 or something similar.</li>
- <li>Check the source reputation, reviews, ratings, and feedback. The source should have a good reputation, positive reviews, high ratings, and helpful feedback from other users.</li>
- <li>Check the virus scan, malware scan, spyware scan, and adware scan. The file should be free of any viruses, malware, spyware, or adware that can harm your computer or device.</li>
- <li>Check the compatibility, region-lock, and update status. The file should be compatible with your PS2 console model and region, not region-locked or restricted to certain countries or areas, and updated to the latest version or patch.</li>
- </ul>
- <h4>What are some alternatives to Game Shark Ps2 V6 Iso717?</h4>
- <p>If you are looking for some alternatives to Game Shark Ps2 V6 Iso717, there are other cheat devices or software that you can use for your PS2 games. Some of them are:</p>
- <table style="border-collapse: collapse; width: 100%;">
- <tbody>
- <tr style="height: 23px;">
- <td style="width: 33.3333%; height: 23px; text-align: center;"><strong>Cheat Device/Software</strong></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;"><strong>Features</strong></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;"><strong>Pros</strong></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;"><strong>Cons</strong></td>
- </tr>
- <tr style="height: 23px;">
- <td style="width: 33.3333%; height: 23px; text-align: center;">Code Breaker</td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- A cheat device that allows you to modify or enhance your PS2 games by using codes or cheats.<br />- Has a large database of codes for over 1500 PS2 games.<br />- Has a code generator feature that allows you to create your own custom codes.<br />- Has a code saver feature that allows you to save your codes on your memory card.<br /></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- Has a user-friendly and intuitive interface.<br />- Has a fast and easy installation and operation process.<br />- Has a high compatibility rate with most PS2 games and consoles.<br /></td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- Some codes might not work properly or cause glitches in some games.<br />- Some codes might require additional hardware or software to activate.<br />- Some codes might be region-locked or incompatible with certain consoles.<br /></td>
- </tr>
- <tr style="height: 23px;">
- <td style="width: 33.3333%; height: 23px; text-align: center;">Action Replay Max</td>
- <td style="width: 33.3333%; height: 23px; text-align: center;">- A cheat device that allows you to modify or enhance your PS2 games by using codes or cheats.<br />- Has a large database of codes for over 2000 PS2 games.<br />- Has a code generator feature that allows you to create your own custom codes.<br />- Has a code saver feature that allows you to save your codes on your memory card.<br />- Has an online mode that allows you to download new codes from the internet.<br /></td>
- , and alternatives. You can download it from various sources, but you should verify the authenticity and safety of the file before downloading. You can also contact customer support for any help or assistance with the software. You can also update the software to the latest version or patch by following the steps that we explained in this article. We hope that this article has helped you learn more about Game Shark Ps2 V6 Iso717 and how to use it for your PS2 games. <h1>FAQs</h1>
- <p>Here are some of the frequently asked questions about Game Shark Ps2 V6 Iso717:</p>
- <ol>
- <li><strong>What is the difference between Game Shark Ps2 V6 Iso717 and Game Shark Ps2 V4?</strong><br />Game Shark Ps2 V6 Iso717 is an updated version of Game Shark Ps2 V4. It has more codes, features, and compatibility than Game Shark Ps2 V4. It also has a code generator feature that allows you to create your own custom codes.</li>
- <li><strong>Can I use Game Shark Ps2 V6 Iso717 on my PS3 or PS4 console?</strong><br />No, you cannot use Game Shark Ps2 V6 Iso717 on your PS3 or PS4 console. It is only compatible with PS2 consoles and games.</li>
- <li><strong>Can I use Game Shark Ps2 V6 Iso717 on my PC or laptop?</strong><br />Yes, you can use Game Shark Ps2 V6 Iso717 on your PC or laptop if you have a PS2 emulator installed on your device. A PS2 emulator is a software that allows you to run PS2 games on your PC or laptop. You can download a PS2 emulator from various sources online, but you should verify the authenticity and safety of the file before downloading.</li>
- <li><strong>Can I use Game Shark Ps2 V6 Iso717 online or offline?</strong><br />You can use Game Shark Ps2 V6 Iso717 both online and offline. However, you should be careful when using it online, as some games or servers might detect or ban you for using cheats or codes. You should also respect the rules and etiquette of online gaming and not ruin the fun or experience for other players.</li>
- <li><strong>Can I use Game Shark Ps2 V6 Iso717 with other cheat devices or software?</strong><br />Yes, you can use Game Shark Ps2 V6 Iso717 with other cheat devices or software, such as Code Breaker, Action Replay Max, or Free McBoot. However, you should be careful when using multiple cheat devices or software at once, as this might cause conflicts or errors in your PS2 console or games.</li>
- </ol>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Dotfuscator Professional Edition 5.0 Cracked.md DELETED
@@ -1,6 +0,0 @@
- <h2>dotfuscator professional edition 5.0 cracked</h2><br /><p><b><b>Download</b> - <a href="https://imgfil.com/2uy1RX">https://imgfil.com/2uy1RX</a></b></p><br /><br />
- <br />
- With injection, Dotfuscator can easily add application monitoring to existing apps and new development. This software ... Distribute trial versions of your apps and protect your . ... rating. Protect VBA Code protects VBA projects from password cracking in MS Excel. ... Dotfuscator professional edition 4.2 ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download UPD Sda Youth Song Book.md DELETED
@@ -1,30 +0,0 @@
-
- <h1>How to Download Sda Youth Song Book for Free</h1>
- <p>If you are looking for a collection of songs that are suitable for young people and youth activities, you might be interested in the Sda Youth Song Book. This book contains 214 songs selected especially for Adventist youth ministries, including hymns, choruses, and contemporary songs. All songs are arranged in four-part harmony and are chorded for guitar.</p>
- <p>But how can you get this book for free? Here are some ways you can download it online:</p>
- <h2>Download Sda Youth Song Book</h2><br /><p><b><b>Download Zip</b> &#9889; <a href="https://imgfil.com/2uy12g">https://imgfil.com/2uy12g</a></b></p><br /><br />
- <ul>
- <li>Visit the Adventist Book Center website[^1^] and add the book to your cart. You can choose between the hardback or the spiral bound format. The book costs $22.99, but you can get free shipping if you order more than $100 worth of products.</li>
- <li>Visit the Adventist Youth Ministries website[^2^] and download the honor book for music. This book contains some of the songs from the Sda Youth Song Book, as well as requirements and activities for earning the music honor badge. You can also find other resources for Adventist youth ministries on this website.</li>
- <li>Visit the Adventist Youth Ministries website[^3^] and download the resource manuals for Adventurers. These manuals contain some of the songs from the Sda Youth Song Book, as well as guidelines and materials for organizing Adventurer clubs for children aged 6-9. You can also find other resources for Adventurer ministries on this website.</li>
- </ul>
- <p>These are some of the ways you can download the Sda Youth Song Book for free online. However, if you want to have a physical copy of the book, you might want to consider buying it from the Adventist Book Center or from your local church bookstore. You can also borrow it from your friends or from your church library. The Sda Youth Song Book is a great resource for enhancing your musical skills and enriching your spiritual life.</p>
-
- <h2>How singing can improve your physical health</h2>
- <p>Singing is not only fun, but also good for your body. Singing can have positive effects on various aspects of your physical health, such as your breathing, posture, blood pressure, and sleep quality. Here are some of the ways singing can benefit your physical health:</p>
- <ul>
- <li>Singing helps with breathing. Singing requires you to control your breath and use your diaphragm muscles. This can improve your lung capacity and function, as well as your oxygen intake. Singing can also help clear your respiratory tract and prevent infections.</li>
- <li>Singing helps with posture. Singing requires you to stand or sit upright, with your shoulders back and your chest open. This can improve your posture and alignment, as well as reduce tension in your neck and back muscles.</li>
- <li>Singing helps with blood pressure. Singing can lower your blood pressure by reducing stress hormones and increasing endorphins. Singing can also improve your blood circulation and oxygen delivery to your tissues and organs.</li>
- <li>Singing helps with sleep. Singing can help strengthen your throat and palate muscles, which can prevent snoring and sleep apnea. Snoring and sleep apnea are conditions that cause interruptions in your breathing during sleep, which can affect your sleep quality and health. Singing can also help you relax and fall asleep faster.</li>
- </ul>
- <h2>How singing can improve your mental health</h2>
- <p>Singing is not only good for your body, but also for your mind. Singing can have positive effects on various aspects of your mental health, such as your mood, stress levels, memory, and social skills. Here are some of the ways singing can benefit your mental health:</p>
- <ul>
- <li>Singing helps with mood. Singing can boost your mood by releasing dopamine and endorphins, which are neurotransmitters that make you feel happy and euphoric. Singing can also reduce cortisol, which is a hormone that causes stress and anxiety.</li>
- <li>Singing helps with stress. Singing can help you cope with stress by providing an outlet for expression and emotion. Singing can also distract you from negative thoughts and worries, and help you focus on the present moment.</li>
- <li>Singing helps with memory. Singing can improve your memory by stimulating your brain and enhancing your cognitive functions. Singing can also help you recall words and melodies, as well as improve your verbal skills and creativity.</li>
- <li>Singing helps with social skills. Singing can help you connect with others by fostering a sense of belonging and community. Singing in a group or a choir can also improve your communication skills, confidence, and self-esteem.</li>
- </ul></p> d5da3c52bf<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Filme Noi Cu Subtitrare In Romana Download Free.md DELETED
@@ -1,26 +0,0 @@
- <h2>filme noi cu subtitrare in romana download free</h2><br /><p><b><b>DOWNLOAD</b> &#8250; <a href="https://imgfil.com/2uy0mI">https://imgfil.com/2uy0mI</a></b></p><br /><br />
-
- 2006. Iatalo
-
- Golan Roth
-
- Tags: Ioana, Golan Roth
-
- The discourse on the relationship
-
- between man and society
-
- On January 12, 1997, a violent attack took place against Golan, because he had to change the police officers he was caught with and because he was the best friend of Andrei's mother, who was in quarantine. I was Golan's witness.
-
- And, as they say, I saw, I saw. For now, an incident and nothing more. I just want to tell you that this is life, after an incident.
-
- My mother ended up in quarantine because she was ill, with a crisis. She had been having it for almost two years, and was then in quarantine.
-
- So I stood aside, trying to figure out what to do. I saw that it would not be good for my mother to give up her place and be left in quarantine there. So, unfortunately, I did not know what to do.
-
- I did not know who to look for and who to see. So, when an incident happens, like then, people get scared, they look after one another, who knows who expects what.
-
- So I was watching television, with cigarettes. In … 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1line/AutoGPT/autogpt/commands/twitter.py DELETED
@@ -1,26 +0,0 @@
- import os
-
- import tweepy
- from dotenv import load_dotenv
-
- load_dotenv()
-
-
- def send_tweet(tweet_text):
-     consumer_key = os.environ.get("TW_CONSUMER_KEY")
-     consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
-     access_token = os.environ.get("TW_ACCESS_TOKEN")
-     access_token_secret = os.environ.get("TW_ACCESS_TOKEN_SECRET")
-     # Authenticate to Twitter
-     auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
-     auth.set_access_token(access_token, access_token_secret)
-
-     # Create API object
-     api = tweepy.API(auth)
-
-     # Send tweet
-     try:
-         api.update_status(tweet_text)
-         print("Tweet sent successfully!")
-     except tweepy.TweepyException as e:
-         print("Error sending tweet: {}".format(e.reason))
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Beast Quest MOD APK The Ultimate Adventure Game with Infinite Resources in 2023.md DELETED
@@ -1,100 +0,0 @@
-
- <h1>Beast Quest Mod APK 2023: Everything You Need to Know</h1>
- <p>Beast Quest is a popular mobile game based on the best-selling fantasy novels by Adam Blade. It is an action-adventure game that lets you explore the open world of Avantia, fight against dangerous creatures and giant beasts, collect treasures and artifacts, and upgrade your equipment. If you are a fan of Beast Quest, you might be interested in the mod apk version of the game that will be released in 2023. Here are some of the features, benefits, and drawbacks of the Beast Quest mod apk 2023.</p>
- <h2>beast quest mod apk 2023</h2><br /><p><b><b>Download File</b> &#9989; <a href="https://urlin.us/2uT34u">https://urlin.us/2uT34u</a></b></p><br /><br />
- <h2>What is a mod apk?</h2>
- <p>A mod apk is a modified version of an original application that has been altered by third-party developers to add or remove some features, enhance the performance, or unlock some premium content. A mod apk usually requires you to download and install it manually from an external source, rather than from the official app store.</p>
- <h2>What are the features of the Beast Quest mod apk 2023?</h2>
- <p>The Beast Quest mod apk 2023 will offer some features that are not available in the original game, such as:</p>
- <ul>
- <li><strong>Unlimited money and potions</strong>: You will be able to buy any item or upgrade without worrying about the cost. You will also have unlimited potions to heal yourself and boost your stats.</li>
- <li><strong>All beasts unlocked</strong>: You will be able to access all the beasts in the game without having to complete the quests or defeat them in boss battles. You can choose any beast to accompany you on your adventure.</li>
- <li><strong>No ads</strong>: You will not see any ads or pop-ups while playing the game. You can enjoy the game without any interruptions or distractions.</li>
- <li><strong>New locations and quests</strong>: You will be able to explore new areas and take on new challenges that are not available in the original game. You will discover new secrets and rewards along the way.</li>
- <li><strong>Improved graphics and sound</strong>: You will experience better graphics and sound quality than the original game. The game will run smoother and faster on your device.</li>
- </ul>
- <h2>What are the benefits of the Beast Quest mod apk 2023?</h2>
- <p>The Beast Quest mod apk 2023 will offer some benefits for players who want to enjoy the game more, such as:</p>
- <ul>
- <li><strong>More fun and excitement</strong>: You will be able to play the game with more freedom and variety. You can customize your hero and your beast, try different strategies and tactics, and explore new possibilities.</li>
- <li><strong>More challenge and reward</strong>: You will be able to face more difficult enemies and bosses, and earn more rewards for your achievements. You can test your skills and knowledge of the game.</li>
- <li><strong>More content and value</strong>: You will be able to access more content and features that are not available in the original game. You can extend your gameplay time and get more value for your money.</li>
- </ul>
- <h2>What are the drawbacks of the Beast Quest mod apk 2023?</h2>
- <p>The Beast Quest mod apk 2023 will also have some drawbacks that you should be aware of before downloading it, such as:</p>
- <ul>
- <li><strong>Potential security risks</strong>: You will be downloading and installing an unofficial version of the game from an unknown source. This could expose your device to malware, viruses, or other harmful software. You should always scan any file before opening it.</li>
- <li><strong>Possible compatibility issues</strong>: You will be using a modified version of the game that may not work properly on your device or with your operating system. This could cause crashes, glitches, or errors. You should always backup your data before installing any mod apk.</li>
- <li><strong>Lack of support and updates</strong>: You will not receive any support or updates from the official developers or publishers of the game. This could affect your gameplay experience or cause problems with future versions of the game. You should always check for updates from the mod apk source.</li>
- </ul>
- <h2>How to download and install the Beast Quest mod apk 2023?</h2>
- <p>If you want to try the Beast Quest mod apk 2023, you will need to follow these steps:</p>
- <p>beast quest unlimited money mod apk 2023<br />
- beast quest hack mod apk download 2023<br />
- beast quest mod apk latest version 2023<br />
- beast quest mod apk free shopping 2023<br />
- beast quest mod apk unlimited potions 2023<br />
- beast quest mod apk android 1 2023<br />
- beast quest mod apk offline 2023<br />
- beast quest mod apk revdl 2023<br />
- beast quest mod apk rexdl 2023<br />
- beast quest mod apk happymod 2023<br />
- beast quest mod apk no root 2023<br />
- beast quest mod apk obb 2023<br />
- beast quest mod apk data 2023<br />
- beast quest mod apk unlimited gems 2023<br />
- beast quest mod apk unlimited coins 2023<br />
- beast quest mod apk unlimited everything 2023<br />
- beast quest mod apk all unlocked 2023<br />
- beast quest mod apk premium 2023<br />
- beast quest mod apk pro 2023<br />
- beast quest mod apk full version 2023<br />
- beast quest mod apk mega mod 2023<br />
- beast quest mod apk god mode 2023<br />
- beast quest mod apk high damage 2023<br />
- beast quest mod apk one hit kill 2023<br />
- beast quest mod apk unlimited health 2023<br />
- beast quest ultimate heroes mod apk 2023<br />
- beast quest an epic adventure mod apk 2023<br />
- beast quest ultimate heroes hack mod apk 2023<br />
- beast quest an epic adventure hack mod apk 2023<br />
- beast quest ultimate heroes unlimited money mod apk 2023<br />
- beast quest an epic adventure unlimited money mod apk 2023<br />
- beast quest ultimate heroes latest version mod apk 2023<br />
- beast quest an epic adventure latest version mod apk 2023<br />
- beast quest ultimate heroes free shopping mod apk 2023<br />
- beast quest an epic adventure free shopping mod apk 2023<br />
- beast quest ultimate heroes unlimited potions mod apk 2023<br />
- beast quest an epic adventure unlimited potions mod apk 2023<br />
- beast quest ultimate heroes android 1 mod apk 2023<br />
- beast quest an epic adventure android 1 mod apk 2023<br />
- beast quest ultimate heroes offline mode apkpure 2022</p>
- <ol>
- <li><strong>Find a reliable source</strong>: You will need to find a website or a platform that offers the Beast Quest mod apk 2023 for download. You can search online or ask for recommendations from other players. You should always check the reviews, ratings, and feedback of the source before downloading anything.</li>
- <li><strong>Download the file</strong>: You will need to download the Beast Quest mod apk 2023 file to your device. You should always scan the file for any malware or viruses before opening it. You should also make sure that you have enough storage space on your device.</li>
- <li><strong>Enable unknown sources</strong>: You will need to enable the option to install apps from unknown sources on your device. This will allow you to install the Beast Quest mod apk 2023 without any restrictions. You can usually find this option in your device settings, security settings, or developer options.</li>
- <li><strong>Install the app</strong>: You will need to install the Beast Quest mod apk 2023 on your device. You should follow the instructions on the screen and agree to the terms and conditions. You should also allow the app to access any permissions or resources that it needs.</li>
- <li><strong>Launch the game</strong>: You will need to launch the Beast Quest mod apk 2023 on your device. You should see the modded features and options in the game menu. You can now enjoy the game with the mod apk version.</li>
- </ol>
- <h2>How to uninstall the Beast Quest mod apk 2023?</h2>
- <p>If you want to uninstall the Beast Quest mod apk 2023, you will need to follow these steps:</p>
- <ol>
- <li><strong>Delete the app</strong>: You will need to delete the Beast Quest mod apk 2023 from your device. You can usually do this by long-pressing the app icon and selecting the uninstall option. You can also go to your device settings, apps, and find the app and uninstall it.</li>
- <li><strong>Clear the cache and data</strong>: You will need to clear the cache and data of the Beast Quest mod apk 2023 from your device. This will remove any residual files or settings that may affect your device performance or storage. You can usually do this by going to your device settings, apps, and finding the app and clearing its cache and data.</li>
- <li><strong>Restore the original game</strong>: You will need to restore the original game of Beast Quest on your device. You can do this by downloading and installing it from the official app store. You should be able to play the game without any modded features or options.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>The Beast Quest mod apk 2023 is a modified version of the original game that offers some features, benefits, and drawbacks for players who want to enjoy the game more. It is not an official version of the game and it may have some security, compatibility, or support issues. It is up to you whether you want to try it or not, but you should always be careful and responsible when downloading and installing any mod apk.</p>
- <h2>FAQs</h2>
- <h3>What is Beast Quest?</h3>
- <p>Beast Quest is a mobile game based on the best-selling fantasy novels by Adam Blade. It is an action-adventure game that lets you explore the open world of Avantia, fight against dangerous creatures and giant beasts, collect treasures and artifacts, and upgrade your equipment.</p>
- <h3>Is Beast Quest free to play?</h3>
- <p>Beast Quest is free to download and play, but it also offers some in-app purchases that can enhance your gameplay experience or unlock some premium content.</p>
- <h3>Is Beast Quest mod apk safe?</h3>
- <p>Beast Quest mod apk is not an official version of the game and it may have some security risks. You should always download and install it from a reliable source and scan it for any malware or viruses before opening it.</p>
- <h3>Is Beast Quest mod apk legal?</h3>
- <p>Beast Quest mod apk is not an authorized version of the game and it may violate some terms and conditions of the original game developers or publishers. You should always respect their intellectual property rights and use their products in a fair and ethical way.</p>
- <h3>How can I contact Beast Quest support?</h3>
- <p>If you have any questions or issues with Beast Quest, you can contact their support team by emailing them at [email protected] or visiting their website at https://support.miniclip.com/.</p>
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Enjoy the Best Vegas Casino Experience with Lucky Play Casino - Download Now!.md DELETED
@@ -1,98 +0,0 @@
-
- <h1>Download Lucky Play Casino: The Best Way to Enjoy Vegas Slots Anywhere You Go</h1>
- <p>Do you love playing slot machines but don't have the time or money to visit a real casino? Do you want to feel the excitement of hitting the jackpot and winning big prizes? If you answered yes, then you should download Lucky Play Casino, the best online casino app that lets you play authentic Vegas slots on your mobile device. In this article, we will tell you what Lucky Play Casino is, how to download it, why you should play it, and some tips and tricks to help you win more.</p>
- <h2>What is Lucky Play Casino?</h2>
- <p>Lucky Play Casino is a free online casino app that features hundreds of slot machines from American Gaming Systems (AGS), the manufacturers of your favorite slot machines in real casinos. You can play classic slots like Golden Wins, Jade Wins, Colossal Diamonds, Royal Reels, Liberty 777, So Hot 7s, and more. You can also play video slots like Monkey in the Bank, Buffalo Nation, Double the Devil, Fever 777, and many others. All the slots have stunning graphics, realistic sounds, exciting bonus rounds, and huge jackpots.</p>
- <h2>download lucky play casino</h2><br /><p><b><b>Download</b> &harr; <a href="https://jinyurl.com/2uNLqE">https://jinyurl.com/2uNLqE</a></b></p><br /><br />
- <p>But that's not all. Lucky Play Casino also offers other casino games like blackjack, video poker, and bingo. You can play these games for free or join the challenging casino tournaments and compete with other players for big payouts. You can also win progressive jackpots that keep growing until someone wins them. And if you need more coins, you can get them for free every day by spinning the wheel, watching videos, completing missions, or inviting friends.</p>
- <h3>Features of Lucky Play Casino</h3>
- <h4>- Authentic casino slots from AGS</h4>
- <p>Lucky Play Casino has the most authentic casino slots from AGS, the leading provider of gaming solutions for casinos worldwide. You can enjoy the same slot machines that you find in Las Vegas, Atlantic City, Macau, and other gambling destinations. You can also discover new slots that are added regularly to keep you entertained.</p>
- <h4>- Free online casino games with bonus rounds and jackpots</h4>
- <p>Lucky Play Casino gives you plenty of opportunities to win big with its free online casino games. You can play slots with bonus rounds that can multiply your winnings or trigger free spins. You can also play slots with jackpots that can award you millions of coins in one spin. And if you're lucky enough, you might even hit the ultimate prize: the Grand Jackpot.</p>
- <h4>- Challenging casino tournaments and progressive jackpots</h4>
- <p>If you want to test your skills and luck against other players, you can join the casino tournaments and play for huge payouts. You can choose from different types of tournaments like slots, blackjack, video poker, or bingo. You can also play for progressive jackpots that are linked across multiple games and increase every time someone plays them.</p>
- <h3>How to download Lucky Play Casino</h3>
- <h4>- For Android devices</h4>
- <p>If you have an Android device, you can download Lucky Play Casino from the Google Play Store. Just follow these steps:</p>
- <ol>
- <li>Open the Google Play Store app</li> <li>Search for "Lucky Play Casino" and tap on the app icon</li> <li>Tap on the "Install" button and wait for the app to download</li> <li>Tap on the "Open" button and enjoy playing Lucky Play Casino</li></ol>
- <h4>- For iOS devices</h4>
- <p>If you have an iOS device, you can download Lucky Play Casino from the App Store. Just follow these steps:</p>
- <ol>
- <li>Open the App Store app</li> <li>Search for "Lucky Play Casino" and tap on the app icon</li> <li>Tap on the "Get" button and enter your Apple ID password if prompted</li> <li>Wait for the app to download and install</li> <li>Tap on the app icon and enjoy playing Lucky Play Casino</li></ol>
- <h2>Why download Lucky Play Casino?</h2>
- <p>Now that you know what Lucky Play Casino is and how to download it, you might be wondering why you should play it. Well, there are many reasons why Lucky Play Casino is the best online casino app for you. Here are some of them:</p>
- <h3>Benefits of playing Lucky Play Casino</h3>
- <h4>- Experience the thrill of Vegas anytime, anywhere</h4>
- <p>Lucky Play Casino lets you experience the thrill of Vegas without leaving your home. You can play authentic casino slots that make you feel like you're in a real casino. You can also enjoy the stunning graphics, realistic sounds, and exciting animations that make the games more fun and immersive. You can play Lucky Play Casino anytime, anywhere, as long as you have an internet connection.</p>
- <p>download lucky play casino app<br />
- download lucky play casino for android<br />
- download lucky play casino for pc<br />
- download lucky play casino free slots<br />
- download lucky play casino games<br />
- download lucky play casino online<br />
- download lucky play casino slots<br />
- how to download lucky play casino<br />
- where to download lucky play casino<br />
- why download lucky play casino<br />
- best way to download lucky play casino<br />
- benefits of downloading lucky play casino<br />
- reviews of download lucky play casino<br />
- tips for downloading lucky play casino<br />
- tricks for downloading lucky play casino<br />
- download lucky play casino and win real money<br />
- download lucky play casino and get bonus coins<br />
- download lucky play casino and enjoy free spins<br />
- download lucky play casino and join tournaments<br />
- download lucky play casino and earn rewards<br />
- download lucky play casino for fun and entertainment<br />
- download lucky play casino for a chance to hit the jackpot<br />
- download lucky play casino for a realistic gaming experience<br />
- download lucky play casino for a variety of games and themes<br />
- download lucky play casino for a friendly and social community<br />
- is it safe to download lucky play casino<br />
- is it legal to download lucky play casino<br />
- is it easy to download lucky play casino<br />
- is it worth it to download lucky play casino<br />
- is it free to download lucky play casino<br />
- how long does it take to download lucky play casino<br />
- how much space does it need to download lucky play casino<br />
- how do I update my download lucky play casino app<br />
- how do I uninstall my download lucky play casino app<br />
- how do I contact support for my download lucky play casino app<br />
- what are the features of download lucky play casino app<br />
- what are the requirements of download lucky play casino app<br />
- what are the advantages of download lucky play casino app<br />
- what are the disadvantages of download lucky play casino app<br />
- what are the alternatives of download lucky play casino app<br />
- can I use my facebook account to download lucky play casino app<br />
- can I use my google account to download lucky play casino app<br />
- can I use my apple account to download lucky play casino app<br />
- can I use my email address to download lucky play casino app<br />
- can I use my phone number to download lucky play casino app<br />
- can I transfer my progress from one device to another with download lucky play casino app<br />
- can I invite my friends to join me with download lucky play casino app<br />
- can I chat with other players with download lucky play casino app<br />
- can I customize my profile with download lucky play casino app<br />
- can I access exclusive offers with download lucky play casino app</p>
- <h4>- Play with millions of other players online</h4>
- <p>Lucky Play Casino is not just a solo game. You can also play with millions of other players online who share your passion for casino games. You can chat with them, send them gifts, join their clubs, or challenge them in tournaments. You can also make new friends and socialize with people from different countries and cultures.</p>
- <h4>- Get free coins and bonuses every day</h4>
- <p>Lucky Play Casino is generous when it comes to giving you free coins and bonuses. You can get free coins every day by spinning the wheel, watching videos, completing missions, or inviting friends. You can also get bonuses for logging in, leveling up, or playing certain games. You can use these coins and bonuses to play more games and win more prizes.</p>
- <h3>Tips and tricks for playing Lucky Play Casino</h3>
- <h4>- Choose the right slot machine for your budget and style</h4>
- <p>Lucky Play Casino has hundreds of slot machines to choose from, but not all of them are suitable for your budget and style. Some slot machines have higher payouts but lower odds, while others have lower payouts but higher odds. Some slot machines have more paylines but higher bets, while others have fewer paylines but lower bets. You should choose a slot machine that matches your budget and style, so you can have more fun and win more.</p>
- <h4>- Use the auto-spin feature to save time and increase your chances of winning</h4>
- <p>Lucky Play Casino has an auto-spin feature that lets you spin the reels automatically without pressing the spin button every time. This feature can save you time and increase your chances of winning, as it can spin faster and more frequently than manual spinning. You can also adjust the number of auto-spins, the bet amount, and the stop conditions according to your preferences.</p>
- <h4>- Join a club and chat with other players for more fun and rewards</h4>
- <p>Lucky Play Casino has a club feature that lets you join or create a club with other players who share your interests. You can chat with them, send them gifts, or play together in club tournaments. You can also earn club points by playing games or completing tasks, which can help you rank up your club and get more rewards.</p>
- <h2>Conclusion</h2>
- <p>Lucky Play Casino is the best online casino app that lets you play authentic Vegas slots on your mobile device. You can download it for free from the Google Play Store or the App Store and enjoy hundreds of slot machines from AGS, the leading provider of gaming solutions for casinos worldwide. You can also play other casino games like blackjack, video poker, and bingo, and join challenging casino tournaments and progressive jackpots. You can experience the thrill of Vegas anytime, anywhere, play with millions of other players online, and get free coins and bonuses every day. You can also use some tips and tricks to help you choose the right slot machine, use the auto-spin feature, and join a club for more fun and rewards.</p>
- <p>If you love playing casino games but don't have the time or money to visit a real casino, then Lucky Play Casino is the perfect app for you. Download it now and start playing today!</p>
- <h3>Frequently Asked Questions</h3>
- <ul>
- <li><b>Q: How can I get more coins and bonuses in Lucky Play Casino?</b></li>
- <li>A: There are many ways to get more coins and bonuses in Lucky Play Casino. You can get free coins every day by spinning the wheel, watching videos, completing missions, or inviting friends. You can also get bonuses for logging in, leveling up, or playing certain games. You can also buy coins and bonuses with real money if you want to.</li>
- </ul>
- <p>I hope this article has helped you learn more about Lucky Play Casino and how to download it. If you have any feedback or suggestions, please let me know in the comments below. Thank you for reading and happy gaming!</p>
- <br />
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_and_k_diffusion_objects.py DELETED
@@ -1,33 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # This file is autogenerated by the command `make fix-copies`, do not edit.
- # flake8: noqa
-
- from ..utils import DummyObject, requires_backends
-
-
- class StableDiffusionKDiffusionPipeline(metaclass=DummyObject):
-     _backends = ["paddle", "paddlenlp", "k_diffusion"]
-
-     def __init__(self, *args, **kwargs):
-         requires_backends(self, ["paddle", "paddlenlp", "k_diffusion"])
-
-     @classmethod
-     def from_config(cls, *args, **kwargs):
-         requires_backends(cls, ["paddle", "paddlenlp", "k_diffusion"])
-
-     @classmethod
-     def from_pretrained(cls, *args, **kwargs):
-         requires_backends(cls, ["paddle", "paddlenlp", "k_diffusion"])
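
For context, a minimal self-contained sketch of how this kind of dummy-object guard typically works — a simplified stand-in, not the actual ppdiffusers implementation:

    # simplified stand-in for the DummyObject / requires_backends pattern
    def requires_backends(obj, backends):
        # the real helper first checks importlib for each backend before raising
        raise ImportError(f"{obj} requires the backends {backends}, which are not installed.")

    class DummyObject(type):
        # metaclass: any class-level attribute access on the placeholder raises
        def __getattr__(cls, name):
            requires_backends(cls, cls._backends)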
 
spaces/1vash/demo-flask-docker-template/static/style.css DELETED
@@ -1,45 +0,0 @@
- body {
-   --text: hsl(0 0% 15%);
-   padding: 2.5rem;
-   font-family: sans-serif;
-   color: var(--text);
- }
-
- body.dark-theme {
-   --text: hsl(0 0% 90%);
-   background-color: hsl(223 39% 7%);
- }
-
- main {
-   max-width: 80rem;
-   text-align: center;
- }
-
- section {
-   display: flex;
-   flex-direction: column;
-   align-items: center;
- }
-
- a {
-   color: var(--text);
- }
-
- form {
-   width: 30rem;
-   margin: 0 auto;
- }
-
- input {
-   width: 100%;
- }
-
- button {
-   cursor: pointer;
- }
-
- .text-gen-output {
-   min-height: 1.2rem;
-   margin: 1rem;
-   border: 0.5px solid grey;
- }
 
spaces/232labs/VToonify/vtoonify/model/raft/core/utils/augmentor.py DELETED
@@ -1,246 +0,0 @@
- import numpy as np
- import random
- import math
- from PIL import Image
-
- import cv2
- cv2.setNumThreads(0)
- cv2.ocl.setUseOpenCL(False)
-
- import torch
- from torchvision.transforms import ColorJitter
- import torch.nn.functional as F
-
-
- class FlowAugmentor:
-     def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True):
-
-         # spatial augmentation params
-         self.crop_size = crop_size
-         self.min_scale = min_scale
-         self.max_scale = max_scale
-         self.spatial_aug_prob = 0.8
-         self.stretch_prob = 0.8
-         self.max_stretch = 0.2
-
-         # flip augmentation params
-         self.do_flip = do_flip
-         self.h_flip_prob = 0.5
-         self.v_flip_prob = 0.1
-
-         # photometric augmentation params
-         self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14)
-         self.asymmetric_color_aug_prob = 0.2
-         self.eraser_aug_prob = 0.5
-
-     def color_transform(self, img1, img2):
-         """ Photometric augmentation """
-
-         # asymmetric
-         if np.random.rand() < self.asymmetric_color_aug_prob:
-             img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8)
-             img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8)
-
-         # symmetric
-         else:
-             image_stack = np.concatenate([img1, img2], axis=0)
-             image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
-             img1, img2 = np.split(image_stack, 2, axis=0)
-
-         return img1, img2
-
-     def eraser_transform(self, img1, img2, bounds=[50, 100]):
-         """ Occlusion augmentation """
-
-         ht, wd = img1.shape[:2]
-         if np.random.rand() < self.eraser_aug_prob:
-             mean_color = np.mean(img2.reshape(-1, 3), axis=0)
-             for _ in range(np.random.randint(1, 3)):
-                 x0 = np.random.randint(0, wd)
-                 y0 = np.random.randint(0, ht)
-                 dx = np.random.randint(bounds[0], bounds[1])
-                 dy = np.random.randint(bounds[0], bounds[1])
-                 img2[y0:y0+dy, x0:x0+dx, :] = mean_color
-
-         return img1, img2
-
-     def spatial_transform(self, img1, img2, flow):
-         # randomly sample scale
-         ht, wd = img1.shape[:2]
-         min_scale = np.maximum(
-             (self.crop_size[0] + 8) / float(ht),
-             (self.crop_size[1] + 8) / float(wd))
-
-         scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
-         scale_x = scale
-         scale_y = scale
-         if np.random.rand() < self.stretch_prob:
-             scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
-             scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
-
-         scale_x = np.clip(scale_x, min_scale, None)
-         scale_y = np.clip(scale_y, min_scale, None)
-
-         if np.random.rand() < self.spatial_aug_prob:
-             # rescale the images
-             img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
-             img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
-             flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
-             flow = flow * [scale_x, scale_y]
-
-         if self.do_flip:
-             if np.random.rand() < self.h_flip_prob:  # h-flip
-                 img1 = img1[:, ::-1]
-                 img2 = img2[:, ::-1]
-                 flow = flow[:, ::-1] * [-1.0, 1.0]
-
-             if np.random.rand() < self.v_flip_prob:  # v-flip
-                 img1 = img1[::-1, :]
-                 img2 = img2[::-1, :]
-                 flow = flow[::-1, :] * [1.0, -1.0]
-
-         y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
-         x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
-
-         img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
-         img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
-         flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
-
-         return img1, img2, flow
-
-     def __call__(self, img1, img2, flow):
-         img1, img2 = self.color_transform(img1, img2)
-         img1, img2 = self.eraser_transform(img1, img2)
-         img1, img2, flow = self.spatial_transform(img1, img2, flow)
-
-         img1 = np.ascontiguousarray(img1)
-         img2 = np.ascontiguousarray(img2)
-         flow = np.ascontiguousarray(flow)
-
-         return img1, img2, flow
-
- class SparseFlowAugmentor:
-     def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False):
-         # spatial augmentation params
-         self.crop_size = crop_size
-         self.min_scale = min_scale
-         self.max_scale = max_scale
-         self.spatial_aug_prob = 0.8
-         self.stretch_prob = 0.8
-         self.max_stretch = 0.2
-
-         # flip augmentation params
-         self.do_flip = do_flip
-         self.h_flip_prob = 0.5
-         self.v_flip_prob = 0.1
-
-         # photometric augmentation params
-         self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)
-         self.asymmetric_color_aug_prob = 0.2
-         self.eraser_aug_prob = 0.5
-
-     def color_transform(self, img1, img2):
-         image_stack = np.concatenate([img1, img2], axis=0)
-         image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
-         img1, img2 = np.split(image_stack, 2, axis=0)
-         return img1, img2
-
-     def eraser_transform(self, img1, img2):
-         ht, wd = img1.shape[:2]
-         if np.random.rand() < self.eraser_aug_prob:
-             mean_color = np.mean(img2.reshape(-1, 3), axis=0)
-             for _ in range(np.random.randint(1, 3)):
-                 x0 = np.random.randint(0, wd)
-                 y0 = np.random.randint(0, ht)
-                 dx = np.random.randint(50, 100)
-                 dy = np.random.randint(50, 100)
-                 img2[y0:y0+dy, x0:x0+dx, :] = mean_color
-
-         return img1, img2
-
-     def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
-         ht, wd = flow.shape[:2]
-         coords = np.meshgrid(np.arange(wd), np.arange(ht))
-         coords = np.stack(coords, axis=-1)
-
-         coords = coords.reshape(-1, 2).astype(np.float32)
-         flow = flow.reshape(-1, 2).astype(np.float32)
-         valid = valid.reshape(-1).astype(np.float32)
-
-         coords0 = coords[valid>=1]
-         flow0 = flow[valid>=1]
-
-         ht1 = int(round(ht * fy))
-         wd1 = int(round(wd * fx))
-
-         coords1 = coords0 * [fx, fy]
-         flow1 = flow0 * [fx, fy]
-
-         xx = np.round(coords1[:,0]).astype(np.int32)
-         yy = np.round(coords1[:,1]).astype(np.int32)
-
-         v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
-         xx = xx[v]
-         yy = yy[v]
-         flow1 = flow1[v]
-
-         flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
-         valid_img = np.zeros([ht1, wd1], dtype=np.int32)
-
-         flow_img[yy, xx] = flow1
-         valid_img[yy, xx] = 1
-
-         return flow_img, valid_img
-
-     def spatial_transform(self, img1, img2, flow, valid):
-         # randomly sample scale
-
-         ht, wd = img1.shape[:2]
-         min_scale = np.maximum(
-             (self.crop_size[0] + 1) / float(ht),
-             (self.crop_size[1] + 1) / float(wd))
-
-         scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
-         scale_x = np.clip(scale, min_scale, None)
-         scale_y = np.clip(scale, min_scale, None)
-
-         if np.random.rand() < self.spatial_aug_prob:
-             # rescale the images
-             img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
-             img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
-             flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
-
-         if self.do_flip:
-             if np.random.rand() < 0.5:  # h-flip
-                 img1 = img1[:, ::-1]
-                 img2 = img2[:, ::-1]
-                 flow = flow[:, ::-1] * [-1.0, 1.0]
-                 valid = valid[:, ::-1]
-
-         margin_y = 20
-         margin_x = 50
-
-         y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
-         x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
-
-         y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
-         x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
-
-         img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
-         img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
-         flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
-         valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
-         return img1, img2, flow, valid
-
-
-     def __call__(self, img1, img2, flow, valid):
-         img1, img2 = self.color_transform(img1, img2)
-         img1, img2 = self.eraser_transform(img1, img2)
-         img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)
-
-         img1 = np.ascontiguousarray(img1)
-         img2 = np.ascontiguousarray(img2)
-         flow = np.ascontiguousarray(flow)
-         valid = np.ascontiguousarray(valid)
-
-         return img1, img2, flow, valid
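
A minimal usage sketch for FlowAugmentor, assuming the class above is in scope; the array shapes are illustrative, real inputs come from an optical-flow dataset:

    import numpy as np

    aug = FlowAugmentor(crop_size=(368, 496))
    img1 = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
    img2 = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
    flow = np.random.randn(480, 640, 2).astype(np.float32)
    img1, img2, flow = aug(img1, img2, flow)  # each output is cropped to 368x496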
 
spaces/2ndelement/voicevox/test/test_mora_list.py DELETED
@@ -1,20 +0,0 @@
- from unittest import TestCase
-
- from voicevox_engine.mora_list import openjtalk_mora2text
-
-
- class TestOpenJTalkMoraList(TestCase):
-     def test_mora2text(self):
-         self.assertEqual("ッ", openjtalk_mora2text["cl"])
-         self.assertEqual("ティ", openjtalk_mora2text["ti"])
-         self.assertEqual("トゥ", openjtalk_mora2text["tu"])
-         self.assertEqual("ディ", openjtalk_mora2text["di"])
-         # GitHub issue #60
-         self.assertEqual("ギェ", openjtalk_mora2text["gye"])
-         self.assertEqual("イェ", openjtalk_mora2text["ye"])
-
-     def test_mora2text_injective(self):
-         """Check that no two different morae map to the same kana reading."""
-         values = list(openjtalk_mora2text.values())
-         uniq_values = list(set(values))
-         self.assertCountEqual(values, uniq_values)
 
spaces/4Taps/SadTalker/src/utils/text2speech.py DELETED
@@ -1,12 +0,0 @@
- import os
-
- def text2speech(txt, audio_path):
-     print(txt)
-     cmd = f'tts --text "{txt}" --out_path {audio_path}'
-     print(cmd)
-     try:
-         os.system(cmd)
-         return audio_path
-     except Exception:
-         print("Error: Failed to convert txt to audio")
-         return None
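
Worth noting: os.system does not raise when the command exits non-zero, so the except branch above rarely fires. A sketch of a stricter variant using subprocess, assuming the same tts CLI is installed:

    import subprocess

    def text2speech_checked(txt, audio_path):
        # subprocess.run(check=True) raises CalledProcessError on a non-zero exit
        try:
            subprocess.run(["tts", "--text", txt, "--out_path", audio_path], check=True)
            return audio_path
        except (subprocess.CalledProcessError, FileNotFoundError):
            print("Error: Failed to convert txt to audio")
            return None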
 
spaces/AICODER009/food_detection/model.py DELETED
@@ -1,36 +0,0 @@
- import torch
- import torchvision
-
- from torch import nn
-
-
- def create_effnetb2_model(num_classes: int = 3,
-                           seed: int = 42):
-     """Creates an EfficientNetB2 feature extractor model and transforms.
-
-     Args:
-         num_classes (int, optional): number of classes in the classifier head.
-             Defaults to 3.
-         seed (int, optional): random seed value. Defaults to 42.
-
-     Returns:
-         model (torch.nn.Module): EffNetB2 feature extractor model.
-         transforms (torchvision.transforms): EffNetB2 image transforms.
-     """
-     # Create EffNetB2 pretrained weights, transforms and model
-     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
-     transforms = weights.transforms()
-     model = torchvision.models.efficientnet_b2(weights=weights)
-
-     # Freeze all layers in base model
-     for param in model.parameters():
-         param.requires_grad = False
-
-     # Change classifier head with random seed for reproducibility
-     torch.manual_seed(seed)
-     model.classifier = nn.Sequential(
-         nn.Dropout(p=0.3, inplace=True),
-         nn.Linear(in_features=1408, out_features=num_classes),
-     )
-
-     return model, transforms
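
A minimal usage sketch (downloads the pretrained weights on first run; the input shape is illustrative):

    import torch

    model, transforms = create_effnetb2_model(num_classes=3, seed=42)
    model.eval()
    x = torch.randn(1, 3, 288, 288)  # the bundled transforms resize/crop to 288x288
    with torch.inference_mode():
        logits = model(x)
    print(logits.shape)  # torch.Size([1, 3])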
 
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/train_util.py DELETED
@@ -1,178 +0,0 @@
- # -*- coding: utf-8 -*-
- #!/usr/bin/env python3
- import os
- import sys
- import logging
- from typing import Callable, Dict, Union
- import yaml
- import torch
- from torch.optim.swa_utils import AveragedModel as torch_average_model
- import numpy as np
- import pandas as pd
- from pprint import pformat
-
-
- def load_dict_from_csv(csv, cols):
-     df = pd.read_csv(csv, sep="\t")
-     output = dict(zip(df[cols[0]], df[cols[1]]))
-     return output
-
-
- def init_logger(filename, level="INFO"):
-     formatter = logging.Formatter(
-         "[ %(levelname)s : %(asctime)s ] - %(message)s")
-     logger = logging.getLogger(__name__ + "." + filename)
-     logger.setLevel(getattr(logging, level))
-     # Log results to std
-     # stdhandler = logging.StreamHandler(sys.stdout)
-     # stdhandler.setFormatter(formatter)
-     # Dump log to file
-     filehandler = logging.FileHandler(filename)
-     filehandler.setFormatter(formatter)
-     logger.addHandler(filehandler)
-     # logger.addHandler(stdhandler)
-     return logger
-
-
- def init_obj(module, config, **kwargs):  # e.g. module = 'captioning.models.encoder'
-     obj_args = config["args"].copy()
-     obj_args.update(kwargs)
-     return getattr(module, config["type"])(**obj_args)
-
-
- def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'):
-     """pprint_dict
-
-     :param outputfun: function to use, defaults to sys.stdout
-     :param in_dict: dict to print
-     """
-     if formatter == 'yaml':
-         format_fun = yaml.dump
-     elif formatter == 'pretty':
-         format_fun = pformat
-     for line in format_fun(in_dict).split('\n'):
-         outputfun(line)
-
-
- def merge_a_into_b(a, b):
-     # merge dict a into dict b. values in a will overwrite b.
-     for k, v in a.items():
-         if isinstance(v, dict) and k in b:
-             assert isinstance(
-                 b[k], dict
-             ), "Cannot inherit key '{}' from base!".format(k)
-             merge_a_into_b(v, b[k])
-         else:
-             b[k] = v
-
-
- def load_config(config_file):
-     with open(config_file, "r") as reader:
-         config = yaml.load(reader, Loader=yaml.FullLoader)
-     if "inherit_from" in config:
-         base_config_file = config["inherit_from"]
-         base_config_file = os.path.join(
-             os.path.dirname(config_file), base_config_file
-         )
-         assert not os.path.samefile(config_file, base_config_file), \
-             "inherit from itself"
-         base_config = load_config(base_config_file)
-         del config["inherit_from"]
-         merge_a_into_b(config, base_config)
-         return base_config
-     return config
-
-
- def parse_config_or_kwargs(config_file, **kwargs):
-     yaml_config = load_config(config_file)
-     # passed kwargs will override yaml config
-     args = dict(yaml_config, **kwargs)
-     return args
-
-
- def store_yaml(config, config_file):
-     with open(config_file, "w") as con_writer:
-         yaml.dump(config, con_writer, indent=4, default_flow_style=False)
-
-
- class MetricImprover:
-
-     def __init__(self, mode):
-         assert mode in ("min", "max")
-         self.mode = mode
-         # min: lower -> better; max: higher -> better
-         self.best_value = np.inf if mode == "min" else -np.inf
-
-     def compare(self, x, best_x):
-         return x < best_x if self.mode == "min" else x > best_x
-
-     def __call__(self, x):
-         if self.compare(x, self.best_value):
-             self.best_value = x
-             return True
-         return False
-
-     def state_dict(self):
-         return self.__dict__
-
-     def load_state_dict(self, state_dict):
-         self.__dict__.update(state_dict)
-
-
- def fix_batchnorm(model: torch.nn.Module):
-     def inner(module):
-         class_name = module.__class__.__name__
-         if class_name.find("BatchNorm") != -1:
-             module.eval()
-     model.apply(inner)
-
-
- def load_pretrained_model(model: torch.nn.Module,
-                           pretrained: Union[str, Dict],
-                           output_fn: Callable = sys.stdout.write):
-     if not isinstance(pretrained, dict) and not os.path.exists(pretrained):
-         output_fn(f"pretrained {pretrained} not exist!")
-         return
-
-     if hasattr(model, "load_pretrained"):
-         model.load_pretrained(pretrained)
-         return
-
-     if isinstance(pretrained, dict):
-         state_dict = pretrained
-     else:
-         state_dict = torch.load(pretrained, map_location="cpu")
-
-     if "model" in state_dict:
-         state_dict = state_dict["model"]
-     model_dict = model.state_dict()
-     pretrained_dict = {
-         k: v for k, v in state_dict.items() if (k in model_dict) and (
-             model_dict[k].shape == v.shape)
-     }
-     output_fn(f"Loading pretrained keys {pretrained_dict.keys()}")
-     model_dict.update(pretrained_dict)
-     model.load_state_dict(model_dict, strict=True)
-
-
- class AveragedModel(torch_average_model):
-
-     def update_parameters(self, model):
-         for p_swa, p_model in zip(self.parameters(), model.parameters()):
-             device = p_swa.device
-             p_model_ = p_model.detach().to(device)
-             if self.n_averaged == 0:
-                 p_swa.detach().copy_(p_model_)
-             else:
-                 p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
-                                                  self.n_averaged.to(device)))
-
-         for b_swa, b_model in zip(list(self.buffers())[1:], model.buffers()):
-             device = b_swa.device
-             b_model_ = b_model.detach().to(device)
-             if self.n_averaged == 0:
-                 b_swa.detach().copy_(b_model_)
-             else:
-                 b_swa.detach().copy_(self.avg_fn(b_swa.detach(), b_model_,
-                                                  self.n_averaged.to(device)))
-         self.n_averaged += 1
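
A brief usage sketch for MetricImprover (the scores are made-up values):

    improver = MetricImprover(mode="max")
    for score in (0.31, 0.45, 0.42, 0.50):
        if improver(score):
            print(f"new best: {score}")  # fires for 0.31, 0.45 and 0.50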
 
spaces/AIxPha/Real-CUGAN/upcunet_v3.py DELETED
@@ -1,714 +0,0 @@
- import torch
- from torch import nn as nn
- from torch.nn import functional as F
- import os, sys
- import numpy as np
-
- root_path = os.path.abspath('.')
- sys.path.append(root_path)
-
-
- class SEBlock(nn.Module):
-     def __init__(self, in_channels, reduction=8, bias=False):
-         super(SEBlock, self).__init__()
-         self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias)
-         self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias)
-
-     def forward(self, x):
-         if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-             x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half()
-         else:
-             x0 = torch.mean(x, dim=(2, 3), keepdim=True)
-         x0 = self.conv1(x0)
-         x0 = F.relu(x0, inplace=True)
-         x0 = self.conv2(x0)
-         x0 = torch.sigmoid(x0)
-         x = torch.mul(x, x0)
-         return x
-
-     def forward_mean(self, x, x0):
-         x0 = self.conv1(x0)
-         x0 = F.relu(x0, inplace=True)
-         x0 = self.conv2(x0)
-         x0 = torch.sigmoid(x0)
-         x = torch.mul(x, x0)
-         return x
-
-
- class UNetConv(nn.Module):
-     def __init__(self, in_channels, mid_channels, out_channels, se):
-         super(UNetConv, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(in_channels, mid_channels, 3, 1, 0),
-             nn.LeakyReLU(0.1, inplace=True),
-             nn.Conv2d(mid_channels, out_channels, 3, 1, 0),
-             nn.LeakyReLU(0.1, inplace=True),
-         )
-         if se:
-             self.seblock = SEBlock(out_channels, reduction=8, bias=True)
-         else:
-             self.seblock = None
-
-     def forward(self, x):
-         z = self.conv(x)
-         if self.seblock is not None:
-             z = self.seblock(z)
-         return z
-
-
- class UNet1(nn.Module):
-     def __init__(self, in_channels, out_channels, deconv):
-         super(UNet1, self).__init__()
-         self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-         self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-         self.conv2 = UNetConv(64, 128, 64, se=True)
-         self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-         self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
-         if deconv:
-             self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
-         else:
-             self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-         for m in self.modules():
-             if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-             elif isinstance(m, nn.Linear):
-                 nn.init.normal_(m.weight, 0, 0.01)
-                 if m.bias is not None:
-                     nn.init.constant_(m.bias, 0)
-
-     def forward(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2(x2)
-         x2 = self.conv2_up(x2)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-4, -4, -4, -4))
-         x3 = self.conv3(x1 + x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         z = self.conv_bottom(x3)
-         return z
-
-     def forward_a(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2.conv(x2)
-         return x1, x2
-
-     def forward_b(self, x1, x2):
-         x2 = self.conv2_up(x2)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-4, -4, -4, -4))
-         x3 = self.conv3(x1 + x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         z = self.conv_bottom(x3)
-         return z
-
-
- class UNet1x3(nn.Module):
-     def __init__(self, in_channels, out_channels, deconv):
-         super(UNet1x3, self).__init__()
-         self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-         self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-         self.conv2 = UNetConv(64, 128, 64, se=True)
-         self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-         self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
-         if deconv:
-             self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2)
-         else:
-             self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-         for m in self.modules():
-             if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-             elif isinstance(m, nn.Linear):
-                 nn.init.normal_(m.weight, 0, 0.01)
-                 if m.bias is not None:
-                     nn.init.constant_(m.bias, 0)
-
-     def forward(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2(x2)
-         x2 = self.conv2_up(x2)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-4, -4, -4, -4))
-         x3 = self.conv3(x1 + x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         z = self.conv_bottom(x3)
-         return z
-
-     def forward_a(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2.conv(x2)
-         return x1, x2
-
-     def forward_b(self, x1, x2):
-         x2 = self.conv2_up(x2)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-4, -4, -4, -4))
-         x3 = self.conv3(x1 + x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         z = self.conv_bottom(x3)
-         return z
-
-
- class UNet2(nn.Module):
-     def __init__(self, in_channels, out_channels, deconv):
-         super(UNet2, self).__init__()
-
-         self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-         self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-         self.conv2 = UNetConv(64, 64, 128, se=True)
-         self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0)
-         self.conv3 = UNetConv(128, 256, 128, se=True)
-         self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0)
-         self.conv4 = UNetConv(128, 64, 64, se=True)
-         self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-         self.conv5 = nn.Conv2d(64, 64, 3, 1, 0)
-
-         if deconv:
-             self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
-         else:
-             self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-         for m in self.modules():
-             if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-             elif isinstance(m, nn.Linear):
-                 nn.init.normal_(m.weight, 0, 0.01)
-                 if m.bias is not None:
-                     nn.init.constant_(m.bias, 0)
-
-     def forward(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2(x2)
-
-         x3 = self.conv2_down(x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         x3 = self.conv3(x3)
-         x3 = self.conv3_up(x3)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
-         x2 = F.pad(x2, (-4, -4, -4, -4))
-         x4 = self.conv4(x2 + x3)
-         x4 = self.conv4_up(x4)
-         x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-16, -16, -16, -16))
-         x5 = self.conv5(x1 + x4)
-         x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
-         z = self.conv_bottom(x5)
-         return z
-
-     def forward_a(self, x):  # conv2/3/4 each end with an SE block
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2.conv(x2)
-         return x1, x2
-
-     def forward_b(self, x2):  # conv2/3/4 each end with an SE block
-         x3 = self.conv2_down(x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         x3 = self.conv3.conv(x3)
-         return x3
-
-     def forward_c(self, x2, x3):  # conv2/3/4 each end with an SE block
-         x3 = self.conv3_up(x3)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
-         x2 = F.pad(x2, (-4, -4, -4, -4))
-         x4 = self.conv4.conv(x2 + x3)
-         return x4
-
-     def forward_d(self, x1, x4):  # conv2/3/4 each end with an SE block
-         x4 = self.conv4_up(x4)
-         x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-16, -16, -16, -16))
-         x5 = self.conv5(x1 + x4)
-         x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
-         z = self.conv_bottom(x5)
-         return z
-
-
- class UpCunet2x(nn.Module):  # exact tiling, lossless end to end
-     def __init__(self, in_channels=3, out_channels=3):
-         super(UpCunet2x, self).__init__()
-         self.unet1 = UNet1(in_channels, out_channels, deconv=True)
-         self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
-     def forward(self, x, tile_mode):  # 1.7G
-         n, c, h0, w0 = x.shape
-         if (tile_mode == 0):  # no tiling
-             ph = ((h0 - 1) // 2 + 1) * 2
-             pw = ((w0 - 1) // 2 + 1) * 2
-             x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')  # must be divisible by 2
-             x = self.unet1.forward(x)
-             x0 = self.unet2.forward(x)
-             x1 = F.pad(x, (-20, -20, -20, -20))
-             x = torch.add(x0, x1)
-             if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2]
-             return x
-         elif (tile_mode == 1):  # halve the longer side
-             if (w0 >= h0):
-                 crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2  # divisible by 2 after halving, so first divisible by 4
-                 crop_size_h = (h0 - 1) // 2 * 2 + 2  # divisible by 2
-             else:
-                 crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2  # divisible by 2 after halving, so first divisible by 4
-                 crop_size_w = (w0 - 1) // 2 * 2 + 2  # divisible by 2
-             crop_size = (crop_size_h, crop_size_w)  # 6.6G
-         elif (tile_mode == 2):  # halve both h and w
-             crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2)  # 5.6G
-         elif (tile_mode == 3):  # one third of h and w
-             crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3)  # 4.2G
-         elif (tile_mode == 4):  # one quarter of h and w
-             crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4)  # 3.7G
-         ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
-         pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
-         x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')
-         n, c, h, w = x.shape
-         se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         n_patch = 0
-         tmp_dict = {}
-         opt_res_dict = {}
-         for i in range(0, h - 36, crop_size[0]):
-             tmp_dict[i] = {}
-             for j in range(0, w - 36, crop_size[1]):
-                 x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36]
-                 n, c1, h1, w1 = x_crop.shape
-                 tmp0, x_crop = self.unet1.forward_a(x_crop)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 n_patch += 1
-                 tmp_dict[i][j] = (tmp0, x_crop)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 36, crop_size[0]):
-             for j in range(0, w - 36, crop_size[1]):
-                 tmp0, x_crop = tmp_dict[i][j]
-                 x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
-                 opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
-                 tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
-         se_mean1 /= n_patch
-         se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         for i in range(0, h - 36, crop_size[0]):
-             for j in range(0, w - 36, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
-                 tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
-                 tmp_x3 = self.unet2.forward_b(tmp_x2)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 36, crop_size[0]):
-             for j in range(0, w - 36, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
-                 tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
-                 tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
-         se_mean1 /= n_patch
-         for i in range(0, h - 36, crop_size[0]):
-             opt_res_dict[i] = {}
-             for j in range(0, w - 36, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
-                 tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
-                 x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
-                 x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                 x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
-                 opt_res_dict[i][j] = x_crop
-         del tmp_dict
-         torch.cuda.empty_cache()
-         res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device)
-         if ("Half" in x.type()):
-             res = res.half()
-         for i in range(0, h - 36, crop_size[0]):
-             for j in range(0, w - 36, crop_size[1]):
-                 res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j]
-         del opt_res_dict
-         torch.cuda.empty_cache()
-         if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2]
-         return res
-
-
- class UpCunet3x(nn.Module):  # exact tiling, lossless end to end
-     def __init__(self, in_channels=3, out_channels=3):
-         super(UpCunet3x, self).__init__()
-         self.unet1 = UNet1x3(in_channels, out_channels, deconv=True)
-         self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
-     def forward(self, x, tile_mode):  # 1.7G
-         n, c, h0, w0 = x.shape
-         if (tile_mode == 0):  # no tiling
-             ph = ((h0 - 1) // 4 + 1) * 4
-             pw = ((w0 - 1) // 4 + 1) * 4
-             x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')  # must be divisible by 2
-             x = self.unet1.forward(x)
-             x0 = self.unet2.forward(x)
-             x1 = F.pad(x, (-20, -20, -20, -20))
-             x = torch.add(x0, x1)
-             if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3]
-             return x
-         elif (tile_mode == 1):  # halve the longer side
-             if (w0 >= h0):
-                 crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2  # divisible by 4 after halving, so first divisible by 8
-                 crop_size_h = (h0 - 1) // 4 * 4 + 4  # divisible by 4
-             else:
-                 crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2  # divisible by 4 after halving, so first divisible by 8
-                 crop_size_w = (w0 - 1) // 4 * 4 + 4  # divisible by 4
-             crop_size = (crop_size_h, crop_size_w)  # 6.6G
-         elif (tile_mode == 2):  # halve both h and w
-             crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2)  # 5.6G
-         elif (tile_mode == 3):  # one third of h and w
-             crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3)  # 4.2G
-         elif (tile_mode == 4):  # one quarter of h and w
-             crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4)  # 3.7G
-         ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
-         pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
-         x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')
-         n, c, h, w = x.shape
-         se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         n_patch = 0
-         tmp_dict = {}
-         opt_res_dict = {}
-         for i in range(0, h - 28, crop_size[0]):
-             tmp_dict[i] = {}
-             for j in range(0, w - 28, crop_size[1]):
-                 x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28]
-                 n, c1, h1, w1 = x_crop.shape
-                 tmp0, x_crop = self.unet1.forward_a(x_crop)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 n_patch += 1
-                 tmp_dict[i][j] = (tmp0, x_crop)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 28, crop_size[0]):
-             for j in range(0, w - 28, crop_size[1]):
-                 tmp0, x_crop = tmp_dict[i][j]
-                 x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
-                 opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
-                 tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
-         se_mean1 /= n_patch
-         se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
451
- for i in range(0, h - 28, crop_size[0]):
452
- for j in range(0, w - 28, crop_size[1]):
453
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
454
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
455
- tmp_x3 = self.unet2.forward_b(tmp_x2)
456
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
457
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
458
- else:
459
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
460
- se_mean0 += tmp_se_mean
461
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
462
- se_mean0 /= n_patch
463
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
464
- if ("Half" in x.type()):
465
- se_mean1 = se_mean1.half()
466
- for i in range(0, h - 28, crop_size[0]):
467
- for j in range(0, w - 28, crop_size[1]):
468
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
469
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
470
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
471
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
472
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
473
- else:
474
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
475
- se_mean1 += tmp_se_mean
476
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
477
- se_mean1 /= n_patch
478
- for i in range(0, h - 28, crop_size[0]):
479
- opt_res_dict[i] = {}
480
- for j in range(0, w - 28, crop_size[1]):
481
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
482
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
483
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
484
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
485
- x_crop = torch.add(x0, x1) # x0是unet2的最终输出
486
- opt_res_dict[i][j] = x_crop #
487
- del tmp_dict
488
- torch.cuda.empty_cache()
489
- res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device)
490
- if ("Half" in x.type()):
491
- res = res.half()
492
- for i in range(0, h - 28, crop_size[0]):
493
- for j in range(0, w - 28, crop_size[1]):
494
- res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j]
495
- del opt_res_dict
496
- torch.cuda.empty_cache()
497
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3]
498
- return res
499
-
500
-
501
- class UpCunet4x(nn.Module): # 完美tile,全程无损
502
- def __init__(self, in_channels=3, out_channels=3):
503
- super(UpCunet4x, self).__init__()
504
- self.unet1 = UNet1(in_channels, 64, deconv=True)
505
- self.unet2 = UNet2(64, 64, deconv=False)
506
- self.ps = nn.PixelShuffle(2)
507
- self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)
508
-
509
- def forward(self, x, tile_mode):
510
- n, c, h0, w0 = x.shape
511
- x00 = x
512
- if (tile_mode == 0): # 不tile
513
- ph = ((h0 - 1) // 2 + 1) * 2
514
- pw = ((w0 - 1) // 2 + 1) * 2
515
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # 需要保证被2整除
516
- x = self.unet1.forward(x)
517
- x0 = self.unet2.forward(x)
518
- x1 = F.pad(x, (-20, -20, -20, -20))
519
- x = torch.add(x0, x1)
520
- x = self.conv_final(x)
521
- x = F.pad(x, (-1, -1, -1, -1))
522
- x = self.ps(x)
523
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4]
524
- x += F.interpolate(x00, scale_factor=4, mode='nearest')
525
- return x
526
- elif (tile_mode == 1): # 对长边减半
527
- if (w0 >= h0):
528
- crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除
529
- crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除
530
- else:
531
- crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除
532
- crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除
533
- crop_size = (crop_size_h, crop_size_w) # 6.6G
534
- elif (tile_mode == 2): # hw都减半
535
- crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G
536
- elif (tile_mode == 3): # hw都三分之一
537
- crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G
538
- elif (tile_mode == 4): # hw都四分之一
539
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G
540
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
541
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
542
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')
543
- n, c, h, w = x.shape
544
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
545
- if ("Half" in x.type()):
546
- se_mean0 = se_mean0.half()
547
- n_patch = 0
548
- tmp_dict = {}
549
- opt_res_dict = {}
550
- for i in range(0, h - 38, crop_size[0]):
551
- tmp_dict[i] = {}
552
- for j in range(0, w - 38, crop_size[1]):
553
- x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38]
554
- n, c1, h1, w1 = x_crop.shape
555
- tmp0, x_crop = self.unet1.forward_a(x_crop)
556
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
557
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
558
- else:
559
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
560
- se_mean0 += tmp_se_mean
561
- n_patch += 1
562
- tmp_dict[i][j] = (tmp0, x_crop)
563
- se_mean0 /= n_patch
564
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
565
- if ("Half" in x.type()):
566
- se_mean1 = se_mean1.half()
567
- for i in range(0, h - 38, crop_size[0]):
568
- for j in range(0, w - 38, crop_size[1]):
569
- tmp0, x_crop = tmp_dict[i][j]
570
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
571
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
572
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
573
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
574
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
575
- else:
576
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
577
- se_mean1 += tmp_se_mean
578
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
579
- se_mean1 /= n_patch
580
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
581
- if ("Half" in x.type()):
582
- se_mean0 = se_mean0.half()
583
- for i in range(0, h - 38, crop_size[0]):
584
- for j in range(0, w - 38, crop_size[1]):
585
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
586
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
587
- tmp_x3 = self.unet2.forward_b(tmp_x2)
588
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
589
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
590
- else:
591
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
592
- se_mean0 += tmp_se_mean
593
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
594
- se_mean0 /= n_patch
595
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
596
- if ("Half" in x.type()):
597
- se_mean1 = se_mean1.half()
598
- for i in range(0, h - 38, crop_size[0]):
599
- for j in range(0, w - 38, crop_size[1]):
600
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
601
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
602
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
603
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
604
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
605
- else:
606
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
607
- se_mean1 += tmp_se_mean
608
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
609
- se_mean1 /= n_patch
610
- for i in range(0, h - 38, crop_size[0]):
611
- opt_res_dict[i] = {}
612
- for j in range(0, w - 38, crop_size[1]):
613
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
614
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
615
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
616
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
617
- x_crop = torch.add(x0, x1) # x0是unet2的最终输出
618
- x_crop = self.conv_final(x_crop)
619
- x_crop = F.pad(x_crop, (-1, -1, -1, -1))
620
- x_crop = self.ps(x_crop)
621
- opt_res_dict[i][j] = x_crop
622
- del tmp_dict
623
- torch.cuda.empty_cache()
624
- res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device)
625
- if ("Half" in x.type()):
626
- res = res.half()
627
- for i in range(0, h - 38, crop_size[0]):
628
- for j in range(0, w - 38, crop_size[1]):
629
- # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape)
630
- res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j]
631
- del opt_res_dict
632
- torch.cuda.empty_cache()
633
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4]
634
- res += F.interpolate(x00, scale_factor=4, mode='nearest')
635
- return res #
636
-
637
-
638
- class RealWaifuUpScaler(object):
639
- def __init__(self, scale, weight_path, half, device):
640
- weight = torch.load(weight_path, map_location="cpu")
641
- self.model = eval("UpCunet%sx" % scale)()
642
- if (half == True):
643
- self.model = self.model.half().to(device)
644
- else:
645
- self.model = self.model.to(device)
646
- self.model.load_state_dict(weight, strict=True)
647
- self.model.eval()
648
- self.half = half
649
- self.device = device
650
-
651
- def np2tensor(self, np_frame):
652
- if (self.half == False):
653
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255
654
- else:
655
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255
656
-
657
- def tensor2np(self, tensor):
658
- if (self.half == False):
659
- return (
660
- np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0)))
661
- else:
662
- return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(),
663
- (1, 2, 0)))
664
-
665
- def __call__(self, frame, tile_mode):
666
- with torch.no_grad():
667
- tensor = self.np2tensor(frame)
668
- result = self.tensor2np(self.model(tensor, tile_mode))
669
- return result
670
-
671
-
672
- if __name__ == "__main__":
673
- ###########inference_img
674
- import time, cv2, sys
675
- from time import time as ttime
676
-
677
- for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3),
678
- ("weights_v3/up4x-latest-denoise3x.pth", 4)]:
679
- for tile_mode in [0, 1, 2, 3, 4]:
680
- upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0")
681
- input_dir = "%s/input_dir1" % root_path
682
- output_dir = "%s/opt-dir-all-test" % root_path
683
- os.makedirs(output_dir, exist_ok=True)
684
- for name in os.listdir(input_dir):
685
- print(name)
686
- tmp = name.split(".")
687
- inp_path = os.path.join(input_dir, name)
688
- suffix = tmp[-1]
689
- prefix = ".".join(tmp[:-1])
690
- tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
691
- print(inp_path, tmp_path)
692
- # 支持中文路径
693
- # os.link(inp_path, tmp_path)#win用硬链接
694
- os.symlink(inp_path, tmp_path) # linux用软链接
695
- frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]]
696
- t0 = ttime()
697
- result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1]
698
- t1 = ttime()
699
- print(prefix, "done", t1 - t0)
700
- tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
701
- cv2.imwrite(tmp_opt_path, result)
702
- n = 0
703
- while (1):
704
- if (n == 0):
705
- suffix = "_%sx_tile%s.png" % (scale, tile_mode)
706
- else:
707
- suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) #
708
- if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False):
709
- break
710
- else:
711
- n += 1
712
- final_opt_path = os.path.join(output_dir, prefix + suffix)
713
- os.rename(tmp_opt_path, final_opt_path)
714
- os.remove(tmp_path)
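The tiled branches above all share one trick worth noting: the squeeze-and-excitation (SE) blocks inside the UNets need a global spatial mean, so the forward pass runs every overlapping tile only part-way, averages the per-tile channel means into a single shared statistic, and then finishes each tile with that shared mean; that is why the tiled output can match a full-image pass exactly. A minimal, self-contained sketch of the two-pass idea (the toy `stage_a`/`stage_b` convolutions and the sigmoid gating are illustrative stand-ins, not the actual unet1/unet2):

```py
import torch

def tiled_forward(x, stage_a, stage_b, tile):
    """Two-pass tiling: pass 1 collects a global channel mean,
    pass 2 finishes each tile using that shared statistic."""
    n, c, h, w = x.shape
    feats, se_mean, n_patch = {}, 0, 0
    for i in range(0, h, tile):                      # pass 1: partial forward per tile
        for j in range(0, w, tile):
            f = stage_a(x[:, :, i:i + tile, j:j + tile])
            se_mean = se_mean + f.mean(dim=(2, 3), keepdim=True)
            feats[(i, j)] = f
            n_patch += 1
    se_mean = se_mean / n_patch                      # one shared global statistic
    out = torch.zeros_like(x)
    for (i, j), f in feats.items():                  # pass 2: finish with the shared mean
        out[:, :, i:i + tile, j:j + tile] = stage_b(f * torch.sigmoid(se_mean))
    return out

if __name__ == "__main__":
    a = torch.nn.Conv2d(3, 3, 1)
    b = torch.nn.Conv2d(3, 3, 1)
    y = tiled_forward(torch.randn(1, 3, 8, 8), a, b, tile=4)
    print(y.shape)  # torch.Size([1, 3, 8, 8])
```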
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/cbhg.py DELETED
@@ -1,121 +0,0 @@
"""
The CBHG model implementation
"""
from typing import List, Optional

from torch import nn
import torch

from poetry_diacritizer.modules.tacotron_modules import CBHG, Prenet


class CBHGModel(nn.Module):
    """CBHG model implementation as described in the paper:
    https://ieeexplore.ieee.org/document/9274427

    Args:
        inp_vocab_size (int): the number of the input symbols
        targ_vocab_size (int): the number of the target symbols (diacritics)
        embedding_dim (int): the embedding size
        use_prenet (bool): whether to use prenet or not
        prenet_sizes (List[int]): the sizes of the prenet networks
        cbhg_gru_units (int): the number of units of the CBHG GRU, which is the last
            layer of the CBHG Model.
        cbhg_filters (int): number of filters used in the CBHG module
        cbhg_projections: projections used in the CBHG module

    Returns:
        diacritics Dict[str, Tensor]:
    """

    def __init__(
        self,
        inp_vocab_size: int,
        targ_vocab_size: int,
        embedding_dim: int = 512,
        use_prenet: bool = True,
        prenet_sizes: List[int] = [512, 256],
        cbhg_gru_units: int = 512,
        cbhg_filters: int = 16,
        cbhg_projections: List[int] = [128, 256],
        post_cbhg_layers_units: List[int] = [256, 256],
        post_cbhg_use_batch_norm: bool = True,
    ):
        super().__init__()
        self.use_prenet = use_prenet
        self.embedding = nn.Embedding(inp_vocab_size, embedding_dim)
        if self.use_prenet:
            self.prenet = Prenet(embedding_dim, prenet_depth=prenet_sizes)

        self.cbhg = CBHG(
            prenet_sizes[-1] if self.use_prenet else embedding_dim,
            cbhg_gru_units,
            K=cbhg_filters,
            projections=cbhg_projections,
        )

        layers = []
        post_cbhg_layers_units = [cbhg_gru_units] + post_cbhg_layers_units

        for i in range(1, len(post_cbhg_layers_units)):
            layers.append(
                nn.LSTM(
                    post_cbhg_layers_units[i - 1] * 2,
                    post_cbhg_layers_units[i],
                    bidirectional=True,
                    batch_first=True,
                )
            )
            if post_cbhg_use_batch_norm:
                layers.append(nn.BatchNorm1d(post_cbhg_layers_units[i] * 2))

        self.post_cbhg_layers = nn.ModuleList(layers)
        self.projections = nn.Linear(post_cbhg_layers_units[-1] * 2, targ_vocab_size)
        self.post_cbhg_layers_units = post_cbhg_layers_units
        self.post_cbhg_use_batch_norm = post_cbhg_use_batch_norm

    def forward(
        self,
        src: torch.Tensor,
        lengths: Optional[torch.Tensor] = None,
        target: Optional[torch.Tensor] = None,  # not required in this model
    ):
        """Compute forward propagation"""

        # src = [batch_size, src len]
        # lengths = [batch_size]
        # target = [batch_size, trg len]

        embedding_out = self.embedding(src)
        # embedding_out; [batch_size, src_len, embedding_dim]

        cbhg_input = embedding_out
        if self.use_prenet:
            cbhg_input = self.prenet(embedding_out)

        # cbhg_input = [batch_size, src_len, prenet_sizes[-1]]

        outputs = self.cbhg(cbhg_input, lengths)

        # placeholder LSTM states; never actually consumed, since the first LSTM
        # layer initializes its own states and later layers reuse the returned ones
        hn = torch.zeros((2, 2, 2))
        cn = torch.zeros((2, 2, 2))

        for i, layer in enumerate(self.post_cbhg_layers):
            if isinstance(layer, nn.BatchNorm1d):
                outputs = layer(outputs.permute(0, 2, 1))
                outputs = outputs.permute(0, 2, 1)
                continue
            if i > 0:
                outputs, (hn, cn) = layer(outputs, (hn, cn))
            else:
                outputs, (hn, cn) = layer(outputs)

        predictions = self.projections(outputs)

        # predictions = [batch_size, src len, targ_vocab_size]

        output = {"diacritics": predictions}

        return output
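For orientation, `CBHGModel` maps a batch of input token ids to per-position diacritic logits. A hypothetical usage sketch, assuming the `poetry_diacritizer` package above is importable and using made-up vocabulary sizes:

```py
import torch
from poetry_diacritizer.models.cbhg import CBHGModel

# made-up sizes: 80 input symbols, 20 diacritic classes
model = CBHGModel(inp_vocab_size=80, targ_vocab_size=20)
src = torch.randint(0, 80, (4, 50))   # [batch_size, src_len] token ids
out = model(src)
print(out["diacritics"].shape)        # expected: torch.Size([4, 50, 20])
```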
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/live2d.d.ts DELETED
@@ -1,11 +0,0 @@
import {
    Live2dCoreScriptFileCallback,
    Live2dFileCallback,
    Live2dGameObject
} from './gameobjects/live2d/index';

export {
    Live2dCoreScriptFileCallback,
    Live2dFileCallback,
    Live2dGameObject
};
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/chess/RandomSymobl.js DELETED
@@ -1,38 +0,0 @@
const GetRandom = Phaser.Utils.Array.GetRandom;

var RandomSymbol = function (board, tileX, tileY, callback, scope, excluded) {
    var symbol;
    if (Array.isArray(callback)) {
        // pick a random symbol from the symbol array
        var symbols = callback;
        // excluded: undefined or a symbol array
        if (excluded !== undefined) {
            for (var i = 0, cnt = symbols.length; i < cnt; i++) {
                symbol = symbols[i];
                if (excluded.indexOf(symbol) !== -1) {
                    continue;
                }
                tmpSymbolArray.push(symbol);
            }
            symbol = GetRandom(tmpSymbolArray);
            tmpSymbolArray.length = 0;
        } else {
            symbol = GetRandom(symbols);
        }

    } else if (typeof (callback) === 'function') {
        // symbol from the return value of the callback
        if (scope) {
            symbol = callback.call(scope, board, tileX, tileY, excluded);
        } else {
            symbol = callback(board, tileX, tileY, excluded);
        }
    } else {
        // a plain symbol value
        symbol = callback;
    }
    return symbol;
}

var tmpSymbolArray = [];
export default RandomSymbol;
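Stripped of the Phaser plumbing, the helper above implements "pick a random symbol, honoring an optional exclusion list, or defer to a callback or plain value". The same selection logic as a standalone Python sketch (illustrative only, not part of the plugin):

```py
import random

def random_symbol(candidates, excluded=None):
    """Pick one symbol at random; `excluded` is an optional list to skip."""
    if callable(candidates):
        return candidates(excluded)  # defer to a user-supplied callback
    if isinstance(candidates, (list, tuple)):
        pool = [s for s in candidates if excluded is None or s not in excluded]
        return random.choice(pool)
    return candidates                # a plain value is used as-is

print(random_symbol([1, 2, 3, 4], excluded=[2, 3]))  # prints 1 or 4
```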
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Factory.js DELETED
@@ -1,16 +0,0 @@
import Press from './Press.js';
import ObjectFactory from '../ObjectFactory.js';
import IsGameObject from '../../../plugins/utils/system/IsGameObject.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('press', function (gameObject, config) {
    if (!IsGameObject(gameObject)) {
        config = gameObject;
        gameObject = this.scene;
    }
    return new Press(gameObject, config);
});

SetValue(window, 'RexPlugins.UI.Press', Press);

export default Press;
spaces/AjulorC/question_answering_bot_deployed_with_Gradio/app.py DELETED
@@ -1,39 +0,0 @@
import tensorflow as tf

# !pip install transformers

from transformers import pipeline

# importing necessary libraries
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering


tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad", return_dict=False)

nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)

# !pip install gradio
import gradio as gr

# creating the function
def func(context, question):
    result = nlp(question=question, context=context)
    return result['answer']

example_1 = "(1) My name is Ajulor Christian, I am a data scientist and machine learning engineer"
qst_1 = "what is christian's profession?"

example_2 = "(2) Natural Language Processing (NLP) allows machines to break down and interpret human language. It's at the core of tools we use every day – from translation software, chatbots, spam filters, and search engines, to grammar correction software, voice assistants, and social media monitoring tools."
qst_2 = "What is NLP used for?"

# creating the interface
app = gr.Interface(fn=func, inputs=['textbox', 'text'], outputs='textbox',
                   title='Question Answering bot', theme='dark-grass',
                   description='Input context and question, then get answers!',
                   examples=[[example_1, qst_1],
                             [example_2, qst_2]]
                   )

# launching the app
app.launch(inline=False)
spaces/Akmyradov/TurkmenTTSweSTT/vits/modules.py DELETED
@@ -1,390 +0,0 @@
import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm

import commons
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform


LRELU_SLOPE = 0.1


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers-1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """
    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size ** i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
                                            groups=channels, dilation=dilation, padding=padding
                                            ))
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask


class WN(torch.nn.Module):
    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
        super(WN, self).__init__()
        assert(kernel_size % 2 == 1)
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')

        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset+2*self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, :self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x


class ResidualCouplingLayer(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels]*2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x


class ConvFlow(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]

        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_derivatives = h[..., 2 * self.num_bins:]

        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
                                                               unnormalized_widths,
                                                               unnormalized_heights,
                                                               unnormalized_derivatives,
                                                               inverse=reverse,
                                                               tails='linear',
                                                               tail_bound=self.tail_bound
                                                               )

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
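`ResidualCouplingLayer` above is a standard affine coupling flow: half of the channels pass through unchanged and parameterize a shift `m` and log-scale `logs` for the other half, so the transform is exactly invertible and its log-determinant is simply the sum of `logs`. A tiny standalone check of that invertibility (a sketch with a 1x1 convolution standing in for the WN encoder; it does not use the classes above):

```py
import torch

def couple(x, net, reverse=False):
    """Affine coupling: x1 -> m + x1 * exp(logs) forward, exact inverse in reverse."""
    x0, x1 = x.chunk(2, dim=1)
    m, logs = net(x0).chunk(2, dim=1)       # stats depend only on the untouched half
    if not reverse:
        x1 = m + x1 * torch.exp(logs)
        logdet = logs.sum(dim=(1, 2))       # log|det J| of the transform
        return torch.cat([x0, x1], 1), logdet
    x1 = (x1 - m) * torch.exp(-logs)
    return torch.cat([x0, x1], 1)

net = torch.nn.Conv1d(2, 4, 1)              # maps x0 (2 ch) to m and logs (2 + 2 ch)
x = torch.randn(1, 4, 8)
y, _ = couple(x, net)
x_rec = couple(y, net, reverse=True)
print(torch.allclose(x, x_rec, atol=1e-6))  # True: the flow inverts exactly
```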
spaces/AlexWang/lama/bin/mask_example.py DELETED
@@ -1,14 +0,0 @@
import matplotlib.pyplot as plt
from skimage import io
from skimage.transform import resize

from saicinpainting.evaluation.masks.mask import SegmentationMask

im = io.imread('imgs/ex4.jpg')
im = resize(im, (512, 1024), anti_aliasing=True)
mask_seg = SegmentationMask(num_variants_per_mask=10)
mask_examples = mask_seg.get_masks(im)
for i, example in enumerate(mask_examples):
    plt.imshow(example)
    plt.show()
    plt.imsave(f'tmp/img_masks/{i}.png', example)
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/text_inversion.md DELETED
@@ -1,277 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->



# Textual Inversion

[Textual Inversion](https://arxiv.org/abs/2208.01618) is a technique for capturing novel concepts from a small number of example images. While the technique was originally demonstrated with a [latent diffusion model](https://github.com/CompVis/latent-diffusion), it has since been applied to other model variants like [Stable Diffusion](https://huggingface.co/docs/diffusers/main/en/conceptual/stable_diffusion). The learned concepts can be used to better control the images generated from text-to-image pipelines. It learns new "words" in the text encoder's embedding space, which are used within text prompts for personalized image generation.

![Textual Inversion example](https://textual-inversion.github.io/static/images/editing/colorful_teapot.JPG)
<small>By using just 3-5 images you can teach new concepts to a model such as Stable Diffusion for personalized image generation <a href="https://github.com/rinongal/textual_inversion">(image source)</a>.</small>

This guide will show you how to train a [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model with Textual Inversion. All the training scripts for Textual Inversion used in this guide can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) if you're interested in taking a closer look at how things work under the hood.

<Tip>

There is a community-created collection of trained Textual Inversion models in the [Stable Diffusion Textual Inversion Concepts Library](https://huggingface.co/sd-concepts-library) which are readily available for inference. Over time, this'll hopefully grow into a useful resource as more concepts are added!

</Tip>

Before you begin, make sure you install the library's training dependencies:

```bash
pip install diffusers accelerate transformers
```

After all the dependencies have been set up, initialize a [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

To set up a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Finally, you can try to [install xFormers](https://huggingface.co/docs/diffusers/main/en/training/optimization/xformers) to reduce your memory footprint with xFormers memory-efficient attention. Once you have xFormers installed, add the `--enable_xformers_memory_efficient_attention` argument to the training script. xFormers is not supported for Flax.

## Upload model to Hub

If you want to store your model on the Hub, add the following argument to the training script:

```bash
--push_to_hub
```

## Save and load checkpoints

It is often a good idea to regularly save checkpoints of your model during training. This way, you can resume training from a saved checkpoint if your training is interrupted for any reason. To save a checkpoint, pass the following argument to the training script to save the full training state in a subfolder in `output_dir` every 500 steps:

```bash
--checkpointing_steps=500
```

To resume training from a saved checkpoint, pass the following argument to the training script and the specific checkpoint you'd like to resume from:

```bash
--resume_from_checkpoint="checkpoint-1500"
```

## Finetuning

For your training dataset, download these [images of a cat toy](https://huggingface.co/datasets/diffusers/cat_toy_example) and store them in a directory. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.

```py
from huggingface_hub import snapshot_download

local_dir = "./cat"
snapshot_download(
    "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes"
)
```

Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument, and the `DATA_DIR` environment variable to the path of the directory containing the images.

Now you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py). The script creates and saves the following files to your repository: `learned_embeds.bin`, `token_identifier.txt`, and `type_of_concept.txt`.

<Tip>

💡 A full training run takes ~1 hour on one V100 GPU. While you're waiting for the training to complete, feel free to check out [how Textual Inversion works](#how-it-works) in the section below if you're curious!

</Tip>

<frameworkcontent>
<pt>
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATA_DIR="./cat"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="textual_inversion_cat" \
  --push_to_hub
```

<Tip>

💡 If you want to increase the trainable capacity, you can associate your placeholder token, *e.g.* `<cat-toy>`, with
multiple embedding vectors. This can help the model to better capture the style of more (complex) images.
To enable training multiple embedding vectors, simply pass:

```bash
--num_vectors=5
```

</Tip>
</pt>
<jax>
If you have access to TPUs, try out the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py) to train even faster (this'll also work for GPUs). With the same configuration settings, the Flax training script should be at least 70% faster than the PyTorch training script! ⚡️

Before you begin, make sure you install the Flax specific dependencies:

```bash
pip install -U -r requirements_flax.txt
```

Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.

Then you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py):

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATA_DIR="./cat"

python textual_inversion_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --output_dir="textual_inversion_cat" \
  --push_to_hub
```
</jax>
</frameworkcontent>

### Intermediate logging

If you're interested in following along with your model training progress, you can save the generated images from the training process. Add the following arguments to the training script to enable intermediate logging:

- `validation_prompt`, the prompt used to generate samples (this is set to `None` by default and intermediate logging is disabled)
- `num_validation_images`, the number of sample images to generate
- `validation_steps`, the number of steps before generating `num_validation_images` from the `validation_prompt`

```bash
--validation_prompt="A <cat-toy> backpack"
--num_validation_images=4
--validation_steps=100
```

## Inference

Once you have trained a model, you can use it for inference with the [`StableDiffusionPipeline`].

The textual inversion script will by default only save the textual inversion embedding vector(s) that have
been added to the text encoder embedding matrix and consequently been trained.

<frameworkcontent>
<pt>
<Tip>

💡 The community has created a large library of different textual inversion embedding vectors, called [sd-concepts-library](https://huggingface.co/sd-concepts-library).
Instead of training textual inversion embeddings from scratch you can also check whether a fitting textual inversion embedding has already been added to the library.

</Tip>

To load the textual inversion embeddings you first need to load the base model that was used when training
your textual inversion embedding vectors. Here we assume that [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)
was used as a base model, so we load it first:
```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
```

Next, we need to load the textual inversion embedding vector, which can be done via the [`TextualInversionLoaderMixin.load_textual_inversion`]
function. Here we'll load the embeddings of the "<cat-toy>" example from before.
```python
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
```

Now we can run the pipeline, making sure that the placeholder token `<cat-toy>` is used in our prompt.

```python
prompt = "A <cat-toy> backpack"

image = pipe(prompt, num_inference_steps=50).images[0]
image.save("cat-backpack.png")
```

The function [`TextualInversionLoaderMixin.load_textual_inversion`] can not only
load textual embedding vectors saved in Diffusers' format, but also embedding vectors
saved in the [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) format.
To do so, you can first download an embedding vector from [civitAI](https://civitai.com/models/3036?modelVersionId=8387)
and then load it locally:
```python
pipe.load_textual_inversion("./charturnerv2.pt")
```
</pt>
<jax>
Currently there is no `load_textual_inversion` function for Flax, so one has to make sure the textual inversion
embedding vector is saved as part of the model after training.

The model can then be run just like any other Flax model:

```python
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

model_path = "path-to-your-trained-model"
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)

prompt = "A <cat-toy> backpack"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("cat-backpack.png")
```
</jax>
</frameworkcontent>

## How it works

![Diagram from the paper showing overview](https://textual-inversion.github.io/static/images/training/training.JPG)
<small>Architecture overview from the Textual Inversion <a href="https://textual-inversion.github.io/">blog post.</a></small>

Usually, text prompts are tokenized into an embedding before being passed to a model, which is often a transformer. Textual Inversion does something similar, but it learns a new token embedding, `v*`, from a special token `S*` in the diagram above. The model output is used to condition the diffusion model, which helps the diffusion model understand the prompt and new concepts from just a few example images.

To do this, Textual Inversion uses a generator model and noisy versions of the training images. The generator tries to predict less noisy versions of the images, and the token embedding `v*` is optimized based on how well the generator does. If the token embedding successfully captures the new concept, it gives more useful information to the diffusion model and helps create clearer images with less noise. This optimization process typically occurs after several thousand steps of exposure to a variety of prompt and image variants.
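Put differently, only the single new embedding row is trained while the rest of the network stays frozen. A schematic sketch of that loop on a toy embedding table (`denoise_loss` is a stand-in for the real diffusion objective, not a diffusers API):

```py
import torch

dim = 32
frozen_vocab = torch.randn(1000, dim)          # pretrained token embeddings, kept frozen
v_star = torch.randn(dim, requires_grad=True)  # the one new embedding being learned
opt = torch.optim.Adam([v_star], lr=5e-3)

def denoise_loss(cond):
    # stand-in objective: the real loss compares predicted vs. actual noise
    return ((cond - torch.ones_like(cond)) ** 2).mean()

for step in range(100):
    # embed a toy prompt "a photo of S*": three frozen tokens plus v_star
    prompt_emb = torch.cat([frozen_vocab[:3], v_star[None]], dim=0)
    loss = denoise_loss(prompt_emb)
    opt.zero_grad()
    loss.backward()
    opt.step()
print(loss.item())  # decreases as v_star adapts
```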
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet.py DELETED
@@ -1,1002 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import tempfile
18
- import traceback
19
- import unittest
20
-
21
- import numpy as np
22
- import torch
23
- from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
24
-
25
- from diffusers import (
26
- AutoencoderKL,
27
- ControlNetModel,
28
- DDIMScheduler,
29
- EulerDiscreteScheduler,
30
- StableDiffusionControlNetPipeline,
31
- UNet2DConditionModel,
32
- )
33
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
34
- from diffusers.utils import load_image, load_numpy, randn_tensor, slow, torch_device
35
- from diffusers.utils.import_utils import is_xformers_available
36
- from diffusers.utils.testing_utils import (
37
- enable_full_determinism,
38
- require_torch_2,
39
- require_torch_gpu,
40
- run_test_in_subprocess,
41
- )
42
-
43
- from ..pipeline_params import (
44
- IMAGE_TO_IMAGE_IMAGE_PARAMS,
45
- TEXT_TO_IMAGE_BATCH_PARAMS,
46
- TEXT_TO_IMAGE_IMAGE_PARAMS,
47
- TEXT_TO_IMAGE_PARAMS,
48
- )
49
- from ..test_pipelines_common import (
50
- PipelineKarrasSchedulerTesterMixin,
51
- PipelineLatentTesterMixin,
52
- PipelineTesterMixin,
53
- )
54
-
55
-
56
- enable_full_determinism()
57
-
58
-
59
- # Will be run via run_test_in_subprocess
60
- def _test_stable_diffusion_compile(in_queue, out_queue, timeout):
61
- error = None
62
- try:
63
- _ = in_queue.get(timeout=timeout)
64
-
65
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
66
-
67
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
68
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
69
- )
70
- pipe.to("cuda")
71
- pipe.set_progress_bar_config(disable=None)
72
-
73
- pipe.unet.to(memory_format=torch.channels_last)
74
- pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
75
-
76
- pipe.controlnet.to(memory_format=torch.channels_last)
77
- pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True)
78
-
79
- generator = torch.Generator(device="cpu").manual_seed(0)
80
- prompt = "bird"
81
- image = load_image(
82
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
83
- )
84
-
85
- output = pipe(prompt, image, generator=generator, output_type="np")
86
- image = output.images[0]
87
-
88
- assert image.shape == (768, 512, 3)
89
-
90
- expected_image = load_numpy(
91
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out_full.npy"
92
- )
93
-
94
- assert np.abs(expected_image - image).max() < 1.0
95
-
96
- except Exception:
97
- error = traceback.format_exc()
98
-
99
- results = {"error": error}
100
- out_queue.put(results, timeout=timeout)
101
- out_queue.join()
102
-
103
-
104
- class ControlNetPipelineFastTests(
105
- PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
106
- ):
107
- pipeline_class = StableDiffusionControlNetPipeline
108
- params = TEXT_TO_IMAGE_PARAMS
109
- batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
110
- image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
111
- image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
112
-
113
- def get_dummy_components(self):
114
- torch.manual_seed(0)
115
- unet = UNet2DConditionModel(
116
- block_out_channels=(32, 64),
117
- layers_per_block=2,
118
- sample_size=32,
119
- in_channels=4,
120
- out_channels=4,
121
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
122
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
123
- cross_attention_dim=32,
124
- )
125
- torch.manual_seed(0)
126
- controlnet = ControlNetModel(
127
- block_out_channels=(32, 64),
128
- layers_per_block=2,
129
- in_channels=4,
130
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
131
- cross_attention_dim=32,
132
- conditioning_embedding_out_channels=(16, 32),
133
- )
134
- torch.manual_seed(0)
135
- scheduler = DDIMScheduler(
136
- beta_start=0.00085,
137
- beta_end=0.012,
138
- beta_schedule="scaled_linear",
139
- clip_sample=False,
140
- set_alpha_to_one=False,
141
- )
142
- torch.manual_seed(0)
143
- vae = AutoencoderKL(
144
- block_out_channels=[32, 64],
145
- in_channels=3,
146
- out_channels=3,
147
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
148
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
149
- latent_channels=4,
150
- )
151
- torch.manual_seed(0)
152
- text_encoder_config = CLIPTextConfig(
153
- bos_token_id=0,
154
- eos_token_id=2,
155
- hidden_size=32,
156
- intermediate_size=37,
157
- layer_norm_eps=1e-05,
158
- num_attention_heads=4,
159
- num_hidden_layers=5,
160
- pad_token_id=1,
161
- vocab_size=1000,
162
- )
163
- text_encoder = CLIPTextModel(text_encoder_config)
164
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
165
-
166
- components = {
167
- "unet": unet,
168
- "controlnet": controlnet,
169
- "scheduler": scheduler,
170
- "vae": vae,
171
- "text_encoder": text_encoder,
172
- "tokenizer": tokenizer,
173
- "safety_checker": None,
174
- "feature_extractor": None,
175
- }
176
- return components
177
-
178
- def get_dummy_inputs(self, device, seed=0):
179
- if str(device).startswith("mps"):
180
- generator = torch.manual_seed(seed)
181
- else:
182
- generator = torch.Generator(device=device).manual_seed(seed)
183
-
184
- controlnet_embedder_scale_factor = 2
185
- image = randn_tensor(
186
- (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
187
- generator=generator,
188
- device=torch.device(device),
189
- )
190
-
191
- inputs = {
192
- "prompt": "A painting of a squirrel eating a burger",
193
- "generator": generator,
194
- "num_inference_steps": 2,
195
- "guidance_scale": 6.0,
196
- "output_type": "numpy",
197
- "image": image,
198
- }
199
-
200
- return inputs
201
-
202
- def test_attention_slicing_forward_pass(self):
203
- return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
204
-
205
- @unittest.skipIf(
206
- torch_device != "cuda" or not is_xformers_available(),
207
- reason="XFormers attention is only available with CUDA and `xformers` installed",
208
- )
209
- def test_xformers_attention_forwardGenerator_pass(self):
210
- self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
211
-
212
- def test_inference_batch_single_identical(self):
213
- self._test_inference_batch_single_identical(expected_max_diff=2e-3)
214
-
215
-
216
- class StableDiffusionMultiControlNetPipelineFastTests(
217
- PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
218
- ):
219
- pipeline_class = StableDiffusionControlNetPipeline
220
- params = TEXT_TO_IMAGE_PARAMS
221
- batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
222
- image_params = frozenset([])  # TO_DO: add image_params once VaeImageProcessor.preprocess is refactored
223
-
224
- def get_dummy_components(self):
225
- torch.manual_seed(0)
226
- unet = UNet2DConditionModel(
227
- block_out_channels=(32, 64),
228
- layers_per_block=2,
229
- sample_size=32,
230
- in_channels=4,
231
- out_channels=4,
232
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
233
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
234
- cross_attention_dim=32,
235
- )
236
- torch.manual_seed(0)
237
-
238
- def init_weights(m):
239
- if isinstance(m, torch.nn.Conv2d):
240
- torch.nn.init.normal_(m.weight)
241
- m.bias.data.fill_(1.0)
242
-
243
- controlnet1 = ControlNetModel(
244
- block_out_channels=(32, 64),
245
- layers_per_block=2,
246
- in_channels=4,
247
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
248
- cross_attention_dim=32,
249
- conditioning_embedding_out_channels=(16, 32),
250
- )
251
- controlnet1.controlnet_down_blocks.apply(init_weights)
252
-
253
- torch.manual_seed(0)
254
- controlnet2 = ControlNetModel(
255
- block_out_channels=(32, 64),
256
- layers_per_block=2,
257
- in_channels=4,
258
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
259
- cross_attention_dim=32,
260
- conditioning_embedding_out_channels=(16, 32),
261
- )
262
- controlnet2.controlnet_down_blocks.apply(init_weights)
263
-
264
- torch.manual_seed(0)
265
- scheduler = DDIMScheduler(
266
- beta_start=0.00085,
267
- beta_end=0.012,
268
- beta_schedule="scaled_linear",
269
- clip_sample=False,
270
- set_alpha_to_one=False,
271
- )
272
- torch.manual_seed(0)
273
- vae = AutoencoderKL(
274
- block_out_channels=[32, 64],
275
- in_channels=3,
276
- out_channels=3,
277
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
278
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
279
- latent_channels=4,
280
- )
281
- torch.manual_seed(0)
282
- text_encoder_config = CLIPTextConfig(
283
- bos_token_id=0,
284
- eos_token_id=2,
285
- hidden_size=32,
286
- intermediate_size=37,
287
- layer_norm_eps=1e-05,
288
- num_attention_heads=4,
289
- num_hidden_layers=5,
290
- pad_token_id=1,
291
- vocab_size=1000,
292
- )
293
- text_encoder = CLIPTextModel(text_encoder_config)
294
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
295
-
296
- controlnet = MultiControlNetModel([controlnet1, controlnet2])
297
-
298
- components = {
299
- "unet": unet,
300
- "controlnet": controlnet,
301
- "scheduler": scheduler,
302
- "vae": vae,
303
- "text_encoder": text_encoder,
304
- "tokenizer": tokenizer,
305
- "safety_checker": None,
306
- "feature_extractor": None,
307
- }
308
- return components
309
-
310
- def get_dummy_inputs(self, device, seed=0):
311
- if str(device).startswith("mps"):
312
- generator = torch.manual_seed(seed)
313
- else:
314
- generator = torch.Generator(device=device).manual_seed(seed)
315
-
316
- controlnet_embedder_scale_factor = 2
317
-
318
- images = [
319
- randn_tensor(
320
- (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
321
- generator=generator,
322
- device=torch.device(device),
323
- ),
324
- randn_tensor(
325
- (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
326
- generator=generator,
327
- device=torch.device(device),
328
- ),
329
- ]
330
-
331
- inputs = {
332
- "prompt": "A painting of a squirrel eating a burger",
333
- "generator": generator,
334
- "num_inference_steps": 2,
335
- "guidance_scale": 6.0,
336
- "output_type": "numpy",
337
- "image": images,
338
- }
339
-
340
- return inputs
341
-
342
- def test_control_guidance_switch(self):
343
- components = self.get_dummy_components()
344
- pipe = self.pipeline_class(**components)
345
- pipe.to(torch_device)
346
-
347
- scale = 10.0
348
- steps = 4
349
-
350
- inputs = self.get_dummy_inputs(torch_device)
351
- inputs["num_inference_steps"] = steps
352
- inputs["controlnet_conditioning_scale"] = scale
353
- output_1 = pipe(**inputs)[0]
354
-
355
- inputs = self.get_dummy_inputs(torch_device)
356
- inputs["num_inference_steps"] = steps
357
- inputs["controlnet_conditioning_scale"] = scale
358
- output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
359
-
360
- inputs = self.get_dummy_inputs(torch_device)
361
- inputs["num_inference_steps"] = steps
362
- inputs["controlnet_conditioning_scale"] = scale
363
- output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
364
-
365
- inputs = self.get_dummy_inputs(torch_device)
366
- inputs["num_inference_steps"] = steps
367
- inputs["controlnet_conditioning_scale"] = scale
368
- output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
369
-
370
- # make sure that all outputs are different
371
- assert np.sum(np.abs(output_1 - output_2)) > 1e-3
372
- assert np.sum(np.abs(output_1 - output_3)) > 1e-3
373
- assert np.sum(np.abs(output_1 - output_4)) > 1e-3
374
-
375
- def test_attention_slicing_forward_pass(self):
376
- return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
377
-
378
- @unittest.skipIf(
379
- torch_device != "cuda" or not is_xformers_available(),
380
- reason="XFormers attention is only available with CUDA and `xformers` installed",
381
- )
382
- def test_xformers_attention_forwardGenerator_pass(self):
383
- self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
384
-
385
- def test_inference_batch_single_identical(self):
386
- self._test_inference_batch_single_identical(expected_max_diff=2e-3)
387
-
388
- def test_save_pretrained_raise_not_implemented_exception(self):
389
- components = self.get_dummy_components()
390
- pipe = self.pipeline_class(**components)
391
- pipe.to(torch_device)
392
- pipe.set_progress_bar_config(disable=None)
393
- with tempfile.TemporaryDirectory() as tmpdir:
394
- # save_pretrained is not implemented for Multi-ControlNet;
395
- # assertRaises fails the test if saving ever stops raising
396
- # instead of passing silently.
397
- with self.assertRaises(NotImplementedError):
398
- pipe.save_pretrained(tmpdir)
399
-
400
-
401
- class StableDiffusionMultiControlNetOneModelPipelineFastTests(
402
- PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
403
- ):
404
- pipeline_class = StableDiffusionControlNetPipeline
405
- params = TEXT_TO_IMAGE_PARAMS
406
- batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
407
- image_params = frozenset([])  # TO_DO: add image_params once VaeImageProcessor.preprocess is refactored
408
-
409
- def get_dummy_components(self):
410
- torch.manual_seed(0)
411
- unet = UNet2DConditionModel(
412
- block_out_channels=(32, 64),
413
- layers_per_block=2,
414
- sample_size=32,
415
- in_channels=4,
416
- out_channels=4,
417
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
418
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
419
- cross_attention_dim=32,
420
- )
421
- torch.manual_seed(0)
422
-
423
- def init_weights(m):
424
- if isinstance(m, torch.nn.Conv2d):
425
- torch.nn.init.normal_(m.weight)
426
- m.bias.data.fill_(1.0)
427
-
428
- controlnet = ControlNetModel(
429
- block_out_channels=(32, 64),
430
- layers_per_block=2,
431
- in_channels=4,
432
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
433
- cross_attention_dim=32,
434
- conditioning_embedding_out_channels=(16, 32),
435
- )
436
- controlnet.controlnet_down_blocks.apply(init_weights)
437
-
438
- torch.manual_seed(0)
439
- scheduler = DDIMScheduler(
440
- beta_start=0.00085,
441
- beta_end=0.012,
442
- beta_schedule="scaled_linear",
443
- clip_sample=False,
444
- set_alpha_to_one=False,
445
- )
446
- torch.manual_seed(0)
447
- vae = AutoencoderKL(
448
- block_out_channels=[32, 64],
449
- in_channels=3,
450
- out_channels=3,
451
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
452
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
453
- latent_channels=4,
454
- )
455
- torch.manual_seed(0)
456
- text_encoder_config = CLIPTextConfig(
457
- bos_token_id=0,
458
- eos_token_id=2,
459
- hidden_size=32,
460
- intermediate_size=37,
461
- layer_norm_eps=1e-05,
462
- num_attention_heads=4,
463
- num_hidden_layers=5,
464
- pad_token_id=1,
465
- vocab_size=1000,
466
- )
467
- text_encoder = CLIPTextModel(text_encoder_config)
468
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
469
-
470
- controlnet = MultiControlNetModel([controlnet])
471
-
472
- components = {
473
- "unet": unet,
474
- "controlnet": controlnet,
475
- "scheduler": scheduler,
476
- "vae": vae,
477
- "text_encoder": text_encoder,
478
- "tokenizer": tokenizer,
479
- "safety_checker": None,
480
- "feature_extractor": None,
481
- }
482
- return components
483
-
484
- def get_dummy_inputs(self, device, seed=0):
485
- if str(device).startswith("mps"):
486
- generator = torch.manual_seed(seed)
487
- else:
488
- generator = torch.Generator(device=device).manual_seed(seed)
489
-
490
- controlnet_embedder_scale_factor = 2
491
-
492
- images = [
493
- randn_tensor(
494
- (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
495
- generator=generator,
496
- device=torch.device(device),
497
- ),
498
- ]
499
-
500
- inputs = {
501
- "prompt": "A painting of a squirrel eating a burger",
502
- "generator": generator,
503
- "num_inference_steps": 2,
504
- "guidance_scale": 6.0,
505
- "output_type": "numpy",
506
- "image": images,
507
- }
508
-
509
- return inputs
510
-
511
- def test_control_guidance_switch(self):
512
- components = self.get_dummy_components()
513
- pipe = self.pipeline_class(**components)
514
- pipe.to(torch_device)
515
-
516
- scale = 10.0
517
- steps = 4
518
-
519
- inputs = self.get_dummy_inputs(torch_device)
520
- inputs["num_inference_steps"] = steps
521
- inputs["controlnet_conditioning_scale"] = scale
522
- output_1 = pipe(**inputs)[0]
523
-
524
- inputs = self.get_dummy_inputs(torch_device)
525
- inputs["num_inference_steps"] = steps
526
- inputs["controlnet_conditioning_scale"] = scale
527
- output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
528
-
529
- inputs = self.get_dummy_inputs(torch_device)
530
- inputs["num_inference_steps"] = steps
531
- inputs["controlnet_conditioning_scale"] = scale
532
- output_3 = pipe(
533
- **inputs,
534
- control_guidance_start=[0.1],
535
- control_guidance_end=[0.2],
536
- )[0]
537
-
538
- inputs = self.get_dummy_inputs(torch_device)
539
- inputs["num_inference_steps"] = steps
540
- inputs["controlnet_conditioning_scale"] = scale
541
- output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0]
542
-
543
- # make sure that all outputs are different
544
- assert np.sum(np.abs(output_1 - output_2)) > 1e-3
545
- assert np.sum(np.abs(output_1 - output_3)) > 1e-3
546
- assert np.sum(np.abs(output_1 - output_4)) > 1e-3
547
-
548
- def test_attention_slicing_forward_pass(self):
549
- return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
550
-
551
- @unittest.skipIf(
552
- torch_device != "cuda" or not is_xformers_available(),
553
- reason="XFormers attention is only available with CUDA and `xformers` installed",
554
- )
555
- def test_xformers_attention_forwardGenerator_pass(self):
556
- self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
557
-
558
- def test_inference_batch_single_identical(self):
559
- self._test_inference_batch_single_identical(expected_max_diff=2e-3)
560
-
561
- def test_save_pretrained_raise_not_implemented_exception(self):
562
- components = self.get_dummy_components()
563
- pipe = self.pipeline_class(**components)
564
- pipe.to(torch_device)
565
- pipe.set_progress_bar_config(disable=None)
566
- with tempfile.TemporaryDirectory() as tmpdir:
567
- # save_pretrained is not implemented for Multi-ControlNet;
568
- # assertRaises fails the test if saving ever stops raising
569
- # instead of passing silently.
570
- with self.assertRaises(NotImplementedError):
571
- pipe.save_pretrained(tmpdir)
572
-
573
-
574
- @slow
575
- @require_torch_gpu
576
- class ControlNetPipelineSlowTests(unittest.TestCase):
577
- def tearDown(self):
578
- super().tearDown()
579
- gc.collect()
580
- torch.cuda.empty_cache()
581
-
582
- def test_canny(self):
583
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
584
-
585
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
586
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
587
- )
588
- pipe.enable_model_cpu_offload()
589
- pipe.set_progress_bar_config(disable=None)
590
-
591
- generator = torch.Generator(device="cpu").manual_seed(0)
592
- prompt = "bird"
593
- image = load_image(
594
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
595
- )
596
-
597
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
598
-
599
- image = output.images[0]
600
-
601
- assert image.shape == (768, 512, 3)
602
-
603
- expected_image = load_numpy(
604
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out.npy"
605
- )
606
-
607
- assert np.abs(expected_image - image).max() < 9e-2
608
-
609
- def test_depth(self):
610
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
611
-
612
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
613
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
614
- )
615
- pipe.enable_model_cpu_offload()
616
- pipe.set_progress_bar_config(disable=None)
617
-
618
- generator = torch.Generator(device="cpu").manual_seed(0)
619
- prompt = "Stormtrooper's lecture"
620
- image = load_image(
621
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png"
622
- )
623
-
624
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
625
-
626
- image = output.images[0]
627
-
628
- assert image.shape == (512, 512, 3)
629
-
630
- expected_image = load_numpy(
631
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth_out.npy"
632
- )
633
-
634
- assert np.abs(expected_image - image).max() < 8e-1
635
-
636
- def test_hed(self):
637
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed")
638
-
639
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
640
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
641
- )
642
- pipe.enable_model_cpu_offload()
643
- pipe.set_progress_bar_config(disable=None)
644
-
645
- generator = torch.Generator(device="cpu").manual_seed(0)
646
- prompt = "oil painting of handsome old man, masterpiece"
647
- image = load_image(
648
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed.png"
649
- )
650
-
651
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
652
-
653
- image = output.images[0]
654
-
655
- assert image.shape == (704, 512, 3)
656
-
657
- expected_image = load_numpy(
658
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed_out.npy"
659
- )
660
-
661
- assert np.abs(expected_image - image).max() < 8e-2
662
-
663
- def test_mlsd(self):
664
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd")
665
-
666
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
667
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
668
- )
669
- pipe.enable_model_cpu_offload()
670
- pipe.set_progress_bar_config(disable=None)
671
-
672
- generator = torch.Generator(device="cpu").manual_seed(0)
673
- prompt = "room"
674
- image = load_image(
675
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd.png"
676
- )
677
-
678
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
679
-
680
- image = output.images[0]
681
-
682
- assert image.shape == (704, 512, 3)
683
-
684
- expected_image = load_numpy(
685
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd_out.npy"
686
- )
687
-
688
- assert np.abs(expected_image - image).max() < 5e-2
689
-
690
- def test_normal(self):
691
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal")
692
-
693
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
694
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
695
- )
696
- pipe.enable_model_cpu_offload()
697
- pipe.set_progress_bar_config(disable=None)
698
-
699
- generator = torch.Generator(device="cpu").manual_seed(0)
700
- prompt = "cute toy"
701
- image = load_image(
702
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal.png"
703
- )
704
-
705
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
706
-
707
- image = output.images[0]
708
-
709
- assert image.shape == (512, 512, 3)
710
-
711
- expected_image = load_numpy(
712
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal_out.npy"
713
- )
714
-
715
- assert np.abs(expected_image - image).max() < 5e-2
716
-
717
- def test_openpose(self):
718
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
719
-
720
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
721
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
722
- )
723
- pipe.enable_model_cpu_offload()
724
- pipe.set_progress_bar_config(disable=None)
725
-
726
- generator = torch.Generator(device="cpu").manual_seed(0)
727
- prompt = "Chef in the kitchen"
728
- image = load_image(
729
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
730
- )
731
-
732
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
733
-
734
- image = output.images[0]
735
-
736
- assert image.shape == (768, 512, 3)
737
-
738
- expected_image = load_numpy(
739
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/chef_pose_out.npy"
740
- )
741
-
742
- assert np.abs(expected_image - image).max() < 8e-2
743
-
744
- def test_scribble(self):
745
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble")
746
-
747
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
748
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
749
- )
750
- pipe.enable_model_cpu_offload()
751
- pipe.set_progress_bar_config(disable=None)
752
-
753
- generator = torch.Generator(device="cpu").manual_seed(5)
754
- prompt = "bag"
755
- image = load_image(
756
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble.png"
757
- )
758
-
759
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
760
-
761
- image = output.images[0]
762
-
763
- assert image.shape == (640, 512, 3)
764
-
765
- expected_image = load_numpy(
766
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble_out.npy"
767
- )
768
-
769
- assert np.abs(expected_image - image).max() < 8e-2
770
-
771
- def test_seg(self):
772
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg")
773
-
774
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
775
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
776
- )
777
- pipe.enable_model_cpu_offload()
778
- pipe.set_progress_bar_config(disable=None)
779
-
780
- generator = torch.Generator(device="cpu").manual_seed(5)
781
- prompt = "house"
782
- image = load_image(
783
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png"
784
- )
785
-
786
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
787
-
788
- image = output.images[0]
789
-
790
- assert image.shape == (512, 512, 3)
791
-
792
- expected_image = load_numpy(
793
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg_out.npy"
794
- )
795
-
796
- assert np.abs(expected_image - image).max() < 8e-2
797
-
798
- def test_sequential_cpu_offloading(self):
799
- torch.cuda.empty_cache()
800
- torch.cuda.reset_max_memory_allocated()
801
- torch.cuda.reset_peak_memory_stats()
802
-
803
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg")
804
-
805
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
806
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
807
- )
808
- pipe.set_progress_bar_config(disable=None)
809
- pipe.enable_attention_slicing()
810
- pipe.enable_sequential_cpu_offload()
811
-
812
- prompt = "house"
813
- image = load_image(
814
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png"
815
- )
816
-
817
- _ = pipe(
818
- prompt,
819
- image,
820
- num_inference_steps=2,
821
- output_type="np",
822
- )
823
-
824
- mem_bytes = torch.cuda.max_memory_allocated()
825
- # make sure that less than 4 GB is allocated
826
- assert mem_bytes < 4 * 10**9
827
-
828
- def test_canny_guess_mode(self):
829
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
830
-
831
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
832
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
833
- )
834
- pipe.enable_model_cpu_offload()
835
- pipe.set_progress_bar_config(disable=None)
836
-
837
- generator = torch.Generator(device="cpu").manual_seed(0)
838
- prompt = ""
839
- image = load_image(
840
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
841
- )
842
-
843
- output = pipe(
844
- prompt,
845
- image,
846
- generator=generator,
847
- output_type="np",
848
- num_inference_steps=3,
849
- guidance_scale=3.0,
850
- guess_mode=True,
851
- )
852
-
853
- image = output.images[0]
854
- assert image.shape == (768, 512, 3)
855
-
856
- image_slice = image[-3:, -3:, -1]
857
- expected_slice = np.array([0.2724, 0.2846, 0.2724, 0.3843, 0.3682, 0.2736, 0.4675, 0.3862, 0.2887])
858
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
859
-
860
- def test_canny_guess_mode_euler(self):
861
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
862
-
863
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
864
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
865
- )
866
- pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
867
- pipe.enable_model_cpu_offload()
868
- pipe.set_progress_bar_config(disable=None)
869
-
870
- generator = torch.Generator(device="cpu").manual_seed(0)
871
- prompt = ""
872
- image = load_image(
873
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
874
- )
875
-
876
- output = pipe(
877
- prompt,
878
- image,
879
- generator=generator,
880
- output_type="np",
881
- num_inference_steps=3,
882
- guidance_scale=3.0,
883
- guess_mode=True,
884
- )
885
-
886
- image = output.images[0]
887
- assert image.shape == (768, 512, 3)
888
-
889
- image_slice = image[-3:, -3:, -1]
890
- expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494])
891
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
892
-
893
- @require_torch_2
894
- def test_stable_diffusion_compile(self):
895
- run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None)
896
-
897
- def test_v11_shuffle_global_pool_conditions(self):
898
- controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle")
899
-
900
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
901
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
902
- )
903
- pipe.enable_model_cpu_offload()
904
- pipe.set_progress_bar_config(disable=None)
905
-
906
- generator = torch.Generator(device="cpu").manual_seed(0)
907
- prompt = "New York"
908
- image = load_image(
909
- "https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"
910
- )
911
-
912
- output = pipe(
913
- prompt,
914
- image,
915
- generator=generator,
916
- output_type="np",
917
- num_inference_steps=3,
918
- guidance_scale=7.0,
919
- )
920
-
921
- image = output.images[0]
922
- assert image.shape == (512, 640, 3)
923
-
924
- image_slice = image[-3:, -3:, -1]
925
- expected_slice = np.array([0.1338, 0.1597, 0.1202, 0.1687, 0.1377, 0.1017, 0.2070, 0.1574, 0.1348])
926
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
927
-
928
- def test_load_local(self):
929
- controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
930
- pipe_1 = StableDiffusionControlNetPipeline.from_pretrained(
931
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
932
- )
933
-
934
- controlnet = ControlNetModel.from_single_file(
935
- "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
936
- )
937
- pipe_2 = StableDiffusionControlNetPipeline.from_single_file(
938
- "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors",
939
- safety_checker=None,
940
- controlnet=controlnet,
941
- )
942
- pipes = [pipe_1, pipe_2]
943
- images = []
944
-
945
- for pipe in pipes:
946
- pipe.enable_model_cpu_offload()
947
- pipe.set_progress_bar_config(disable=None)
948
-
949
- generator = torch.Generator(device="cpu").manual_seed(0)
950
- prompt = "bird"
951
- image = load_image(
952
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
953
- )
954
-
955
- output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3)
956
- images.append(output.images[0])
957
-
958
- del pipe
959
- gc.collect()
960
- torch.cuda.empty_cache()
961
-
962
- assert np.abs(images[0] - images[1]).sum() < 1e-3
963
-
964
-
965
- @slow
966
- @require_torch_gpu
967
- class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase):
968
- def tearDown(self):
969
- super().tearDown()
970
- gc.collect()
971
- torch.cuda.empty_cache()
972
-
973
- def test_pose_and_canny(self):
974
- controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
975
- controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
976
-
977
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
978
- "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny]
979
- )
980
- pipe.enable_model_cpu_offload()
981
- pipe.set_progress_bar_config(disable=None)
982
-
983
- generator = torch.Generator(device="cpu").manual_seed(0)
984
- prompt = "bird and Chef"
985
- image_canny = load_image(
986
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
987
- )
988
- image_pose = load_image(
989
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
990
- )
991
-
992
- output = pipe(prompt, [image_pose, image_canny], generator=generator, output_type="np", num_inference_steps=3)
993
-
994
- image = output.images[0]
995
-
996
- assert image.shape == (768, 512, 3)
997
-
998
- expected_image = load_numpy(
999
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose_canny_out.npy"
1000
- )
1001
-
1002
- assert np.abs(expected_image - image).max() < 5e-2
 
spaces/AnimaLab/bias-test-gpt-pairs/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Bias Test Gpt Pairs
3
- emoji: 🦀
4
- colorFrom: indigo
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.35.2
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- duplicated_from: RKocielnik/bias-test-gpt-pairs
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/training.py DELETED
@@ -1,737 +0,0 @@
1
- import os
2
-
3
- os.environ["WANDB_MODE"] = "offline"
4
- # os.environ["WANDB_DISABLED"] = "true"
5
-
6
- import json
7
- import math
8
- import random
9
- import shutil
10
- import sys
11
- import threading
12
- import time
13
- import traceback
14
- from datetime import datetime
15
- from pathlib import Path
16
-
17
- import gradio as gr
18
- import torch
19
- import transformers
20
- from datasets import Dataset, load_dataset
21
- from peft import (
22
- LoraConfig,
23
- get_peft_model,
24
- prepare_model_for_kbit_training,
25
- set_peft_model_state_dict
26
- )
27
- from peft.utils.other import \
28
- TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING as model_to_lora_modules
29
- from transformers.models.auto.modeling_auto import (
30
- MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
31
- )
32
-
33
- from modules import shared, ui, utils
34
- from modules.evaluate import (
35
- calculate_perplexity,
36
- generate_markdown_table,
37
- save_past_evaluations
38
- )
39
- from modules.logging_colors import logger
40
- from modules.models import reload_model
41
- from modules.utils import natural_keys
42
-
43
- MODEL_CLASSES = {v[1]: v[0] for v in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.items()}
44
- PARAMETERS = ["lora_name", "always_override", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "raw_text_file", "overlap_len", "newline_favor_len", "higher_rank_limit", "warmup_steps", "optimizer", "hard_cut_string", "train_only_after", "stop_at_loss", "add_eos_token", "min_chars", "report_to"]
45
- WANT_INTERRUPT = False
46
-
47
- train_log = {}
48
- train_template = {}
49
-
50
-
51
- def create_ui():
52
- mu = shared.args.multi_user
53
- with gr.Tab("Training", elem_id="training-tab"):
54
- with gr.Tab('Train LoRA', elem_id='lora-train-tab'):
55
- tmp = gr.State('')
56
- with gr.Row():
57
- with gr.Column():
58
- gr.Markdown("[Tutorial](https://github.com/oobabooga/text-generation-webui/blob/main/docs/Training-LoRAs.md)")
59
-
60
- with gr.Row():
61
- copy_from = gr.Dropdown(label='Copy parameters from', value='None', choices=utils.get_available_loras(), elem_classes=['slim-dropdown'], interactive=not mu)
62
- ui.create_refresh_button(copy_from, lambda: None, lambda: {'choices': utils.get_available_loras()}, 'refresh-button', interactive=not mu)
63
-
64
- with gr.Row():
65
- with gr.Column(scale=5):
66
- lora_name = gr.Textbox(label='Name', info='The name of your new LoRA file')
67
- with gr.Column():
68
- always_override = gr.Checkbox(label='Override Existing Files', value=False, info='If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).', elem_classes=['no-background'])
69
-
70
- with gr.Row():
71
- with gr.Column():
72
- lora_rank = gr.Slider(label='LoRA Rank', value=32, minimum=0, maximum=1024, step=4, info='Also called dimension count. Higher values = larger file, more content control. Smaller values = smaller file, less control. Use 4 or 8 for style, 128 or 256 to teach, 1024+ for fine-detail on big data. More VRAM is needed for higher ranks.')
73
- lora_alpha = gr.Slider(label='LoRA Alpha', value=64, minimum=0, maximum=2048, step=4, info='This divided by the rank becomes the scaling of the LoRA. Higher means stronger. A good standard value is twice your Rank.')
74
- batch_size = gr.Slider(label='Batch Size', value=128, minimum=0, maximum=1024, step=4, info='Global batch size. The two batch sizes together determine gradient accumulation (gradientAccum = batch / microBatch). Higher gradient accum values lead to better quality training.')
75
- micro_batch_size = gr.Slider(label='Micro Batch Size', value=4, minimum=1, maximum=128, step=1, info='Per-device batch size (NOTE: multiple devices not yet implemented). Increasing this will increase VRAM usage.')
76
- cutoff_len = gr.Slider(label='Cutoff Length', minimum=0, maximum=2048, value=256, step=32, info='Cutoff length for text input. Essentially, how long of a line of text to feed in at a time. Higher values require drastically more VRAM.')
77
-
78
- with gr.Column():
79
- save_steps = gr.Number(label='Save every n steps', value=0, info='If above 0, a checkpoint of the LoRA will be saved every time this many steps pass.')
80
-
81
- epochs = gr.Number(label='Epochs', value=3, info='Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.')
82
- learning_rate = gr.Textbox(label='Learning Rate', value='3e-4', info='In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.')
83
- lr_scheduler_type = gr.Dropdown(label='LR Scheduler', value='linear', choices=['linear', 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'polynomial', 'inverse_sqrt'], info='Learning rate scheduler - defines how the learning rate changes over time. "Constant" means never change, "linear" means to go in a straight line from the learning rate down to 0, cosine follows a curve, etc.', elem_classes=['slim-dropdown'])
84
-
85
- with gr.Accordion(label='Advanced Options', open=False):
86
- with gr.Row():
87
- with gr.Column():
88
- lora_dropout = gr.Slider(label='LoRA Dropout', minimum=0.0, maximum=1.0, step=0.025, value=0.05, info='Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.')
89
- stop_at_loss = gr.Slider(label='Stop at loss', minimum=0.0, maximum=3.0, step=0.1, value=0.00, info='The process will automatically stop once the desired loss value is reached. (reasonable numbers are 1.5-1.8)')
90
- optimizer = gr.Dropdown(label='Optimizer', value='adamw_torch', choices=['adamw_hf', 'adamw_torch', 'adamw_torch_fused', 'adamw_torch_xla', 'adamw_apex_fused', 'adafactor', 'adamw_bnb_8bit', 'adamw_anyprecision', 'sgd', 'adagrad'], info='Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.', elem_classes=['slim-dropdown'])
91
-
92
- with gr.Column():
93
- warmup_steps = gr.Number(label='Warmup Steps', value=100, info='For this many steps at the start, the learning rate will be lower than normal. This helps the trainer prepare the model and precompute statistics to improve the quality of training after the start.')
94
- train_only_after = gr.Textbox(label='Train Only After', value='', info='Only consider text *after* this string in any given chunk for training. For Alpaca datasets, use "### Response:" to only train the response and ignore the input.')
95
-
96
- add_eos_token = gr.Checkbox(label='Add EOS token', value=False, info="Adds EOS token for each dataset item. In case of raw text, the EOS will be added at the Hard Cut")
97
-
98
- higher_rank_limit = gr.Checkbox(label='Enable higher ranks', value=False, info='If checked, changes Rank/Alpha slider above to go much higher. This will not work without a datacenter-class GPU.')
99
- report_to = gr.Radio(label="Save detailed logs with", value="None", choices=["None", "wandb", "tensorboard"], interactive=True)
100
-
101
- with gr.Column():
102
- with gr.Tab(label='Formatted Dataset'):
103
- with gr.Row():
104
- format = gr.Dropdown(choices=utils.get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'], interactive=not mu)
105
- ui.create_refresh_button(format, lambda: None, lambda: {'choices': utils.get_datasets('training/formats', 'json')}, 'refresh-button', interactive=not mu)
106
-
107
- with gr.Row():
108
- dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
109
- ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button', interactive=not mu)
110
-
111
- with gr.Row():
112
- eval_dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'], interactive=not mu)
113
- ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button', interactive=not mu)
114
-
115
- eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')
116
-
117
- with gr.Tab(label="Raw text file"):
118
- with gr.Row():
119
- raw_text_file = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The raw text file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
120
- ui.create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'txt')}, 'refresh-button', interactive=not mu)
121
-
122
- with gr.Row():
123
- with gr.Column():
124
- overlap_len = gr.Slider(label='Overlap Length', minimum=0, maximum=512, value=128, step=16, info='How many tokens from the prior chunk of text to include into the next chunk. (The chunks themselves will be of a size determined by Cutoff Length). Setting overlap to exactly half the cutoff length may be ideal.')
125
- newline_favor_len = gr.Slider(label='Prefer Newline Cut Length', minimum=0, maximum=512, value=128, step=16, info='Length (in characters, not tokens) of the maximum distance to shift an overlap cut by to ensure chunks cut at newlines. If too low, cuts may occur in the middle of lines.')
126
-
127
- with gr.Column():
128
- hard_cut_string = gr.Textbox(label='Hard Cut String', value='\\n\\n\\n', info='String that indicates a hard cut between text parts. Helps prevent unwanted overlap.')
129
- min_chars = gr.Number(label='Ignore small blocks', value=0, info='Ignore Hard Cut blocks that have less or equal characters than this number')
130
-
131
- with gr.Row():
132
- start_button = gr.Button("Start LoRA Training", variant='primary', interactive=not mu)
133
- stop_button = gr.Button("Interrupt", interactive=not mu)
134
-
135
- output = gr.Markdown(value="Ready")
136
-
137
- with gr.Tab('Perplexity evaluation', elem_id='evaluate-tab'):
138
- with gr.Row():
139
- with gr.Column():
140
- models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True, interactive=not mu)
141
- evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + utils.get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.', interactive=not mu)
142
- with gr.Row():
143
- with gr.Column():
144
- stride_length = gr.Slider(label='Stride', minimum=0, maximum=32768, value=512, step=256, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
145
-
146
- with gr.Column():
147
- max_length = gr.Slider(label='max_length', minimum=0, maximum=32768, value=0, step=256, info='The context for each evaluation. If set to 0, the maximum context length for the model will be used.')
148
-
149
- with gr.Row():
150
- start_current_evaluation = gr.Button("Evaluate loaded model", interactive=not mu)
151
- start_evaluation = gr.Button("Evaluate selected models", interactive=not mu)
152
- stop_evaluation = gr.Button("Interrupt", interactive=not mu)
153
-
154
- with gr.Column():
155
- evaluation_log = gr.Markdown(value='')
156
-
157
- evaluation_table = gr.Dataframe(value=generate_markdown_table(), interactive=True)
158
- with gr.Row():
159
- save_comments = gr.Button('Save comments', elem_classes="small-button", interactive=not mu)
160
- refresh_table = gr.Button('Refresh the table', elem_classes="small-button", interactive=not mu)
161
-
162
- # Training events
163
- all_params = [lora_name, always_override, save_steps, micro_batch_size, batch_size, epochs, learning_rate, lr_scheduler_type, lora_rank, lora_alpha, lora_dropout, cutoff_len, dataset, eval_dataset, format, eval_steps, raw_text_file, overlap_len, newline_favor_len, higher_rank_limit, warmup_steps, optimizer, hard_cut_string, train_only_after, stop_at_loss, add_eos_token, min_chars, report_to]
164
-
165
- copy_from.change(do_copy_params, [copy_from] + all_params, all_params)
166
- start_button.click(do_train, all_params, output)
167
- stop_button.click(do_interrupt, None, None, queue=False)
168
- higher_rank_limit.change(change_rank_limit, [higher_rank_limit], [lora_rank, lora_alpha])
169
-
170
- # Evaluation events. For some reason, the interrupt event
171
- # doesn't work with the .then() syntax, so I write them one
172
- # by one in this ugly but functional way.
173
- ev = start_evaluation.click(calculate_perplexity, [models, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
174
- start_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)
175
-
176
- start_current_evaluation.click(lambda: ['current model'], None, tmp)
177
- ev_cur = start_current_evaluation.click(calculate_perplexity, [tmp, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
178
- start_current_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)
179
-
180
- stop_evaluation.click(None, None, None, cancels=[ev, ev_cur], queue=False)
181
- refresh_table.click(generate_markdown_table, None, evaluation_table, show_progress=True)
182
- save_comments.click(
183
- save_past_evaluations, evaluation_table, None).then(
184
- lambda: "Comments saved.", None, evaluation_log, show_progress=False)
185
-
186
-
187
- def do_interrupt():
188
- global WANT_INTERRUPT
189
- WANT_INTERRUPT = True
190
-
191
-
192
- def do_copy_params(lora_name: str, *args):
193
- f_name = f"{shared.args.lora_dir}/{clean_path(None, lora_name)}/training_parameters.json"
194
- if Path(f_name).is_file():
195
- with open(f_name, 'r', encoding='utf-8') as format_file:
196
- params: dict[str, str] = json.load(format_file)
197
- else:
198
- params = {}
199
-
200
- result = list()
201
- for i in range(0, len(PARAMETERS)):
202
- key = PARAMETERS[i]
203
- if key in params:
204
- result.append(params[key])
205
- else:
206
- result.append(args[i])
207
-
208
- return result
209
-
210
-
211
- def change_rank_limit(use_higher_ranks: bool):
212
- mult = 2 if use_higher_ranks else 1
213
- return {"maximum": 1024 * mult, "__type__": "update"}, {"maximum": 2048 * mult, "__type__": "update"}
214
-
215
-
216
- def clean_path(base_path: str, path: str):
217
- """Strips unusual symbols and forcibly builds a path as relative to the intended directory."""
218
- path = path.replace('\\', '/').replace('..', '_')
219
- if base_path is None:
220
- return path
221
-
222
- return f'{Path(base_path).absolute()}/{path}'
223
-
224
-
225
- def backup_adapter(input_folder):
226
- # Get the creation date of the file adapter_model.bin
227
- try:
228
- adapter_file = Path(f"{input_folder}/adapter_model.bin")
229
- if adapter_file.is_file():
230
-
231
- logger.info("Backing up existing LoRA adapter...")
232
- creation_date = datetime.fromtimestamp(adapter_file.stat().st_ctime)
233
- creation_date_str = creation_date.strftime("Backup-%Y-%m-%d")
234
-
235
- # Create the new subfolder
236
- subfolder_path = Path(f"{input_folder}/{creation_date_str}")
237
- subfolder_path.mkdir(parents=True, exist_ok=True)
238
-
239
- # Check if the file already exists in the subfolder
240
- backup_adapter_file = Path(f"{input_folder}/{creation_date_str}/adapter_model.bin")
241
- if backup_adapter_file.is_file():
242
- print(" - Backup already exists. Skipping backup process.")
243
- return
244
-
245
- # Copy existing files to the new subfolder
246
- existing_files = Path(input_folder).iterdir()
247
- for file in existing_files:
248
- if file.is_file():
249
- shutil.copy2(file, subfolder_path)
250
- except Exception as e:
251
- print("An error occurred in backup_adapter:", str(e))
252
-
253
-
254
- def calc_trainable_parameters(model):
255
- trainable_params = 0
256
- all_param = 0
257
- for _, param in model.named_parameters():
258
- num_params = param.numel()
259
- # if using DS Zero 3 and the weights are initialized empty
260
- if num_params == 0 and hasattr(param, "ds_numel"):
261
- num_params = param.ds_numel
262
-
263
- all_param += num_params
264
- if param.requires_grad:
265
- trainable_params += num_params
266
-
267
- return trainable_params, all_param
268
-
269
-
270
-def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch_size: int, batch_size: int, epochs: int, learning_rate: str, lr_scheduler_type: str, lora_rank: int, lora_alpha: int, lora_dropout: float, cutoff_len: int, dataset: str, eval_dataset: str, format: str, eval_steps: int, raw_text_file: str, overlap_len: int, newline_favor_len: int, higher_rank_limit: bool, warmup_steps: int, optimizer: str, hard_cut_string: str, train_only_after: str, stop_at_loss: float, add_eos_token: bool, min_chars: int, report_to: str):
-
-    if shared.args.monkey_patch:
-        from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
-            replace_peft_model_with_int4_lora_model
-        )
-        replace_peft_model_with_int4_lora_model()
-
-    global WANT_INTERRUPT
-    WANT_INTERRUPT = False
-
-    # == Input validation / processing ==
-    yield "Preparing the input..."
-    lora_file_path = clean_path(None, lora_name)
-    if lora_file_path.strip() == '':
-        yield "Missing or invalid LoRA file name input."
-        return
-
-    lora_file_path = f"{Path(shared.args.lora_dir)}/{lora_file_path}"
-    actual_lr = float(learning_rate)
-    model_type = type(shared.model).__name__
-
-    if model_type in MODEL_CLASSES:
-        model_id = MODEL_CLASSES[model_type]
-    else:
-        model_id = "llama"
-        if model_type == "PeftModelForCausalLM":
-            if len(shared.lora_names) > 0:
-                yield "You are trying to train a LoRA while you already have another LoRA loaded. This will work, but may have unexpected effects. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*"
-                logger.warning("Training LoRA over top of another LoRA. May have unexpected effects.")
-            else:
-                yield "Model ID not matched due to LoRA loading. Consider reloading base model. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*"
-                logger.warning("Model ID not matched due to LoRA loading. Consider reloading base model.")
-        else:
-            yield "LoRA training has only currently been validated for LLaMA, OPT, GPT-J, and GPT-NeoX models. Unexpected errors may follow. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*"
-            logger.warning(f"LoRA training has only currently been validated for LLaMA, OPT, GPT-J, and GPT-NeoX models. (Found model type: {model_type})")
-
-    time.sleep(5)
-
-    if shared.args.loader == 'GPTQ-for-LLaMa' and not shared.args.monkey_patch:
-        yield "LoRA training with GPTQ-for-LLaMa requires loading with `--monkey-patch`"
-        return
-
-    if cutoff_len <= 0 or micro_batch_size <= 0 or batch_size <= 0 or actual_lr <= 0 or lora_rank <= 0 or lora_alpha <= 0:
-        yield "Cannot input zeroes."
-        return
-
-    gradient_accumulation_steps = batch_size // micro_batch_size
-    shared.tokenizer.pad_token_id = 0
-    shared.tokenizer.padding_side = "left"
-
-    def encode(text, add_bos_token):
-        result = shared.tokenizer.encode(text, truncation=True, max_length=cutoff_len)
-        # Check if the first two tokens are BOS
-        if len(result) >= 2 and result[:2] == [shared.tokenizer.bos_token_id, shared.tokenizer.bos_token_id]:
-            result = result[1:]
-
-        if not add_bos_token and result[0] == shared.tokenizer.bos_token_id:
-            result = result[1:]
-        return result
-
-    def tokenize(prompt, append_eos_token=False):
-
-        if train_only_after == '' or train_only_after not in prompt:
-            input_ids = encode(prompt, True)
-
-            if append_eos_token and input_ids[-1] != shared.tokenizer.eos_token_id and len(input_ids) < cutoff_len:
-                input_ids.append(shared.tokenizer.eos_token_id)
-
-            input_ids = [shared.tokenizer.pad_token_id] * (cutoff_len - len(input_ids)) + input_ids
-            labels = [1] * len(input_ids)
-
-        else:
-            ind = prompt.index(train_only_after) + len(train_only_after)
-            before_tokens = encode(prompt[:ind], True)
-            after_tokens = encode(prompt[ind:], False)
-
-            if append_eos_token and after_tokens[-1] != shared.tokenizer.eos_token_id:
-                after_tokens.append(shared.tokenizer.eos_token_id)
-
-            full_length = len(after_tokens) + len(before_tokens)
-            if full_length > cutoff_len:
-                after_tokens = after_tokens[:cutoff_len - len(before_tokens)]
-            else:
-                before_tokens = [shared.tokenizer.pad_token_id] * (cutoff_len - full_length) + before_tokens
-
-            input_ids = before_tokens + after_tokens
-            labels = [-100] * len(before_tokens) + [1] * len(after_tokens)
-
-        input_ids = torch.tensor(input_ids)
-        return {
-            "input_ids": input_ids,
-            "labels": labels,
-            "attention_mask": input_ids.ne(shared.tokenizer.pad_token_id),
-        }
-
-    train_template.clear()
-
-    # == Prep the dataset, format, etc ==
-    if raw_text_file not in ['None', '']:
-        train_template["template_type"] = "raw_text"
-        logger.info("Loading raw text file dataset...")
-        fullpath = clean_path('training/datasets', f'{raw_text_file}')
-        fullpath = Path(fullpath)
-        if fullpath.is_dir():
-            logger.info('Training path directory {}'.format(raw_text_file))
-            raw_text = ""
-            file_paths = sorted(fullpath.glob('*.txt'), key=lambda path: natural_keys(path.name))
-            for file_path in file_paths:
-                if file_path.is_file():
-                    with file_path.open('r', encoding='utf-8') as file:
-                        raw_text += file.read().replace('\r', '')
-
-                    logger.info(f"Loaded training file: {file_path.name}")
-        else:
-            with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
-                raw_text = file.read().replace('\r', '')
-
-        cut_string = hard_cut_string.replace('\\n', '\n')
-        eos_added = 0
-        out_tokens = []
-        for text_part in raw_text.split(cut_string):
-            if len(text_part.strip()) <= min_chars:
-                continue
-
-            tokens = shared.tokenizer.encode(text_part)
-            if add_eos_token:
-                tokens.append(shared.tokenizer.eos_token_id)
-                eos_added += 1
-
-            step = cutoff_len - overlap_len
-            if step <= 0:
-                yield f"Error: overlap_len ({overlap_len}) cannot be greater than or equal to cutoff_len ({cutoff_len})"
-                return
-
-            out_tokens.extend(split_chunks(tokens, cutoff_len, step))
-
-        if eos_added > 0:
-            print(f"EOS added to {eos_added} text blocks")
-
-        del raw_text  # Note: could be a gig for a large dataset, so delete redundant data as we go to be safe on RAM
-        text_chunks = [shared.tokenizer.decode(x) for x in out_tokens]
-        del out_tokens
-        if newline_favor_len > 0:
-            text_chunks = [cut_chunk_for_newline(x, newline_favor_len) for x in text_chunks]
-
-        train_data = Dataset.from_list([tokenize(x) for x in text_chunks])
-        del text_chunks
-        eval_data = None
-    else:
-        if dataset in ['None', '']:
-            yield "Missing dataset choice input, cannot continue."
-            return
-
-        if format in ['None', '']:
-            yield "Missing format choice input, cannot continue."
-            return
-
-        train_template["template_type"] = "dataset"
-
-        with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
-            format_data: dict[str, str] = json.load(formatFile)
-
-        # == store training prompt ==
-        for _, value in format_data.items():
-            prompt_key = f"template_{len(train_template)}"
-            train_template[prompt_key] = value
-
-        def generate_prompt(data_point: dict[str, str]):
-            for options, data in format_data.items():
-                if set(options.split(',')) == set(x[0] for x in data_point.items() if (type(x[1]) is str and len(x[1].strip()) > 0)):
-                    for key, val in data_point.items():
-                        if type(val) is str:
-                            data = data.replace(f'%{key}%', val)
-                    return data
-            raise RuntimeError(f'Data-point "{data_point}" has no keyset match within format "{list(format_data.keys())}"')
-
-        def generate_and_tokenize_prompt(data_point):
-            prompt = generate_prompt(data_point)
-            return tokenize(prompt, add_eos_token)
-
-        logger.info("Loading JSON datasets...")
-        data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
-        train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
-
-        if eval_dataset == 'None':
-            eval_data = None
-        else:
-            eval_data = load_dataset("json", data_files=clean_path('training/datasets', f'{eval_dataset}.json'))
-            eval_data = eval_data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
-
-    # == We MUST reload model if it went through any previous training, even failed one ==
-    if shared.model_dirty_from_training:
-        selected_model = shared.model_name
-        if selected_model:
-            print("\033[1;31;1m(Model has been modified by previous training, it needs to be reloaded...)\033[0;37;0m")
-            try:
-                yield f"Reloading {selected_model}..."
-                reload_model()
-                if shared.model is not None:
-                    print("Model reloaded OK, continue with training.")
-                else:
-                    return f"Failed to load {selected_model}."
-            except:
-                exc = traceback.format_exc()
-                logger.error('Failed to reload the model.')
-                print(exc)
-                return exc.replace('\n', '\n\n')
-
-    # == Start prepping the model itself ==
-    if not hasattr(shared.model, 'lm_head') or hasattr(shared.model.lm_head, 'weight'):
-        logger.info("Getting model ready...")
-        prepare_model_for_kbit_training(shared.model)
-
-    # base model is now frozen and should not be reused for any other LoRA training than this one
-    shared.model_dirty_from_training = True
-
-    logger.info("Preparing for training...")
-    config = LoraConfig(
-        r=lora_rank,
-        lora_alpha=lora_alpha,
-        target_modules=model_to_lora_modules[model_id],
-        lora_dropout=lora_dropout,
-        bias="none",
-        task_type="CAUSAL_LM"
-    )
-
-    # == Backup the existing adapter ==
-    if not always_override:
-        backup_adapter(lora_file_path)
-
-    # == get model trainable params
-    model_trainable_params, model_all_params = calc_trainable_parameters(shared.model)
-
-    try:
-        logger.info("Creating LoRA model...")
-        lora_model = get_peft_model(shared.model, config)
-        if not always_override and Path(f"{lora_file_path}/adapter_model.bin").is_file():
-            logger.info("Loading existing LoRA data...")
-            state_dict_peft = torch.load(f"{lora_file_path}/adapter_model.bin")
-            set_peft_model_state_dict(lora_model, state_dict_peft)
-    except:
-        yield traceback.format_exc().replace('\n', '\n\n')
-        return
-
-    if shared.args.monkey_patch:
-        from alpaca_lora_4bit.autograd_4bit import Autograd4bitQuantLinear
-        from alpaca_lora_4bit.models import Linear4bitLt
-        for _, m in lora_model.named_modules():
-            if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
-                if m.is_v1_model:
-                    m.zeros = m.zeros.half()
-                m.scales = m.scales.half()
-
-    class Tracked():
-        def __init__(self):
-            self.current_steps = 0
-            self.max_steps = 0
-            self.did_save = False
-
-    tracked = Tracked()
-    actual_save_steps = math.ceil(save_steps / gradient_accumulation_steps)
-
-    class Callbacks(transformers.TrainerCallback):
-        def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
-            tracked.current_steps = state.global_step * gradient_accumulation_steps
-            tracked.max_steps = state.max_steps * gradient_accumulation_steps
-            if WANT_INTERRUPT:
-                control.should_epoch_stop = True
-                control.should_training_stop = True
-            elif state.global_step > 0 and actual_save_steps > 0 and state.global_step % actual_save_steps == 0:
-                lora_model.save_pretrained(f"{lora_file_path}/checkpoint-{tracked.current_steps}/")
-                # Save log
-                with open(f"{lora_file_path}/checkpoint-{tracked.current_steps}/training_log.json", 'w', encoding='utf-8') as file:
-                    json.dump(train_log, file, indent=2)
-                # == Save training prompt ==
-                with open(f"{lora_file_path}/checkpoint-{tracked.current_steps}/training_prompt.json", 'w', encoding='utf-8') as file:
-                    json.dump(train_template, file, indent=2)
-
-        def on_substep_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
-            tracked.current_steps += 1
-            if WANT_INTERRUPT:
-                control.should_epoch_stop = True
-                control.should_training_stop = True
-
-        def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, logs, **kwargs):
-            train_log.update(logs)
-            train_log.update({"current_steps": tracked.current_steps})
-            if WANT_INTERRUPT:
-                print("\033[1;31;1mInterrupted by user\033[0;37;0m")
-
-            print(f"\033[1;30;40mStep: {tracked.current_steps} \033[0;37;0m", end='')
-            if 'loss' in logs:
-                loss = float(logs['loss'])
-                if loss <= stop_at_loss:
-                    control.should_epoch_stop = True
-                    control.should_training_stop = True
-                    print(f"\033[1;31;1mStop Loss {stop_at_loss} reached.\033[0;37;0m")
-
-    trainer = transformers.Trainer(
-        model=lora_model,
-        train_dataset=train_data,
-        eval_dataset=eval_data,
-        args=transformers.TrainingArguments(
-            report_to=report_to if report_to != "None" else None,
-            per_device_train_batch_size=micro_batch_size,
-            gradient_accumulation_steps=gradient_accumulation_steps,
-            warmup_steps=math.ceil(warmup_steps / gradient_accumulation_steps),
-            num_train_epochs=epochs,
-            learning_rate=actual_lr,
-            fp16=False if shared.args.cpu else True,
-            optim=optimizer,
-            logging_steps=2 if stop_at_loss > 0 else 5,
-            evaluation_strategy="steps" if eval_data is not None else "no",
-            eval_steps=math.ceil(eval_steps / gradient_accumulation_steps) if eval_data is not None else None,
-            save_strategy="steps" if eval_data is not None else "no",
-            output_dir=lora_file_path,
-            lr_scheduler_type=lr_scheduler_type,
-            load_best_model_at_end=eval_data is not None,
-            # TODO: Enable multi-device support
-            ddp_find_unused_parameters=None,
-            no_cuda=shared.args.cpu,
-        ),
-        data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False),
-        callbacks=list([Callbacks()])
-    )
-
-    lora_model.config.use_cache = False
-
-    if torch.__version__ >= "2" and sys.platform != "win32":
-        lora_model = torch.compile(lora_model)
-
-    # == Save parameters for reuse ==
-    with open(f"{lora_file_path}/training_parameters.json", 'w', encoding='utf-8') as file:
-        vars = locals()
-        json.dump({x: vars[x] for x in PARAMETERS}, file, indent=2)
-
-    # == Save training prompt ==
-    with open(f"{lora_file_path}/training_prompt.json", 'w', encoding='utf-8') as file:
-        json.dump(train_template, file, indent=2)
-
-    # == Main run and monitor loop ==
-    logger.info("Starting training...")
-    yield "Starting..."
-
-    lora_trainable_param, lora_all_param = calc_trainable_parameters(lora_model)
-
-    projections_string = ", ".join([projection.replace("_proj", "") for projection in model_to_lora_modules[model_id]])
-
-    print(f"Training '{model_id}' model using ({projections_string}) projections")
-
-    if lora_all_param > 0:
-        print(f"Trainable params: {lora_trainable_param:,d} ({100 * lora_trainable_param / lora_all_param:.4f} %), All params: {lora_all_param:,d} (Model: {model_all_params:,d})")
-
-    train_log.update({"base_model_name": shared.model_name})
-    train_log.update({"base_model_class": shared.model.__class__.__name__})
-    train_log.update({"base_loaded_in_4bit": getattr(lora_model, "is_loaded_in_4bit", False)})
-    train_log.update({"base_loaded_in_8bit": getattr(lora_model, "is_loaded_in_8bit", False)})
-    train_log.update({"projections": projections_string})

-    if stop_at_loss > 0:
-        print(f"Monitoring loss \033[1;31;1m(Auto-Stop at: {stop_at_loss})\033[0;37;0m")
-
-    if WANT_INTERRUPT:
-        yield "Interrupted before start."
-        return
-
-    def log_train_dataset(trainer):
-        decoded_entries = []
-        # Try to decode the entries and write the log file
-        try:
-            # Iterate over the first 10 elements in the dataset (or fewer if there are less than 10)
-            for i in range(min(10, len(trainer.train_dataset))):
-                decoded_text = shared.tokenizer.decode(trainer.train_dataset[i]['input_ids'])
-                decoded_entries.append({"value": decoded_text})
-
-            # Write the log file
-            Path('logs').mkdir(exist_ok=True)
-            with open(Path('logs/train_dataset_sample.json'), 'w') as json_file:
-                json.dump(decoded_entries, json_file, indent=4)
-
-            logger.info("Log file 'train_dataset_sample.json' created in the 'logs' directory.")
-        except Exception as e:
-            logger.error(f"Failed to create log file due to error: {e}")
-
-    def threaded_run():
-        log_train_dataset(trainer)
-        trainer.train()
-        # Note: save in the thread in case the gradio thread breaks (eg browser closed)
-        lora_model.save_pretrained(lora_file_path)
-        logger.info("LoRA training run is completed and saved.")
-        # Save log
-        with open(f"{lora_file_path}/training_log.json", 'w', encoding='utf-8') as file:
-            json.dump(train_log, file, indent=2)
-
-    thread = threading.Thread(target=threaded_run)
-    thread.start()
-    last_step = 0
-    start_time = time.perf_counter()
-
-    while thread.is_alive():
-        time.sleep(0.5)
-        if WANT_INTERRUPT:
-            yield "Interrupting, please wait... *(Run will stop after the current training step completes.)*"
-
-        elif tracked.current_steps != last_step:
-            last_step = tracked.current_steps
-            time_elapsed = time.perf_counter() - start_time
-            if time_elapsed <= 0:
-                timer_info = ""
-                total_time_estimate = 999
-            else:
-                its = tracked.current_steps / time_elapsed
-                if its > 1:
-                    timer_info = f"`{its:.2f}` it/s"
-                else:
-                    timer_info = f"`{1.0/its:.2f}` s/it"
-
-                total_time_estimate = (1.0 / its) * (tracked.max_steps)
-
-            yield f"Running... **{tracked.current_steps}** / **{tracked.max_steps}** ... {timer_info}, {format_time(time_elapsed)} / {format_time(total_time_estimate)} ... {format_time(total_time_estimate - time_elapsed)} remaining"
-
-    # Saving in the train thread might fail if an error occurs, so save here if so.
-    if not tracked.did_save:
-        logger.info("Training complete, saving...")
-        lora_model.save_pretrained(lora_file_path)
-
-    if WANT_INTERRUPT:
-        logger.info("Training interrupted.")
-        yield f"Interrupted. Incomplete LoRA saved to `{lora_file_path}`."
-    else:
-        logger.info("Training complete!")
-        yield f"Done! LoRA saved to `{lora_file_path}`.\n\nBefore testing your new LoRA, make sure to first reload the model, as it is currently dirty from training."
-
-
-def split_chunks(arr, size, step):
-    for i in range(0, len(arr), step):
-        yield arr[i:i + size]
-
-
-def cut_chunk_for_newline(chunk: str, max_length: int):
-    if '\n' not in chunk:
-        return chunk
-
-    first_newline = chunk.index('\n')
-    if first_newline < max_length:
-        chunk = chunk[first_newline + 1:]
-
-    if '\n' not in chunk:
-        return chunk
-
-    last_newline = chunk.rindex('\n')
-    if len(chunk) - last_newline < max_length:
-        chunk = chunk[:last_newline]
-
-    return chunk
-
-
-def format_time(seconds: float):
-    if seconds < 120:
-        return f"`{seconds:.0f}` seconds"
-
-    minutes = seconds / 60
-    if minutes < 120:
-        return f"`{minutes:.0f}` minutes"
-
-    hours = minutes / 60
-    return f"`{hours:.0f}` hours"
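
Note: the three standalone helpers at the end of this file are self-contained and easy to sanity-check in isolation. A minimal sketch (plain Python, no webui dependencies) showing how split_chunks produces overlapping token windows and how format_time buckets durations:

    # Sketch only: exercises the helpers exactly as defined above.
    tokens = list(range(10))

    # size=4, step=3 gives windows that overlap by one token.
    print(list(split_chunks(tokens, 4, 3)))
    # -> [[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9], [9]]

    print(format_time(90))     # -> `90` seconds
    print(format_time(3600))   # -> `60` minutes
    print(format_time(10800))  # -> `3` hours
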
spaces/AnnasBlackHat/Image-Similarity/src/model/simlarity_model.py DELETED
@@ -1,9 +0,0 @@
-from dataclasses import dataclass
-from .similarity_interface import SimilarityInterface
-
-@dataclass
-class SimilarityModel:
-    name: str
-    image_size: int
-    model_cls: SimilarityInterface
-    image_input_type: str = 'array'
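
Note: SimilarityModel is a plain dataclass record, so constructing one only means filling the fields. The name and the stand-in object below are hypothetical, purely to show the shape (Python does not enforce the model_cls annotation at runtime):

    # Hypothetical entry; any object implementing SimilarityInterface can fill model_cls.
    entry = SimilarityModel(name='vit-base', image_size=224, model_cls=object())
    print(entry.image_input_type)  # -> 'array' (the default)
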
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/bbox.py DELETED
@@ -1,72 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', ['bbox_overlaps'])
-
-
-def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0):
-    """Calculate overlap between two set of bboxes.
-
-    If ``aligned`` is ``False``, then calculate the ious between each bbox
-    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
-    bboxes1 and bboxes2.
-
-    Args:
-        bboxes1 (Tensor): shape (m, 4) in <x1, y1, x2, y2> format or empty.
-        bboxes2 (Tensor): shape (n, 4) in <x1, y1, x2, y2> format or empty.
-            If aligned is ``True``, then m and n must be equal.
-        mode (str): "iou" (intersection over union) or iof (intersection over
-            foreground).
-
-    Returns:
-        ious(Tensor): shape (m, n) if aligned == False else shape (m, 1)
-
-    Example:
-        >>> bboxes1 = torch.FloatTensor([
-        >>>     [0, 0, 10, 10],
-        >>>     [10, 10, 20, 20],
-        >>>     [32, 32, 38, 42],
-        >>> ])
-        >>> bboxes2 = torch.FloatTensor([
-        >>>     [0, 0, 10, 20],
-        >>>     [0, 10, 10, 19],
-        >>>     [10, 10, 20, 20],
-        >>> ])
-        >>> bbox_overlaps(bboxes1, bboxes2)
-        tensor([[0.5000, 0.0000, 0.0000],
-                [0.0000, 0.0000, 1.0000],
-                [0.0000, 0.0000, 0.0000]])
-
-    Example:
-        >>> empty = torch.FloatTensor([])
-        >>> nonempty = torch.FloatTensor([
-        >>>     [0, 0, 10, 9],
-        >>> ])
-        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
-        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
-        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
-    """
-
-    mode_dict = {'iou': 0, 'iof': 1}
-    assert mode in mode_dict.keys()
-    mode_flag = mode_dict[mode]
-    # Either the boxes are empty or the length of boxes' last dimension is 4
-    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
-    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
-    assert offset == 1 or offset == 0
-
-    rows = bboxes1.size(0)
-    cols = bboxes2.size(0)
-    if aligned:
-        assert rows == cols
-
-    if rows * cols == 0:
-        return bboxes1.new(rows, 1) if aligned else bboxes1.new(rows, cols)
-
-    if aligned:
-        ious = bboxes1.new_zeros(rows)
-    else:
-        ious = bboxes1.new_zeros((rows, cols))
-    ext_module.bbox_overlaps(
-        bboxes1, bboxes2, ious, mode=mode_flag, aligned=aligned, offset=offset)
-    return ious
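
Note: the heavy lifting above happens in the compiled `_ext` extension, so the function is not runnable without mmcv's binaries. As a rough illustration of the same pairwise IoU math (a sketch, not the library's kernel; it covers only the non-aligned 'iou' mode), a pure-PyTorch equivalent could look like:

    import torch

    def bbox_overlaps_reference(bboxes1, bboxes2, offset=0):
        # Pairwise IoU for boxes in <x1, y1, x2, y2> format; matches the
        # docstring examples above (e.g. the first pair gives 0.5).
        area1 = (bboxes1[:, 2] - bboxes1[:, 0] + offset) * (bboxes1[:, 3] - bboxes1[:, 1] + offset)
        area2 = (bboxes2[:, 2] - bboxes2[:, 0] + offset) * (bboxes2[:, 3] - bboxes2[:, 1] + offset)
        lt = torch.max(bboxes1[:, None, :2], bboxes2[None, :, :2])  # [m, n, 2] top-left of intersection
        rb = torch.min(bboxes1[:, None, 2:], bboxes2[None, :, 2:])  # [m, n, 2] bottom-right of intersection
        wh = (rb - lt + offset).clamp(min=0)                        # zero out non-overlapping pairs
        inter = wh[..., 0] * wh[..., 1]
        return inter / (area1[:, None] + area2[None, :] - inter)
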
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/windows.py DELETED
@@ -1,195 +0,0 @@
-from __future__ import annotations
-
-import ctypes
-import os
-import sys
-from functools import lru_cache
-from typing import Callable
-
-from .api import PlatformDirsABC
-
-
-class Windows(PlatformDirsABC):
-    """`MSDN on where to store app data files
-    <http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120>`_.
-    Makes use of the
-    `appname <platformdirs.api.PlatformDirsABC.appname>`,
-    `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`,
-    `version <platformdirs.api.PlatformDirsABC.version>`,
-    `roaming <platformdirs.api.PlatformDirsABC.roaming>`,
-    `opinion <platformdirs.api.PlatformDirsABC.opinion>`,
-    `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
-    """
-
-    @property
-    def user_data_dir(self) -> str:
-        """
-        :return: data directory tied to the user, e.g.
-         ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or
-         ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)
-        """
-        const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"
-        path = os.path.normpath(get_win_folder(const))
-        return self._append_parts(path)
-
-    def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:
-        params = []
-        if self.appname:
-            if self.appauthor is not False:
-                author = self.appauthor or self.appname
-                params.append(author)
-            params.append(self.appname)
-            if opinion_value is not None and self.opinion:
-                params.append(opinion_value)
-            if self.version:
-                params.append(self.version)
-        path = os.path.join(path, *params)
-        self._optionally_create_directory(path)
-        return path
-
-    @property
-    def site_data_dir(self) -> str:
-        """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``"""
-        path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
-        return self._append_parts(path)
-
-    @property
-    def user_config_dir(self) -> str:
-        """:return: config directory tied to the user, same as `user_data_dir`"""
-        return self.user_data_dir
-
-    @property
-    def site_config_dir(self) -> str:
-        """:return: config directory shared by the users, same as `site_data_dir`"""
-        return self.site_data_dir
-
-    @property
-    def user_cache_dir(self) -> str:
-        """
-        :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.
-         ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
-        """
-        path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))
-        return self._append_parts(path, opinion_value="Cache")
-
-    @property
-    def site_cache_dir(self) -> str:
-        """:return: cache directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname\\Cache\\$version``"""
-        path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
-        return self._append_parts(path, opinion_value="Cache")
-
-    @property
-    def user_state_dir(self) -> str:
-        """:return: state directory tied to the user, same as `user_data_dir`"""
-        return self.user_data_dir
-
-    @property
-    def user_log_dir(self) -> str:
-        """
-        :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it
-        """
-        path = self.user_data_dir
-        if self.opinion:
-            path = os.path.join(path, "Logs")
-            self._optionally_create_directory(path)
-        return path
-
-    @property
-    def user_documents_dir(self) -> str:
-        """
-        :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``
-        """
-        return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))
-
-    @property
-    def user_runtime_dir(self) -> str:
-        """
-        :return: runtime directory tied to the user, e.g.
-         ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``
-        """
-        path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp"))
-        return self._append_parts(path)
-
-
-def get_win_folder_from_env_vars(csidl_name: str) -> str:
-    """Get folder from environment variables."""
-    if csidl_name == "CSIDL_PERSONAL":  # does not have an environment name
-        return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents")
-
-    env_var_name = {
-        "CSIDL_APPDATA": "APPDATA",
-        "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
-        "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
-    }.get(csidl_name)
-    if env_var_name is None:
-        raise ValueError(f"Unknown CSIDL name: {csidl_name}")
-    result = os.environ.get(env_var_name)
-    if result is None:
-        raise ValueError(f"Unset environment variable: {env_var_name}")
-    return result
-
-
-def get_win_folder_from_registry(csidl_name: str) -> str:
-    """Get folder from the registry.
-
-    This is a fallback technique at best. I'm not sure if using the
-    registry for this guarantees us the correct answer for all CSIDL_*
-    names.
-    """
-    shell_folder_name = {
-        "CSIDL_APPDATA": "AppData",
-        "CSIDL_COMMON_APPDATA": "Common AppData",
-        "CSIDL_LOCAL_APPDATA": "Local AppData",
-        "CSIDL_PERSONAL": "Personal",
-    }.get(csidl_name)
-    if shell_folder_name is None:
-        raise ValueError(f"Unknown CSIDL name: {csidl_name}")
-    if sys.platform != "win32":  # only needed for mypy type checker to know that this code runs only on Windows
-        raise NotImplementedError
-    import winreg
-
-    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
-    directory, _ = winreg.QueryValueEx(key, shell_folder_name)
-    return str(directory)
-
-
-def get_win_folder_via_ctypes(csidl_name: str) -> str:
-    """Get folder with ctypes."""
-    csidl_const = {
-        "CSIDL_APPDATA": 26,
-        "CSIDL_COMMON_APPDATA": 35,
-        "CSIDL_LOCAL_APPDATA": 28,
-        "CSIDL_PERSONAL": 5,
-    }.get(csidl_name)
-    if csidl_const is None:
-        raise ValueError(f"Unknown CSIDL name: {csidl_name}")
-
-    buf = ctypes.create_unicode_buffer(1024)
-    windll = getattr(ctypes, "windll")  # noqa: B009 # using getattr to avoid false positive with mypy type checker
-    windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
-    # Downgrade to short path name if it has highbit chars.
-    if any(ord(c) > 255 for c in buf):
-        buf2 = ctypes.create_unicode_buffer(1024)
-        if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
-            buf = buf2
-
-    return buf.value
-
-
-def _pick_get_win_folder() -> Callable[[str], str]:
-    if hasattr(ctypes, "windll"):
-        return get_win_folder_via_ctypes
-    try:
-        import winreg  # noqa: F401
-    except ImportError:
-        return get_win_folder_from_env_vars
-    else:
-        return get_win_folder_from_registry
-
-
-get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())
-
-__all__ = [
-    "Windows",
-]
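
Note: this vendored module is normally reached through the public platformdirs API rather than imported directly. A minimal sketch ("MyApp" and "MyCompany" are placeholder names):

    from platformdirs import user_data_dir, user_cache_dir

    print(user_data_dir("MyApp", "MyCompany"))
    # On Windows (non-roaming): C:\Users\<user>\AppData\Local\MyCompany\MyApp
    print(user_cache_dir("MyApp", "MyCompany"))
    # On Windows: C:\Users\<user>\AppData\Local\MyCompany\MyApp\Cache
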
spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolox_l.py DELETED
@@ -1,15 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-import os
-
-from yolox.exp import Exp as MyExp
-
-
-class Exp(MyExp):
-    def __init__(self):
-        super(Exp, self).__init__()
-        self.depth = 1.0
-        self.width = 1.0
-        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
spaces/Audio-AGI/AudioSep/data/audiotext_dataset.py DELETED
@@ -1,91 +0,0 @@
-import json
-import random
-import torch
-import torchaudio
-from torch.utils.data import Dataset
-
-
-class AudioTextDataset(Dataset):
-    """Can sample data from audio-text databases
-    Params:
-    sampling_rate: audio sampling rate
-    max_clip_len: max length (seconds) of audio clip to be sampled
-    """
-    def __init__(
-        self,
-        datafiles=[''],
-        sampling_rate=32000,
-        max_clip_len=5,
-    ):
-        all_data_json = []
-        for datafile in datafiles:
-            with open(datafile, 'r') as fp:
-                data_json = json.load(fp)['data']
-                all_data_json.extend(data_json)
-        self.all_data_json = all_data_json
-
-        self.sampling_rate = sampling_rate
-        self.max_length = max_clip_len * sampling_rate
-
-    def __len__(self):
-        return len(self.all_data_json)
-
-    def _cut_or_randomcrop(self, waveform):
-        # waveform: [1, samples]
-        # random crop
-        if waveform.size(1) > self.max_length:
-            random_idx = random.randint(0, waveform.size(1)-self.max_length)
-            waveform = waveform[:, random_idx:random_idx+self.max_length]
-        else:
-            temp_wav = torch.zeros(1, self.max_length)
-            temp_wav[:, 0:waveform.size(1)] = waveform
-            waveform = temp_wav
-
-        assert waveform.size(1) == self.max_length, \
-            f"number of audio samples is {waveform.size(1)}"
-
-        return waveform
-
-    def _read_audio(self, index):
-        try:
-            audio_path = self.all_data_json[index]['wav']
-            audio_data, audio_rate = torchaudio.load(audio_path, channels_first=True)
-            text = self.all_data_json[index]['caption']
-
-            # drop short utterance
-            if audio_data.size(1) < self.sampling_rate * 1:
-                raise Exception(f'{audio_path} is too short, drop it ...')
-
-            return text, audio_data, audio_rate
-
-        except Exception as e:
-            print(f'error: {e} occurs, when loading {audio_path}')
-            random_index = random.randint(0, len(self.all_data_json)-1)
-            return self._read_audio(index=random_index)
-
-    def __getitem__(self, index):
-        # create a audio tensor
-        text, audio_data, audio_rate = self._read_audio(index)
-        audio_len = audio_data.shape[1] / audio_rate
-        # convert stero to single channel
-        if audio_data.shape[0] > 1:
-            # audio_data: [samples]
-            audio_data = (audio_data[0] + audio_data[1]) / 2
-        else:
-            audio_data = audio_data.squeeze(0)
-
-        # resample audio clip
-        if audio_rate != self.sampling_rate:
-            audio_data = torchaudio.functional.resample(audio_data, orig_freq=audio_rate, new_freq=self.sampling_rate)
-
-        audio_data = audio_data.unsqueeze(0)
-
-        audio_data = self._cut_or_randomcrop(audio_data)
-
-        data_dict = {
-            'text': text,
-            'waveform': audio_data,
-            'modality': 'audio_text'
-        }
-
-        return data_dict
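
Note: each item comes back as a dict with a fixed-length waveform of max_clip_len * sampling_rate samples, so the dataset plugs directly into a standard DataLoader. A usage sketch (the metadata path is hypothetical; as the loader above reads, each JSON file must hold a top-level 'data' list of entries with 'wav' and 'caption' keys):

    from torch.utils.data import DataLoader

    dataset = AudioTextDataset(
        datafiles=['datafiles/train.json'],  # hypothetical path
        sampling_rate=32000,
        max_clip_len=5,
    )
    loader = DataLoader(dataset, batch_size=8, shuffle=True)
    batch = next(iter(loader))
    print(batch['waveform'].shape)  # -> torch.Size([8, 1, 160000]), i.e. 5 s at 32 kHz
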
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/__init__.py DELETED
@@ -1,19 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from . import transforms  # isort:skip
-
-from .build import (
-    build_batch_data_loader,
-    build_detection_test_loader,
-    build_detection_train_loader,
-    get_detection_dataset_dicts,
-    load_proposals_into_dataset,
-    print_instances_class_histogram,
-)
-from .catalog import DatasetCatalog, MetadataCatalog, Metadata
-from .common import DatasetFromList, MapDataset, ToIterableDataset
-from .dataset_mapper import DatasetMapper
-
-# ensure the builtin datasets are registered
-from . import datasets, samplers  # isort:skip
-
-__all__ = [k for k in globals().keys() if not k.startswith("_")]
spaces/Bart92/RVC_HF/lib/infer_pack/commons.py DELETED
@@ -1,166 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
-    classname = m.__class__.__name__
-    if classname.find("Conv") != -1:
-        m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
-    return int((kernel_size * dilation - dilation) / 2)
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
-    """KL(P||Q)"""
-    kl = (logs_q - logs_p) - 0.5
-    kl += (
-        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
-    )
-    return kl
-
-
-def rand_gumbel(shape):
-    """Sample from the Gumbel distribution, protect from overflows."""
-    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-    return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
-    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-    return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, :, idx_str:idx_end]
-    return ret
-
-
-def slice_segments2(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, idx_str:idx_end]
-    return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
-    b, d, t = x.size()
-    if x_lengths is None:
-        x_lengths = t
-    ids_str_max = x_lengths - segment_size + 1
-    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-    ret = slice_segments(x, ids_str, segment_size)
-    return ret, ids_str
-
-
-def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
-    position = torch.arange(length, dtype=torch.float)
-    num_timescales = channels // 2
-    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
-        num_timescales - 1
-    )
-    inv_timescales = min_timescale * torch.exp(
-        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
-    )
-    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-    signal = F.pad(signal, [0, 0, 0, channels % 2])
-    signal = signal.view(1, channels, length)
-    return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-    b, channels, length = x.size()
-    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-    b, channels, length = x.size()
-    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
-    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-    return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-    n_channels_int = n_channels[0]
-    in_act = input_a + input_b
-    t_act = torch.tanh(in_act[:, :n_channels_int, :])
-    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-    acts = t_act * s_act
-    return acts
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def shift_1d(x):
-    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-    return x
-
-
-def sequence_mask(length, max_length=None):
-    if max_length is None:
-        max_length = length.max()
-    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-    return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
-    """
-    duration: [b, 1, t_x]
-    mask: [b, 1, t_y, t_x]
-    """
-    device = duration.device
-
-    b, _, t_y, t_x = mask.shape
-    cum_duration = torch.cumsum(duration, -1)
-
-    cum_duration_flat = cum_duration.view(b * t_x)
-    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-    path = path.view(b, t_x, t_y)
-    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-    path = path.unsqueeze(1).transpose(2, 3) * mask
-    return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-    if isinstance(parameters, torch.Tensor):
-        parameters = [parameters]
-    parameters = list(filter(lambda p: p.grad is not None, parameters))
-    norm_type = float(norm_type)
-    if clip_value is not None:
-        clip_value = float(clip_value)
-
-    total_norm = 0
-    for p in parameters:
-        param_norm = p.grad.data.norm(norm_type)
-        total_norm += param_norm.item() ** norm_type
-        if clip_value is not None:
-            p.grad.data.clamp_(min=-clip_value, max=clip_value)
-    total_norm = total_norm ** (1.0 / norm_type)
-    return total_norm
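
Note: most of these helpers operate on simple shapes and can be checked directly. For instance, sequence_mask builds a boolean padding mask from per-example lengths; a quick sketch using plain torch:

    import torch

    lengths = torch.tensor([3, 5])
    print(sequence_mask(lengths, max_length=5))
    # tensor([[ True,  True,  True, False, False],
    #         [ True,  True,  True,  True,  True]])
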
spaces/BetterAPI/BetterChat/svelte.config.js DELETED
@@ -1,26 +0,0 @@
-import adapter from "@sveltejs/adapter-node";
-import { vitePreprocess } from "@sveltejs/kit/vite";
-import dotenv from "dotenv";
-import pkg from "./package.json" assert { type: "json" };
-
-dotenv.config({ path: "./.env.local" });
-dotenv.config({ path: "./.env" });
-
-process.env.PUBLIC_VERSION = pkg.version.replace(/\.0\b/g, "");
-
-/** @type {import('@sveltejs/kit').Config} */
-const config = {
-	// Consult https://kit.svelte.dev/docs/integrations#preprocessors
-	// for more information about preprocessors
-	preprocess: vitePreprocess(),
-
-	kit: {
-		adapter: adapter(),
-
-		paths: {
-			base: process.env.APP_BASE || "",
-		},
-	},
-};
-
-export default config;
spaces/BetterAPI/BetterChat_new/src/lib/actions/snapScrollToBottom.ts DELETED
@@ -1,54 +0,0 @@
-import { navigating } from "$app/stores";
-import { tick } from "svelte";
-import { get } from "svelte/store";
-
-const detachedOffset = 10;
-
-/**
- * @param node element to snap scroll to bottom
- * @param dependency pass in a dependency to update scroll on changes.
- */
-export const snapScrollToBottom = (node: HTMLElement, dependency: any) => {
-	let prevScrollValue = node.scrollTop;
-	let isDetached = false;
-
-	const handleScroll = () => {
-		// if user scrolled up, we detach
-		if (node.scrollTop < prevScrollValue) {
-			isDetached = true;
-		}
-
-		// if user scrolled back to within 10px of bottom, we reattach
-		if (node.scrollTop - (node.scrollHeight - node.clientHeight) >= -detachedOffset) {
-			isDetached = false;
-		}
-
-		prevScrollValue = node.scrollTop;
-	};
-
-	const updateScroll = async (_options: { force?: boolean } = {}) => {
-		const defaultOptions = { force: false };
-		const options = { ...defaultOptions, ..._options };
-		const { force } = options;
-
-		if (!force && isDetached && !get(navigating)) return;
-
-		// wait for next tick to ensure that the DOM is updated
-		await tick();
-
-		node.scrollTo({ top: node.scrollHeight });
-	};
-
-	node.addEventListener("scroll", handleScroll);
-
-	if (dependency) {
-		updateScroll({ force: true });
-	}
-
-	return {
-		update: updateScroll,
-		destroy: () => {
-			node.removeEventListener("scroll", handleScroll);
-		},
-	};
-};
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distro/distro.py DELETED
@@ -1,1399 +0,0 @@
1
- #!/usr/bin/env python
2
- # Copyright 2015,2016,2017 Nir Cohen
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- """
17
- The ``distro`` package (``distro`` stands for Linux Distribution) provides
18
- information about the Linux distribution it runs on, such as a reliable
19
- machine-readable distro ID, or version information.
20
-
21
- It is the recommended replacement for Python's original
22
- :py:func:`platform.linux_distribution` function, but it provides much more
23
- functionality. An alternative implementation became necessary because Python
24
- 3.5 deprecated this function, and Python 3.8 removed it altogether. Its
25
- predecessor function :py:func:`platform.dist` was already deprecated since
26
- Python 2.6 and removed in Python 3.8. Still, there are many cases in which
27
- access to OS distribution information is needed. See `Python issue 1322
28
- <https://bugs.python.org/issue1322>`_ for more information.
29
- """
30
-
31
- import argparse
32
- import json
33
- import logging
34
- import os
35
- import re
36
- import shlex
37
- import subprocess
38
- import sys
39
- import warnings
40
- from typing import (
41
- Any,
42
- Callable,
43
- Dict,
44
- Iterable,
45
- Optional,
46
- Sequence,
47
- TextIO,
48
- Tuple,
49
- Type,
50
- )
51
-
52
- try:
53
- from typing import TypedDict
54
- except ImportError:
55
- # Python 3.7
56
- TypedDict = dict
57
-
58
- __version__ = "1.8.0"
59
-
60
-
61
- class VersionDict(TypedDict):
62
- major: str
63
- minor: str
64
- build_number: str
65
-
66
-
67
- class InfoDict(TypedDict):
68
- id: str
69
- version: str
70
- version_parts: VersionDict
71
- like: str
72
- codename: str
73
-
74
-
75
- _UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")
76
- _UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib")
77
- _OS_RELEASE_BASENAME = "os-release"
78
-
79
- #: Translation table for normalizing the "ID" attribute defined in os-release
80
- #: files, for use by the :func:`distro.id` method.
81
- #:
82
- #: * Key: Value as defined in the os-release file, translated to lower case,
83
- #: with blanks translated to underscores.
84
- #:
85
- #: * Value: Normalized value.
86
- NORMALIZED_OS_ID = {
87
- "ol": "oracle", # Oracle Linux
88
- "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap
89
- }
90
-
91
- #: Translation table for normalizing the "Distributor ID" attribute returned by
92
- #: the lsb_release command, for use by the :func:`distro.id` method.
93
- #:
94
- #: * Key: Value as returned by the lsb_release command, translated to lower
95
- #: case, with blanks translated to underscores.
96
- #:
97
- #: * Value: Normalized value.
98
- NORMALIZED_LSB_ID = {
99
- "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4
100
- "enterpriseenterpriseserver": "oracle", # Oracle Linux 5
101
- "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation
102
- "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server
103
- "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode
104
- }
105
-
106
- #: Translation table for normalizing the distro ID derived from the file name
107
- #: of distro release files, for use by the :func:`distro.id` method.
108
- #:
109
- #: * Key: Value as derived from the file name of a distro release file,
110
- #: translated to lower case, with blanks translated to underscores.
111
- #:
112
- #: * Value: Normalized value.
113
- NORMALIZED_DISTRO_ID = {
114
- "redhat": "rhel", # RHEL 6.x, 7.x
115
- }
116
-
117
- # Pattern for content of distro release file (reversed)
118
- _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
119
- r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"
120
- )
121
-
122
- # Pattern for base file name of distro release file
123
- _DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
124
-
125
- # Base file names to be looked up for if _UNIXCONFDIR is not readable.
126
- _DISTRO_RELEASE_BASENAMES = [
127
- "SuSE-release",
128
- "arch-release",
129
- "base-release",
130
- "centos-release",
131
- "fedora-release",
132
- "gentoo-release",
133
- "mageia-release",
134
- "mandrake-release",
135
- "mandriva-release",
136
- "mandrivalinux-release",
137
- "manjaro-release",
138
- "oracle-release",
139
- "redhat-release",
140
- "rocky-release",
141
- "sl-release",
142
- "slackware-version",
143
- ]
144
-
145
- # Base file names to be ignored when searching for distro release file
146
- _DISTRO_RELEASE_IGNORE_BASENAMES = (
147
- "debian_version",
148
- "lsb-release",
149
- "oem-release",
150
- _OS_RELEASE_BASENAME,
151
- "system-release",
152
- "plesk-release",
153
- "iredmail-release",
154
- )
155
-
156
-
157
- def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]:
158
- """
159
- .. deprecated:: 1.6.0
160
-
161
- :func:`distro.linux_distribution()` is deprecated. It should only be
162
- used as a compatibility shim with Python's
163
- :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`,
164
- :func:`distro.version` and :func:`distro.name` instead.
165
-
166
- Return information about the current OS distribution as a tuple
167
- ``(id_name, version, codename)`` with items as follows:
168
-
169
- * ``id_name``: If *full_distribution_name* is false, the result of
170
- :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
171
-
172
- * ``version``: The result of :func:`distro.version`.
173
-
174
- * ``codename``: The extra item (usually in parentheses) after the
175
- os-release version number, or the result of :func:`distro.codename`.
176
-
177
- The interface of this function is compatible with the original
178
- :py:func:`platform.linux_distribution` function, supporting a subset of
179
- its parameters.
180
-
181
- The data it returns may not exactly be the same, because it uses more data
182
- sources than the original function, and that may lead to different data if
183
- the OS distribution is not consistent across multiple data sources it
184
- provides (there are indeed such distributions ...).
185
-
186
- Another reason for differences is the fact that the :func:`distro.id`
187
- method normalizes the distro ID string to a reliable machine-readable value
188
- for a number of popular OS distributions.
189
- """
190
- warnings.warn(
191
- "distro.linux_distribution() is deprecated. It should only be used as a "
192
- "compatibility shim with Python's platform.linux_distribution(). Please use "
193
- "distro.id(), distro.version() and distro.name() instead.",
194
- DeprecationWarning,
195
- stacklevel=2,
196
- )
197
- return _distro.linux_distribution(full_distribution_name)
198
-
199
-
200
- def id() -> str:
201
- """
202
- Return the distro ID of the current distribution, as a
203
- machine-readable string.
204
-
205
- For a number of OS distributions, the returned distro ID value is
206
- *reliable*, in the sense that it is documented and that it does not change
207
- across releases of the distribution.
208
-
209
- This package maintains the following reliable distro ID values:
210
-
211
- ============== =========================================
212
- Distro ID Distribution
213
- ============== =========================================
214
- "ubuntu" Ubuntu
215
- "debian" Debian
216
- "rhel" RedHat Enterprise Linux
217
- "centos" CentOS
218
- "fedora" Fedora
219
- "sles" SUSE Linux Enterprise Server
220
- "opensuse" openSUSE
221
- "amzn" Amazon Linux
222
- "arch" Arch Linux
223
- "buildroot" Buildroot
224
- "cloudlinux" CloudLinux OS
225
- "exherbo" Exherbo Linux
226
- "gentoo" GenToo Linux
227
- "ibm_powerkvm" IBM PowerKVM
228
- "kvmibm" KVM for IBM z Systems
229
- "linuxmint" Linux Mint
230
- "mageia" Mageia
231
- "mandriva" Mandriva Linux
232
- "parallels" Parallels
233
- "pidora" Pidora
234
- "raspbian" Raspbian
235
- "oracle" Oracle Linux (and Oracle Enterprise Linux)
236
- "scientific" Scientific Linux
237
- "slackware" Slackware
238
- "xenserver" XenServer
239
- "openbsd" OpenBSD
240
- "netbsd" NetBSD
241
- "freebsd" FreeBSD
242
- "midnightbsd" MidnightBSD
243
- "rocky" Rocky Linux
244
- "aix" AIX
245
- "guix" Guix System
246
- ============== =========================================
247
-
248
- If you have a need to get distros for reliable IDs added into this set,
249
- or if you find that the :func:`distro.id` function returns a different
250
- distro ID for one of the listed distros, please create an issue in the
251
- `distro issue tracker`_.
252
-
253
- **Lookup hierarchy and transformations:**
254
-
255
- First, the ID is obtained from the following sources, in the specified
256
- order. The first available and non-empty value is used:
257
-
258
- * the value of the "ID" attribute of the os-release file,
259
-
260
- * the value of the "Distributor ID" attribute returned by the lsb_release
261
- command,
262
-
263
- * the first part of the file name of the distro release file,
264
-
265
- The so determined ID value then passes the following transformations,
266
- before it is returned by this method:
267
-
268
- * it is translated to lower case,
269
-
270
- * blanks (which should not be there anyway) are translated to underscores,
271
-
272
- * a normalization of the ID is performed, based upon
273
- `normalization tables`_. The purpose of this normalization is to ensure
274
- that the ID is as reliable as possible, even across incompatible changes
275
- in the OS distributions. A common reason for an incompatible change is
276
- the addition of an os-release file, or the addition of the lsb_release
277
- command, with ID values that differ from what was previously determined
278
- from the distro release file name.
279
- """
280
- return _distro.id()
281
-
282
-
283
- def name(pretty: bool = False) -> str:
284
- """
285
- Return the name of the current OS distribution, as a human-readable
286
- string.
287
-
288
- If *pretty* is false, the name is returned without version or codename.
289
- (e.g. "CentOS Linux")
290
-
291
- If *pretty* is true, the version and codename are appended.
292
- (e.g. "CentOS Linux 7.1.1503 (Core)")
293
-
294
- **Lookup hierarchy:**
295
-
296
- The name is obtained from the following sources, in the specified order.
297
- The first available and non-empty value is used:
298
-
299
- * If *pretty* is false:
300
-
301
- - the value of the "NAME" attribute of the os-release file,
302
-
303
- - the value of the "Distributor ID" attribute returned by the lsb_release
304
- command,
305
-
306
- - the value of the "<name>" field of the distro release file.
307
-
308
- * If *pretty* is true:
309
-
310
- - the value of the "PRETTY_NAME" attribute of the os-release file,
311
-
312
- - the value of the "Description" attribute returned by the lsb_release
313
- command,
314
-
315
- - the value of the "<name>" field of the distro release file, appended
316
- with the value of the pretty version ("<version_id>" and "<codename>"
317
- fields) of the distro release file, if available.
318
- """
319
- return _distro.name(pretty)
320
-
321
-
322
- def version(pretty: bool = False, best: bool = False) -> str:
323
- """
324
- Return the version of the current OS distribution, as a human-readable
325
- string.
326
-
327
- If *pretty* is false, the version is returned without codename (e.g.
328
- "7.0").
329
-
330
- If *pretty* is true, the codename in parenthesis is appended, if the
331
- codename is non-empty (e.g. "7.0 (Maipo)").
332
-
333
- Some distributions provide version numbers with different precisions in
334
- the different sources of distribution information. Examining the different
335
- sources in a fixed priority order does not always yield the most precise
336
- version (e.g. for Debian 8.2, or CentOS 7.1).
337
-
338
- Some other distributions may not provide this kind of information. In these
339
- cases, an empty string would be returned. This behavior can be observed
340
- with rolling releases distributions (e.g. Arch Linux).
341
-
342
- The *best* parameter can be used to control the approach for the returned
343
- version:
344
-
345
- If *best* is false, the first non-empty version number in priority order of
346
- the examined sources is returned.
347
-
348
- If *best* is true, the most precise version number out of all examined
349
- sources is returned.
350
-
351
- **Lookup hierarchy:**
352
-
353
- In all cases, the version number is obtained from the following sources.
354
- If *best* is false, this order represents the priority order:
355
-
356
- * the value of the "VERSION_ID" attribute of the os-release file,
357
- * the value of the "Release" attribute returned by the lsb_release
358
- command,
359
- * the version number parsed from the "<version_id>" field of the first line
360
- of the distro release file,
361
- * the version number parsed from the "PRETTY_NAME" attribute of the
362
- os-release file, if it follows the format of the distro release files.
363
- * the version number parsed from the "Description" attribute returned by
364
- the lsb_release command, if it follows the format of the distro release
365
- files.
366
- """
367
- return _distro.version(pretty, best)
368
-
369
-
370
- def version_parts(best: bool = False) -> Tuple[str, str, str]:
371
- """
372
- Return the version of the current OS distribution as a tuple
373
- ``(major, minor, build_number)`` with items as follows:
374
-
375
- * ``major``: The result of :func:`distro.major_version`.
376
-
377
- * ``minor``: The result of :func:`distro.minor_version`.
378
-
379
- * ``build_number``: The result of :func:`distro.build_number`.
380
-
381
- For a description of the *best* parameter, see the :func:`distro.version`
382
- method.
383
- """
384
- return _distro.version_parts(best)
385
-
386
-
387
- def major_version(best: bool = False) -> str:
388
- """
389
- Return the major version of the current OS distribution, as a string,
390
- if provided.
391
- Otherwise, the empty string is returned. The major version is the first
392
- part of the dot-separated version string.
393
-
394
- For a description of the *best* parameter, see the :func:`distro.version`
395
- method.
396
- """
397
- return _distro.major_version(best)
398
-
399
-
400
- def minor_version(best: bool = False) -> str:
401
- """
402
- Return the minor version of the current OS distribution, as a string,
403
- if provided.
404
- Otherwise, the empty string is returned. The minor version is the second
405
- part of the dot-separated version string.
406
-
407
- For a description of the *best* parameter, see the :func:`distro.version`
408
- method.
409
- """
410
- return _distro.minor_version(best)
411
-
412
-
413
- def build_number(best: bool = False) -> str:
414
- """
415
- Return the build number of the current OS distribution, as a string,
416
- if provided.
417
- Otherwise, the empty string is returned. The build number is the third part
418
- of the dot-separated version string.
419
-
420
- For a description of the *best* parameter, see the :func:`distro.version`
421
- method.
422
- """
423
- return _distro.build_number(best)
424
-
425
-
426
- def like() -> str:
427
- """
428
- Return a space-separated list of distro IDs of distributions that are
429
- closely related to the current OS distribution with regard to packaging
430
- and programming interfaces, for example distributions that the current
431
- distribution is a derivative of.
432
-
433
- **Lookup hierarchy:**
434
-
435
- This information item is only provided by the os-release file.
436
- For details, see the description of the "ID_LIKE" attribute in the
437
- `os-release man page
438
- <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
439
- """
440
- return _distro.like()
441
-
442
-
443
- def codename() -> str:
444
- """
445
- Return the codename for the release of the current OS distribution,
446
- as a string.
447
-
448
- If the distribution does not have a codename, an empty string is returned.
449
-
450
- Note that the returned codename is not always really a codename. For
451
- example, openSUSE returns "x86_64". This function does not handle such
452
- cases in any special way and just returns the string it finds, if any.
453
-
454
- **Lookup hierarchy:**
455
-
456
- * the codename within the "VERSION" attribute of the os-release file, if
457
- provided,
458
-
459
- * the value of the "Codename" attribute returned by the lsb_release
460
- command,
461
-
462
- * the value of the "<codename>" field of the distro release file.
463
- """
464
- return _distro.codename()
465
-
466
-
467
- def info(pretty: bool = False, best: bool = False) -> InfoDict:
468
- """
469
- Return certain machine-readable information items about the current OS
470
- distribution in a dictionary, as shown in the following example:
471
-
472
- .. sourcecode:: python
473
-
474
- {
475
- 'id': 'rhel',
476
- 'version': '7.0',
477
- 'version_parts': {
478
- 'major': '7',
479
- 'minor': '0',
480
- 'build_number': ''
481
- },
482
- 'like': 'fedora',
483
- 'codename': 'Maipo'
484
- }
485
-
486
- The dictionary structure and keys are always the same, regardless of which
487
- information items are available in the underlying data sources. The values
488
- for the various keys are as follows:
489
-
490
- * ``id``: The result of :func:`distro.id`.
491
-
492
- * ``version``: The result of :func:`distro.version`.
493
-
494
- * ``version_parts -> major``: The result of :func:`distro.major_version`.
495
-
496
- * ``version_parts -> minor``: The result of :func:`distro.minor_version`.
497
-
498
- * ``version_parts -> build_number``: The result of
499
- :func:`distro.build_number`.
500
-
501
- * ``like``: The result of :func:`distro.like`.
502
-
503
- * ``codename``: The result of :func:`distro.codename`.
504
-
505
- For a description of the *pretty* and *best* parameters, see the
506
- :func:`distro.version` method.
507
- """
508
- return _distro.info(pretty, best)
509
-
510
-
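
(A minimal sketch of consuming the returned dictionary; since the structure
and keys are always present, no .get() fallbacks are needed at the top level.)

    import distro

    inf = distro.info(best=True)
    label = f"{inf['id']} {inf['version']}"
    major = inf['version_parts']['major']  # may be "" on rolling releases
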
511
- def os_release_info() -> Dict[str, str]:
512
- """
513
- Return a dictionary containing key-value pairs for the information items
514
- from the os-release file data source of the current OS distribution.
515
-
516
- See `os-release file`_ for details about these information items.
517
- """
518
- return _distro.os_release_info()
519
-
520
-
521
- def lsb_release_info() -> Dict[str, str]:
522
- """
523
- Return a dictionary containing key-value pairs for the information items
524
- from the lsb_release command data source of the current OS distribution.
525
-
526
- See `lsb_release command output`_ for details about these information
527
- items.
528
- """
529
- return _distro.lsb_release_info()
530
-
531
-
532
- def distro_release_info() -> Dict[str, str]:
533
- """
534
- Return a dictionary containing key-value pairs for the information items
535
- from the distro release file data source of the current OS distribution.
536
-
537
- See `distro release file`_ for details about these information items.
538
- """
539
- return _distro.distro_release_info()
540
-
541
-
542
- def uname_info() -> Dict[str, str]:
543
- """
544
- Return a dictionary containing key-value pairs for the information items
545
- from the uname command data source of the current OS distribution.
546
- """
547
- return _distro.uname_info()
548
-
549
-
550
- def os_release_attr(attribute: str) -> str:
551
- """
552
- Return a single named information item from the os-release file data source
553
- of the current OS distribution.
554
-
555
- Parameters:
556
-
557
- * ``attribute`` (string): Key of the information item.
558
-
559
- Returns:
560
-
561
- * (string): Value of the information item, if the item exists.
562
- The empty string, if the item does not exist.
563
-
564
- See `os-release file`_ for details about these information items.
565
- """
566
- return _distro.os_release_attr(attribute)
567
-
568
-
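
(Two illustrative lookups; attribute names are the lower-cased os-release
keys, and absent keys yield the empty string.)

    import distro

    distro.os_release_attr('id')                # e.g. "ubuntu"
    distro.os_release_attr('version_codename')  # "" if the key is absent
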
569
- def lsb_release_attr(attribute: str) -> str:
570
- """
571
- Return a single named information item from the lsb_release command output
572
- data source of the current OS distribution.
573
-
574
- Parameters:
575
-
576
- * ``attribute`` (string): Key of the information item.
577
-
578
- Returns:
579
-
580
- * (string): Value of the information item, if the item exists.
581
- The empty string, if the item does not exist.
582
-
583
- See `lsb_release command output`_ for details about these information
584
- items.
585
- """
586
- return _distro.lsb_release_attr(attribute)
587
-
588
-
589
- def distro_release_attr(attribute: str) -> str:
590
- """
591
- Return a single named information item from the distro release file
592
- data source of the current OS distribution.
593
-
594
- Parameters:
595
-
596
- * ``attribute`` (string): Key of the information item.
597
-
598
- Returns:
599
-
600
- * (string): Value of the information item, if the item exists.
601
- The empty string, if the item does not exist.
602
-
603
- See `distro release file`_ for details about these information items.
604
- """
605
- return _distro.distro_release_attr(attribute)
606
-
607
-
608
- def uname_attr(attribute: str) -> str:
609
- """
610
- Return a single named information item from the distro release file
611
- data source of the current OS distribution.
612
-
613
- Parameters:
614
-
615
- * ``attribute`` (string): Key of the information item.
616
-
617
- Returns:
618
-
619
- * (string): Value of the information item, if the item exists.
620
- The empty string, if the item does not exist.
621
- """
622
- return _distro.uname_attr(attribute)
623
-
624
-
625
- try:
626
- from functools import cached_property
627
- except ImportError:
628
- # Python < 3.8
629
- class cached_property: # type: ignore
630
- """A version of @property which caches the value. On access, it calls the
631
- underlying function and sets the value in `__dict__` so future accesses
632
- will not re-call the property.
633
- """
634
-
635
- def __init__(self, f: Callable[[Any], Any]) -> None:
636
- self._fname = f.__name__
637
- self._f = f
638
-
639
- def __get__(self, obj: Any, owner: Type[Any]) -> Any:
640
- assert obj is not None, f"call {self._fname} on an instance"
641
- ret = obj.__dict__[self._fname] = self._f(obj)
642
- return ret
643
-
644
-
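
(A small sketch of why the fallback works: the first access stores the
computed value in the instance's __dict__, which then shadows the non-data
descriptor on every later lookup. The Expensive class is purely illustrative.)

    class Expensive:
        @cached_property
        def value(self):
            print("computed once")
            return 42

    e = Expensive()
    e.value  # prints "computed once" and returns 42
    e.value  # served straight from e.__dict__, no recomputation
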
645
- class LinuxDistribution:
646
- """
647
- Provides information about an OS distribution.
648
-
649
- This package creates a private module-global instance of this class with
650
- default initialization arguments, which is used by the
651
- `consolidated accessor functions`_ and `single source accessor functions`_.
652
- By using default initialization arguments, that module-global instance
653
- returns data about the current OS distribution (i.e. the distro this
654
- package runs on).
655
-
656
- Normally, it is not necessary to create additional instances of this class.
657
- However, in situations where control is needed over the exact data sources
658
- that are used, instances of this class can be created with a specific
659
- distro release file, or a specific os-release file, or without invoking the
660
- lsb_release command.
661
- """
662
-
663
- def __init__(
664
- self,
665
- include_lsb: Optional[bool] = None,
666
- os_release_file: str = "",
667
- distro_release_file: str = "",
668
- include_uname: Optional[bool] = None,
669
- root_dir: Optional[str] = None,
670
- include_oslevel: Optional[bool] = None,
671
- ) -> None:
672
- """
673
- The initialization method of this class gathers information from the
674
- available data sources, and stores that in private instance attributes.
675
- Subsequent access to the information items uses these private instance
676
- attributes, so that the data sources are read only once.
677
-
678
- Parameters:
679
-
680
- * ``include_lsb`` (bool): Controls whether the
681
- `lsb_release command output`_ is included as a data source.
682
-
683
- If the lsb_release command is not available in the program execution
684
- path, the data source for the lsb_release command will be empty.
685
-
686
- * ``os_release_file`` (string): The path name of the
687
- `os-release file`_ that is to be used as a data source.
688
-
689
- An empty string (the default) will cause the default path name to
690
- be used (see `os-release file`_ for details).
691
-
692
- If the specified or defaulted os-release file does not exist, the
693
- data source for the os-release file will be empty.
694
-
695
- * ``distro_release_file`` (string): The path name of the
696
- `distro release file`_ that is to be used as a data source.
697
-
698
- An empty string (the default) will cause a default search algorithm
699
- to be used (see `distro release file`_ for details).
700
-
701
- If the specified distro release file does not exist, or if no default
702
- distro release file can be found, the data source for the distro
703
- release file will be empty.
704
-
705
- * ``include_uname`` (bool): Controls whether uname command output is
706
- included as a data source. If the uname command is not available in
707
- the program execution path, the data source for the uname command will
708
- be empty.
709
-
710
- * ``root_dir`` (string): The absolute path to the root directory to use
711
- to find distro-related information files. Note that ``include_*``
712
- parameters must not be enabled in combination with ``root_dir``.
713
-
714
- * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command
715
- output is included as a data source. If the oslevel command is not
716
- available in the program execution path, the data source will be
717
- empty.
718
-
719
- Public instance attributes:
720
-
721
- * ``os_release_file`` (string): The path name of the
722
- `os-release file`_ that is actually used as a data source. The
723
- empty string if no os-release file is used as a data source.
724
-
725
- * ``distro_release_file`` (string): The path name of the
726
- `distro release file`_ that is actually used as a data source. The
727
- empty string if no distro release file is used as a data source.
728
-
729
- * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
730
- This controls whether the lsb information will be loaded.
731
-
732
- * ``include_uname`` (bool): The result of the ``include_uname``
733
- parameter. This controls whether the uname information will
734
- be loaded.
735
-
736
- * ``include_oslevel`` (bool): The result of the ``include_oslevel``
737
- parameter. This controls whether (AIX) oslevel information will be
738
- loaded.
739
-
740
- * ``root_dir`` (string): The result of the ``root_dir`` parameter.
741
- The absolute path to the root directory to use to find distro-related
742
- information files.
743
-
744
- Raises:
745
-
746
- * :py:exc:`ValueError`: Initialization parameters combination is not
747
- supported.
748
-
749
- * :py:exc:`OSError`: Some I/O issue with an os-release file or distro
750
- release file.
751
-
752
- * :py:exc:`UnicodeError`: A data source has unexpected characters or
753
- uses an unexpected encoding.
754
- """
755
- self.root_dir = root_dir
756
- self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR
757
- self.usr_lib_dir = (
758
- os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR
759
- )
760
-
761
- if os_release_file:
762
- self.os_release_file = os_release_file
763
- else:
764
- etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME)
765
- usr_lib_os_release_file = os.path.join(
766
- self.usr_lib_dir, _OS_RELEASE_BASENAME
767
- )
768
-
769
- # NOTE: The idea is to respect order **and** have it set
770
- # at all times for API backwards compatibility.
771
- if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile(
772
- usr_lib_os_release_file
773
- ):
774
- self.os_release_file = etc_dir_os_release_file
775
- else:
776
- self.os_release_file = usr_lib_os_release_file
777
-
778
- self.distro_release_file = distro_release_file or "" # updated later
779
-
780
- is_root_dir_defined = root_dir is not None
781
- if is_root_dir_defined and (include_lsb or include_uname or include_oslevel):
782
- raise ValueError(
783
- "Including subprocess data sources from specific root_dir is disallowed"
784
- " to prevent false information"
785
- )
786
- self.include_lsb = (
787
- include_lsb if include_lsb is not None else not is_root_dir_defined
788
- )
789
- self.include_uname = (
790
- include_uname if include_uname is not None else not is_root_dir_defined
791
- )
792
- self.include_oslevel = (
793
- include_oslevel if include_oslevel is not None else not is_root_dir_defined
794
- )
795
-
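
(A hedged example of pointing the class at a mounted image; the path is
hypothetical. With root_dir set, the subprocess-backed sources default to
disabled, and explicitly enabling any include_* flag raises ValueError.)

    dist = LinuxDistribution(root_dir="/mnt/guest-rootfs")
    dist.id(), dist.version()  # read from <root_dir>/etc and <root_dir>/usr/lib
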
796
- def __repr__(self) -> str:
797
- """Return repr of all info"""
798
- return (
799
- "LinuxDistribution("
800
- "os_release_file={self.os_release_file!r}, "
801
- "distro_release_file={self.distro_release_file!r}, "
802
- "include_lsb={self.include_lsb!r}, "
803
- "include_uname={self.include_uname!r}, "
804
- "include_oslevel={self.include_oslevel!r}, "
805
- "root_dir={self.root_dir!r}, "
806
- "_os_release_info={self._os_release_info!r}, "
807
- "_lsb_release_info={self._lsb_release_info!r}, "
808
- "_distro_release_info={self._distro_release_info!r}, "
809
- "_uname_info={self._uname_info!r}, "
810
- "_oslevel_info={self._oslevel_info!r})".format(self=self)
811
- )
812
-
813
- def linux_distribution(
814
- self, full_distribution_name: bool = True
815
- ) -> Tuple[str, str, str]:
816
- """
817
- Return information about the OS distribution that is compatible
818
- with Python's :func:`platform.linux_distribution`, supporting a subset
819
- of its parameters.
820
-
821
- For details, see :func:`distro.linux_distribution`.
822
- """
823
- return (
824
- self.name() if full_distribution_name else self.id(),
825
- self.version(),
826
- self._os_release_info.get("release_codename") or self.codename(),
827
- )
828
-
829
- def id(self) -> str:
830
- """Return the distro ID of the OS distribution, as a string.
831
-
832
- For details, see :func:`distro.id`.
833
- """
834
-
835
- def normalize(distro_id: str, table: Dict[str, str]) -> str:
836
- distro_id = distro_id.lower().replace(" ", "_")
837
- return table.get(distro_id, distro_id)
838
-
839
- distro_id = self.os_release_attr("id")
840
- if distro_id:
841
- return normalize(distro_id, NORMALIZED_OS_ID)
842
-
843
- distro_id = self.lsb_release_attr("distributor_id")
844
- if distro_id:
845
- return normalize(distro_id, NORMALIZED_LSB_ID)
846
-
847
- distro_id = self.distro_release_attr("id")
848
- if distro_id:
849
- return normalize(distro_id, NORMALIZED_DISTRO_ID)
850
-
851
- distro_id = self.uname_attr("id")
852
- if distro_id:
853
- return normalize(distro_id, NORMALIZED_DISTRO_ID)
854
-
855
- return ""
856
-
857
- def name(self, pretty: bool = False) -> str:
858
- """
859
- Return the name of the OS distribution, as a string.
860
-
861
- For details, see :func:`distro.name`.
862
- """
863
- name = (
864
- self.os_release_attr("name")
865
- or self.lsb_release_attr("distributor_id")
866
- or self.distro_release_attr("name")
867
- or self.uname_attr("name")
868
- )
869
- if pretty:
870
- name = self.os_release_attr("pretty_name") or self.lsb_release_attr(
871
- "description"
872
- )
873
- if not name:
874
- name = self.distro_release_attr("name") or self.uname_attr("name")
875
- version = self.version(pretty=True)
876
- if version:
877
- name = f"{name} {version}"
878
- return name or ""
879
-
880
- def version(self, pretty: bool = False, best: bool = False) -> str:
881
- """
882
- Return the version of the OS distribution, as a string.
883
-
884
- For details, see :func:`distro.version`.
885
- """
886
- versions = [
887
- self.os_release_attr("version_id"),
888
- self.lsb_release_attr("release"),
889
- self.distro_release_attr("version_id"),
890
- self._parse_distro_release_content(self.os_release_attr("pretty_name")).get(
891
- "version_id", ""
892
- ),
893
- self._parse_distro_release_content(
894
- self.lsb_release_attr("description")
895
- ).get("version_id", ""),
896
- self.uname_attr("release"),
897
- ]
898
- if self.uname_attr("id").startswith("aix"):
899
- # On AIX platforms, prefer oslevel command output.
900
- versions.insert(0, self.oslevel_info())
901
- elif self.id() == "debian" or "debian" in self.like().split():
902
- # On Debian-like, add debian_version file content to candidates list.
903
- versions.append(self._debian_version)
904
- version = ""
905
- if best:
906
- # This algorithm uses the last version in priority order that has
907
- # the best precision. If the versions are not in conflict, that
908
- # does not matter; otherwise, using the last one instead of the
909
- # first one might be considered a surprise.
910
- for v in versions:
911
- if v.count(".") > version.count(".") or version == "":
912
- version = v
913
- else:
914
- for v in versions:
915
- if v != "":
916
- version = v
917
- break
918
- if pretty and version and self.codename():
919
- version = f"{version} ({self.codename()})"
920
- return version
921
-
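
(A toy sketch of the *best* selection rule used above: a later candidate
wins only if it carries strictly more dot-separated precision.)

    candidates = ["7.0", "", "7.1.1503", "7.1"]
    best = ""
    for v in candidates:
        if v.count(".") > best.count(".") or best == "":
            best = v
    assert best == "7.1.1503"
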
922
- def version_parts(self, best: bool = False) -> Tuple[str, str, str]:
923
- """
924
- Return the version of the OS distribution, as a tuple of version
925
- numbers.
926
-
927
- For details, see :func:`distro.version_parts`.
928
- """
929
- version_str = self.version(best=best)
930
- if version_str:
931
- version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")
932
- matches = version_regex.match(version_str)
933
- if matches:
934
- major, minor, build_number = matches.groups()
935
- return major, minor or "", build_number or ""
936
- return "", "", ""
937
-
938
- def major_version(self, best: bool = False) -> str:
939
- """
940
- Return the major version number of the current distribution.
941
-
942
- For details, see :func:`distro.major_version`.
943
- """
944
- return self.version_parts(best)[0]
945
-
946
- def minor_version(self, best: bool = False) -> str:
947
- """
948
- Return the minor version number of the current distribution.
949
-
950
- For details, see :func:`distro.minor_version`.
951
- """
952
- return self.version_parts(best)[1]
953
-
954
- def build_number(self, best: bool = False) -> str:
955
- """
956
- Return the build number of the current distribution.
957
-
958
- For details, see :func:`distro.build_number`.
959
- """
960
- return self.version_parts(best)[2]
961
-
962
- def like(self) -> str:
963
- """
964
- Return the IDs of distributions that are like the OS distribution.
965
-
966
- For details, see :func:`distro.like`.
967
- """
968
- return self.os_release_attr("id_like") or ""
969
-
970
- def codename(self) -> str:
971
- """
972
- Return the codename of the OS distribution.
973
-
974
- For details, see :func:`distro.codename`.
975
- """
976
- try:
977
- # Handle os_release specially since distros might purposefully set
978
- # this to empty string to have no codename
979
- return self._os_release_info["codename"]
980
- except KeyError:
981
- return (
982
- self.lsb_release_attr("codename")
983
- or self.distro_release_attr("codename")
984
- or ""
985
- )
986
-
987
- def info(self, pretty: bool = False, best: bool = False) -> InfoDict:
988
- """
989
- Return certain machine-readable information about the OS
990
- distribution.
991
-
992
- For details, see :func:`distro.info`.
993
- """
994
- return dict(
995
- id=self.id(),
996
- version=self.version(pretty, best),
997
- version_parts=dict(
998
- major=self.major_version(best),
999
- minor=self.minor_version(best),
1000
- build_number=self.build_number(best),
1001
- ),
1002
- like=self.like(),
1003
- codename=self.codename(),
1004
- )
1005
-
1006
- def os_release_info(self) -> Dict[str, str]:
1007
- """
1008
- Return a dictionary containing key-value pairs for the information
1009
- items from the os-release file data source of the OS distribution.
1010
-
1011
- For details, see :func:`distro.os_release_info`.
1012
- """
1013
- return self._os_release_info
1014
-
1015
- def lsb_release_info(self) -> Dict[str, str]:
1016
- """
1017
- Return a dictionary containing key-value pairs for the information
1018
- items from the lsb_release command data source of the OS
1019
- distribution.
1020
-
1021
- For details, see :func:`distro.lsb_release_info`.
1022
- """
1023
- return self._lsb_release_info
1024
-
1025
- def distro_release_info(self) -> Dict[str, str]:
1026
- """
1027
- Return a dictionary containing key-value pairs for the information
1028
- items from the distro release file data source of the OS
1029
- distribution.
1030
-
1031
- For details, see :func:`distro.distro_release_info`.
1032
- """
1033
- return self._distro_release_info
1034
-
1035
- def uname_info(self) -> Dict[str, str]:
1036
- """
1037
- Return a dictionary containing key-value pairs for the information
1038
- items from the uname command data source of the OS distribution.
1039
-
1040
- For details, see :func:`distro.uname_info`.
1041
- """
1042
- return self._uname_info
1043
-
1044
- def oslevel_info(self) -> str:
1045
- """
1046
- Return the AIX oslevel command output.
1047
- """
1048
- return self._oslevel_info
1049
-
1050
- def os_release_attr(self, attribute: str) -> str:
1051
- """
1052
- Return a single named information item from the os-release file data
1053
- source of the OS distribution.
1054
-
1055
- For details, see :func:`distro.os_release_attr`.
1056
- """
1057
- return self._os_release_info.get(attribute, "")
1058
-
1059
- def lsb_release_attr(self, attribute: str) -> str:
1060
- """
1061
- Return a single named information item from the lsb_release command
1062
- output data source of the OS distribution.
1063
-
1064
- For details, see :func:`distro.lsb_release_attr`.
1065
- """
1066
- return self._lsb_release_info.get(attribute, "")
1067
-
1068
- def distro_release_attr(self, attribute: str) -> str:
1069
- """
1070
- Return a single named information item from the distro release file
1071
- data source of the OS distribution.
1072
-
1073
- For details, see :func:`distro.distro_release_attr`.
1074
- """
1075
- return self._distro_release_info.get(attribute, "")
1076
-
1077
- def uname_attr(self, attribute: str) -> str:
1078
- """
1079
- Return a single named information item from the uname command
1080
- output data source of the OS distribution.
1081
-
1082
- For details, see :func:`distro.uname_attr`.
1083
- """
1084
- return self._uname_info.get(attribute, "")
1085
-
1086
- @cached_property
1087
- def _os_release_info(self) -> Dict[str, str]:
1088
- """
1089
- Get the information items from the specified os-release file.
1090
-
1091
- Returns:
1092
- A dictionary containing all information items.
1093
- """
1094
- if os.path.isfile(self.os_release_file):
1095
- with open(self.os_release_file, encoding="utf-8") as release_file:
1096
- return self._parse_os_release_content(release_file)
1097
- return {}
1098
-
1099
- @staticmethod
1100
- def _parse_os_release_content(lines: TextIO) -> Dict[str, str]:
1101
- """
1102
- Parse the lines of an os-release file.
1103
-
1104
- Parameters:
1105
-
1106
- * lines: Iterable through the lines in the os-release file.
1107
- Each line must be a unicode string or a UTF-8 encoded byte
1108
- string.
1109
-
1110
- Returns:
1111
- A dictionary containing all information items.
1112
- """
1113
- props = {}
1114
- lexer = shlex.shlex(lines, posix=True)
1115
- lexer.whitespace_split = True
1116
-
1117
- tokens = list(lexer)
1118
- for token in tokens:
1119
- # At this point, all shell-like parsing has been done (i.e.
1120
- # comments processed, quotes and backslash escape sequences
1121
- # processed, multi-line values assembled, trailing newlines
1122
- # stripped, etc.), so the tokens are now either:
1123
- # * variable assignments: var=value
1124
- # * commands or their arguments (not allowed in os-release)
1125
- # Ignore any tokens that are not variable assignments
1126
- if "=" in token:
1127
- k, v = token.split("=", 1)
1128
- props[k.lower()] = v
1129
-
1130
- if "version" in props:
1131
- # extract release codename (if any) from version attribute
1132
- match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"])
1133
- if match:
1134
- release_codename = match.group(1) or match.group(2)
1135
- props["codename"] = props["release_codename"] = release_codename
1136
-
1137
- if "version_codename" in props:
1138
- # os-release added a version_codename field. Use that in
1139
- preference to anything else. Note that some distros purposefully
1140
- # do not have code names. They should be setting
1141
- # version_codename=""
1142
- props["codename"] = props["version_codename"]
1143
- elif "ubuntu_codename" in props:
1144
- # Same as above but a non-standard field name used on older Ubuntus
1145
- props["codename"] = props["ubuntu_codename"]
1146
-
1147
- return props
1148
-
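
(A minimal sketch of the parser on a synthetic os-release snippet; note the
codename being pulled out of the parenthesized part of VERSION.)

    import io

    snippet = 'NAME="Ubuntu"\nVERSION="22.04.1 LTS (Jammy Jellyfish)"\n'
    props = LinuxDistribution._parse_os_release_content(io.StringIO(snippet))
    assert props["name"] == "Ubuntu"
    assert props["codename"] == "Jammy Jellyfish"
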
1149
- @cached_property
1150
- def _lsb_release_info(self) -> Dict[str, str]:
1151
- """
1152
- Get the information items from the lsb_release command output.
1153
-
1154
- Returns:
1155
- A dictionary containing all information items.
1156
- """
1157
- if not self.include_lsb:
1158
- return {}
1159
- try:
1160
- cmd = ("lsb_release", "-a")
1161
- stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
1162
- # Command not found or lsb_release returned error
1163
- except (OSError, subprocess.CalledProcessError):
1164
- return {}
1165
- content = self._to_str(stdout).splitlines()
1166
- return self._parse_lsb_release_content(content)
1167
-
1168
- @staticmethod
1169
- def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]:
1170
- """
1171
- Parse the output of the lsb_release command.
1172
-
1173
- Parameters:
1174
-
1175
- * lines: Iterable through the lines of the lsb_release output.
1176
- Each line must be a unicode string or a UTF-8 encoded byte
1177
- string.
1178
-
1179
- Returns:
1180
- A dictionary containing all information items.
1181
- """
1182
- props = {}
1183
- for line in lines:
1184
- kv = line.strip("\n").split(":", 1)
1185
- if len(kv) != 2:
1186
- # Ignore lines without colon.
1187
- continue
1188
- k, v = kv
1189
- props.update({k.replace(" ", "_").lower(): v.strip()})
1190
- return props
1191
-
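
(For illustration, the parser applied to two synthetic output lines; keys
are lower-cased with spaces turned into underscores.)

    lines = ["Distributor ID:\tUbuntu\n", "Release:\t22.04\n"]
    props = LinuxDistribution._parse_lsb_release_content(lines)
    assert props == {"distributor_id": "Ubuntu", "release": "22.04"}
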
1192
- @cached_property
1193
- def _uname_info(self) -> Dict[str, str]:
1194
- if not self.include_uname:
1195
- return {}
1196
- try:
1197
- cmd = ("uname", "-rs")
1198
- stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
1199
- except OSError:
1200
- return {}
1201
- content = self._to_str(stdout).splitlines()
1202
- return self._parse_uname_content(content)
1203
-
1204
- @cached_property
1205
- def _oslevel_info(self) -> str:
1206
- if not self.include_oslevel:
1207
- return ""
1208
- try:
1209
- stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL)
1210
- except (OSError, subprocess.CalledProcessError):
1211
- return ""
1212
- return self._to_str(stdout).strip()
1213
-
1214
- @cached_property
1215
- def _debian_version(self) -> str:
1216
- try:
1217
- with open(
1218
- os.path.join(self.etc_dir, "debian_version"), encoding="ascii"
1219
- ) as fp:
1220
- return fp.readline().rstrip()
1221
- except FileNotFoundError:
1222
- return ""
1223
-
1224
- @staticmethod
1225
- def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]:
1226
- if not lines:
1227
- return {}
1228
- props = {}
1229
- match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip())
1230
- if match:
1231
- name, version = match.groups()
1232
-
1233
- # This is to prevent the Linux kernel version from
1234
- # appearing as the 'best' version on otherwise
1235
- # identifiable distributions.
1236
- if name == "Linux":
1237
- return {}
1238
- props["id"] = name.lower()
1239
- props["name"] = name
1240
- props["release"] = version
1241
- return props
1242
-
1243
- @staticmethod
1244
- def _to_str(bytestring: bytes) -> str:
1245
- encoding = sys.getfilesystemencoding()
1246
- return bytestring.decode(encoding)
1247
-
1248
- @cached_property
1249
- def _distro_release_info(self) -> Dict[str, str]:
1250
- """
1251
- Get the information items from the specified distro release file.
1252
-
1253
- Returns:
1254
- A dictionary containing all information items.
1255
- """
1256
- if self.distro_release_file:
1257
- # If it was specified, we use it and parse what we can, even if
1258
- # its file name or content does not match the expected pattern.
1259
- distro_info = self._parse_distro_release_file(self.distro_release_file)
1260
- basename = os.path.basename(self.distro_release_file)
1261
- # The file name pattern for user-specified distro release files
1262
- # is somewhat more tolerant (compared to when searching for the
1263
- # file), because we want to use what was specified as best as
1264
- # possible.
1265
- match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
1266
- else:
1267
- try:
1268
- basenames = [
1269
- basename
1270
- for basename in os.listdir(self.etc_dir)
1271
- if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES
1272
- and os.path.isfile(os.path.join(self.etc_dir, basename))
1273
- ]
1274
- # We sort for repeatability in cases where there are multiple
1275
- # distro specific files; e.g. CentOS, Oracle, Enterprise all
1276
- # containing `redhat-release` on top of their own.
1277
- basenames.sort()
1278
- except OSError:
1279
- # This may occur when /etc is not readable but we can't be
1280
- # sure about the *-release files. Check common entries of
1281
- # /etc for information. If they turn out to not be there the
1282
- # error is handled in `_parse_distro_release_file()`.
1283
- basenames = _DISTRO_RELEASE_BASENAMES
1284
- for basename in basenames:
1285
- match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
1286
- if match is None:
1287
- continue
1288
- filepath = os.path.join(self.etc_dir, basename)
1289
- distro_info = self._parse_distro_release_file(filepath)
1290
- # The name is always present if the pattern matches.
1291
- if "name" not in distro_info:
1292
- continue
1293
- self.distro_release_file = filepath
1294
- break
1295
- else: # the loop didn't "break": no candidate.
1296
- return {}
1297
-
1298
- if match is not None:
1299
- distro_info["id"] = match.group(1)
1300
-
1301
- # CloudLinux < 7: manually enrich info with proper id.
1302
- if "cloudlinux" in distro_info.get("name", "").lower():
1303
- distro_info["id"] = "cloudlinux"
1304
-
1305
- return distro_info
1306
-
1307
- def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]:
1308
- """
1309
- Parse a distro release file.
1310
-
1311
- Parameters:
1312
-
1313
- * filepath: Path name of the distro release file.
1314
-
1315
- Returns:
1316
- A dictionary containing all information items.
1317
- """
1318
- try:
1319
- with open(filepath, encoding="utf-8") as fp:
1320
- # Only parse the first line. For instance, on SLES there
1321
- # are multiple lines. We don't want them...
1322
- return self._parse_distro_release_content(fp.readline())
1323
- except OSError:
1324
- # Ignore not being able to read a specific, seemingly version
1325
- # related file.
1326
- # See https://github.com/python-distro/distro/issues/162
1327
- return {}
1328
-
1329
- @staticmethod
1330
- def _parse_distro_release_content(line: str) -> Dict[str, str]:
1331
- """
1332
- Parse a line from a distro release file.
1333
-
1334
- Parameters:
1335
- * line: Line from the distro release file. Must be a unicode string
1336
- or a UTF-8 encoded byte string.
1337
-
1338
- Returns:
1339
- A dictionary containing all information items.
1340
- """
1341
- matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
1342
- distro_info = {}
1343
- if matches:
1344
- # regexp ensures non-None
1345
- distro_info["name"] = matches.group(3)[::-1]
1346
- if matches.group(2):
1347
- distro_info["version_id"] = matches.group(2)[::-1]
1348
- if matches.group(1):
1349
- distro_info["codename"] = matches.group(1)[::-1]
1350
- elif line:
1351
- distro_info["name"] = line.strip()
1352
- return distro_info
1353
-
1354
-
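
(A hedged sketch of the reversed-pattern parse on a typical first line; the
exact grouping depends on _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN, which is
defined earlier in the module.)

    line = "CentOS Linux release 7.1.1503 (Core)"
    info = LinuxDistribution._parse_distro_release_content(line)
    # expected: {'name': 'CentOS Linux', 'version_id': '7.1.1503',
    #            'codename': 'Core'}
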
1355
- _distro = LinuxDistribution()
1356
-
1357
-
1358
- def main() -> None:
1359
- logger = logging.getLogger(__name__)
1360
- logger.setLevel(logging.DEBUG)
1361
- logger.addHandler(logging.StreamHandler(sys.stdout))
1362
-
1363
- parser = argparse.ArgumentParser(description="OS distro info tool")
1364
- parser.add_argument(
1365
- "--json", "-j", help="Output in machine readable format", action="store_true"
1366
- )
1367
-
1368
- parser.add_argument(
1369
- "--root-dir",
1370
- "-r",
1371
- type=str,
1372
- dest="root_dir",
1373
- help="Path to the root filesystem directory (defaults to /)",
1374
- )
1375
-
1376
- args = parser.parse_args()
1377
-
1378
- if args.root_dir:
1379
- dist = LinuxDistribution(
1380
- include_lsb=False,
1381
- include_uname=False,
1382
- include_oslevel=False,
1383
- root_dir=args.root_dir,
1384
- )
1385
- else:
1386
- dist = _distro
1387
-
1388
- if args.json:
1389
- logger.info(json.dumps(dist.info(), indent=4, sort_keys=True))
1390
- else:
1391
- logger.info("Name: %s", dist.name(pretty=True))
1392
- distribution_version = dist.version(pretty=True)
1393
- logger.info("Version: %s", distribution_version)
1394
- distribution_codename = dist.codename()
1395
- logger.info("Codename: %s", distribution_codename)
1396
-
1397
-
1398
- if __name__ == "__main__":
1399
- main()
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/config.py DELETED
@@ -1,377 +0,0 @@
1
- """distutils.command.config
2
-
3
- Implements the Distutils 'config' command, a (mostly) empty command class
4
- that exists mainly to be sub-classed by specific module distributions and
5
- applications. The idea is that while every "config" command is different,
6
- at least they're all named the same, and users always see "config" in the
7
- list of standard commands. Also, this is a good place to put common
8
- configure-like tasks: "try to compile this C code", or "figure out where
9
- this header file lives".
10
- """
11
-
12
- import os
13
- import re
14
-
15
- from distutils.core import Command
16
- from distutils.errors import DistutilsExecError
17
- from distutils.sysconfig import customize_compiler
18
- from distutils import log
19
-
20
- LANG_EXT = {"c": ".c", "c++": ".cxx"}
21
-
22
-
23
- class config(Command):
24
-
25
- description = "prepare to build"
26
-
27
- user_options = [
28
- ('compiler=', None, "specify the compiler type"),
29
- ('cc=', None, "specify the compiler executable"),
30
- ('include-dirs=', 'I', "list of directories to search for header files"),
31
- ('define=', 'D', "C preprocessor macros to define"),
32
- ('undef=', 'U', "C preprocessor macros to undefine"),
33
- ('libraries=', 'l', "external C libraries to link with"),
34
- ('library-dirs=', 'L', "directories to search for external C libraries"),
35
- ('noisy', None, "show every action (compile, link, run, ...) taken"),
36
- (
37
- 'dump-source',
38
- None,
39
- "dump generated source files before attempting to compile them",
40
- ),
41
- ]
42
-
43
- # The three standard command methods: since the "config" command
44
- # does nothing by default, these are empty.
45
-
46
- def initialize_options(self):
47
- self.compiler = None
48
- self.cc = None
49
- self.include_dirs = None
50
- self.libraries = None
51
- self.library_dirs = None
52
-
53
- # maximal output for now
54
- self.noisy = 1
55
- self.dump_source = 1
56
-
57
- # list of temporary files generated along-the-way that we have
58
- # to clean at some point
59
- self.temp_files = []
60
-
61
- def finalize_options(self):
62
- if self.include_dirs is None:
63
- self.include_dirs = self.distribution.include_dirs or []
64
- elif isinstance(self.include_dirs, str):
65
- self.include_dirs = self.include_dirs.split(os.pathsep)
66
-
67
- if self.libraries is None:
68
- self.libraries = []
69
- elif isinstance(self.libraries, str):
70
- self.libraries = [self.libraries]
71
-
72
- if self.library_dirs is None:
73
- self.library_dirs = []
74
- elif isinstance(self.library_dirs, str):
75
- self.library_dirs = self.library_dirs.split(os.pathsep)
76
-
77
- def run(self):
78
- pass
79
-
80
- # Utility methods for actual "config" commands. The interfaces are
81
- # loosely based on Autoconf macros of similar names. Sub-classes
82
- # may use these freely.
83
-
84
- def _check_compiler(self):
85
- """Check that 'self.compiler' really is a CCompiler object;
86
- if not, make it one.
87
- """
88
- # We do this late, and only on-demand, because this is an expensive
89
- # import.
90
- from distutils.ccompiler import CCompiler, new_compiler
91
-
92
- if not isinstance(self.compiler, CCompiler):
93
- self.compiler = new_compiler(
94
- compiler=self.compiler, dry_run=self.dry_run, force=1
95
- )
96
- customize_compiler(self.compiler)
97
- if self.include_dirs:
98
- self.compiler.set_include_dirs(self.include_dirs)
99
- if self.libraries:
100
- self.compiler.set_libraries(self.libraries)
101
- if self.library_dirs:
102
- self.compiler.set_library_dirs(self.library_dirs)
103
-
104
- def _gen_temp_sourcefile(self, body, headers, lang):
105
- filename = "_configtest" + LANG_EXT[lang]
106
- with open(filename, "w") as file:
107
- if headers:
108
- for header in headers:
109
- file.write("#include <%s>\n" % header)
110
- file.write("\n")
111
- file.write(body)
112
- if body[-1] != "\n":
113
- file.write("\n")
114
- return filename
115
-
116
- def _preprocess(self, body, headers, include_dirs, lang):
117
- src = self._gen_temp_sourcefile(body, headers, lang)
118
- out = "_configtest.i"
119
- self.temp_files.extend([src, out])
120
- self.compiler.preprocess(src, out, include_dirs=include_dirs)
121
- return (src, out)
122
-
123
- def _compile(self, body, headers, include_dirs, lang):
124
- src = self._gen_temp_sourcefile(body, headers, lang)
125
- if self.dump_source:
126
- dump_file(src, "compiling '%s':" % src)
127
- (obj,) = self.compiler.object_filenames([src])
128
- self.temp_files.extend([src, obj])
129
- self.compiler.compile([src], include_dirs=include_dirs)
130
- return (src, obj)
131
-
132
- def _link(self, body, headers, include_dirs, libraries, library_dirs, lang):
133
- (src, obj) = self._compile(body, headers, include_dirs, lang)
134
- prog = os.path.splitext(os.path.basename(src))[0]
135
- self.compiler.link_executable(
136
- [obj],
137
- prog,
138
- libraries=libraries,
139
- library_dirs=library_dirs,
140
- target_lang=lang,
141
- )
142
-
143
- if self.compiler.exe_extension is not None:
144
- prog = prog + self.compiler.exe_extension
145
- self.temp_files.append(prog)
146
-
147
- return (src, obj, prog)
148
-
149
- def _clean(self, *filenames):
150
- if not filenames:
151
- filenames = self.temp_files
152
- self.temp_files = []
153
- log.info("removing: %s", ' '.join(filenames))
154
- for filename in filenames:
155
- try:
156
- os.remove(filename)
157
- except OSError:
158
- pass
159
-
160
- # XXX these ignore the dry-run flag: what to do, what to do? even if
161
- # you want a dry-run build, you still need some sort of configuration
162
- # info. My inclination is to make it up to the real config command to
163
- # consult 'dry_run', and assume a default (minimal) configuration if
164
- # true. The problem with trying to do it here is that you'd have to
165
- # return either true or false from all the 'try' methods, neither of
166
- # which is correct.
167
-
168
- # XXX need access to the header search path and maybe default macros.
169
-
170
- def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
171
- """Construct a source file from 'body' (a string containing lines
172
- of C/C++ code) and 'headers' (a list of header files to include)
173
- and run it through the preprocessor. Return true if the
174
- preprocessor succeeded, false if there were any errors.
175
- ('body' probably isn't of much use, but what the heck.)
176
- """
177
- from distutils.ccompiler import CompileError
178
-
179
- self._check_compiler()
180
- ok = True
181
- try:
182
- self._preprocess(body, headers, include_dirs, lang)
183
- except CompileError:
184
- ok = False
185
-
186
- self._clean()
187
- return ok
188
-
189
- def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"):
190
- """Construct a source file (just like 'try_cpp()'), run it through
191
- the preprocessor, and return true if any line of the output matches
192
- 'pattern'. 'pattern' should either be a compiled regex object or a
193
- string containing a regex. If both 'body' and 'headers' are None,
194
- preprocesses an empty file -- which can be useful to determine the
195
- symbols the preprocessor and compiler set by default.
196
- """
197
- self._check_compiler()
198
- src, out = self._preprocess(body, headers, include_dirs, lang)
199
-
200
- if isinstance(pattern, str):
201
- pattern = re.compile(pattern)
202
-
203
- with open(out) as file:
204
- match = False
205
- while True:
206
- line = file.readline()
207
- if line == '':
208
- break
209
- if pattern.search(line):
210
- match = True
211
- break
212
-
213
- self._clean()
214
- return match
215
-
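
(A hedged sketch of using search_cpp from a subclass to probe whether the
preprocessor defines a macro; the marker token only survives preprocessing
when __GLIBC__ is defined. The command name is hypothetical.)

    class my_config(config):
        def run(self):
            body = "#ifdef __GLIBC__\nyes_glibc\n#endif\n"
            if self.search_cpp(pattern=r"yes_glibc", body=body):
                log.info("glibc detected")
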
216
- def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
217
- """Try to compile a source file built from 'body' and 'headers'.
218
- Return true on success, false otherwise.
219
- """
220
- from distutils.ccompiler import CompileError
221
-
222
- self._check_compiler()
223
- try:
224
- self._compile(body, headers, include_dirs, lang)
225
- ok = True
226
- except CompileError:
227
- ok = False
228
-
229
- log.info(ok and "success!" or "failure.")
230
- self._clean()
231
- return ok
232
-
233
- def try_link(
234
- self,
235
- body,
236
- headers=None,
237
- include_dirs=None,
238
- libraries=None,
239
- library_dirs=None,
240
- lang="c",
241
- ):
242
- """Try to compile and link a source file, built from 'body' and
243
- 'headers', to executable form. Return true on success, false
244
- otherwise.
245
- """
246
- from distutils.ccompiler import CompileError, LinkError
247
-
248
- self._check_compiler()
249
- try:
250
- self._link(body, headers, include_dirs, libraries, library_dirs, lang)
251
- ok = True
252
- except (CompileError, LinkError):
253
- ok = False
254
-
255
- log.info(ok and "success!" or "failure.")
256
- self._clean()
257
- return ok
258
-
259
- def try_run(
260
- self,
261
- body,
262
- headers=None,
263
- include_dirs=None,
264
- libraries=None,
265
- library_dirs=None,
266
- lang="c",
267
- ):
268
- """Try to compile, link to an executable, and run a program
269
- built from 'body' and 'headers'. Return true on success, false
270
- otherwise.
271
- """
272
- from distutils.ccompiler import CompileError, LinkError
273
-
274
- self._check_compiler()
275
- try:
276
- src, obj, exe = self._link(
277
- body, headers, include_dirs, libraries, library_dirs, lang
278
- )
279
- self.spawn([exe])
280
- ok = True
281
- except (CompileError, LinkError, DistutilsExecError):
282
- ok = False
283
-
284
- log.info(ok and "success!" or "failure.")
285
- self._clean()
286
- return ok
287
-
288
- # -- High-level methods --------------------------------------------
289
- # (these are the ones that are actually likely to be useful
290
- # when implementing a real-world config command!)
291
-
292
- def check_func(
293
- self,
294
- func,
295
- headers=None,
296
- include_dirs=None,
297
- libraries=None,
298
- library_dirs=None,
299
- decl=0,
300
- call=0,
301
- ):
302
- """Determine if function 'func' is available by constructing a
303
- source file that refers to 'func', and compiles and links it.
304
- If everything succeeds, returns true; otherwise returns false.
305
-
306
- The constructed source file starts out by including the header
307
- files listed in 'headers'. If 'decl' is true, it then declares
308
- 'func' (as "int func()"); you probably shouldn't supply 'headers'
309
- and set 'decl' true in the same call, or you might get errors about
310
- conflicting declarations for 'func'. Finally, the constructed
311
- 'main()' function either references 'func' or (if 'call' is true)
312
- calls it. 'libraries' and 'library_dirs' are used when
313
- linking.
314
- """
315
- self._check_compiler()
316
- body = []
317
- if decl:
318
- body.append("int %s ();" % func)
319
- body.append("int main () {")
320
- if call:
321
- body.append(" %s();" % func)
322
- else:
323
- body.append(" %s;" % func)
324
- body.append("}")
325
- body = "\n".join(body) + "\n"
326
-
327
- return self.try_link(body, headers, include_dirs, libraries, library_dirs)
328
-
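
(For illustration, how a subclass might probe for a libc function; the
command name and probed symbol are hypothetical.)

    class my_config(config):
        def run(self):
            if self.check_func("clock_gettime", headers=["time.h"],
                               libraries=["rt"], call=1):
                log.info("clock_gettime available")
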
329
- def check_lib(
330
- self,
331
- library,
332
- library_dirs=None,
333
- headers=None,
334
- include_dirs=None,
335
- other_libraries=[],
336
- ):
337
- """Determine if 'library' is available to be linked against,
338
- without actually checking that any particular symbols are provided
339
- by it. 'headers' will be used in constructing the source file to
340
- be compiled, but the only effect of this is to check if all the
341
- header files listed are available. Any libraries listed in
342
- 'other_libraries' will be included in the link, in case 'library'
343
- has symbols that depend on other libraries.
344
- """
345
- self._check_compiler()
346
- return self.try_link(
347
- "int main (void) { }",
348
- headers,
349
- include_dirs,
350
- [library] + other_libraries,
351
- library_dirs,
352
- )
353
-
354
- def check_header(self, header, include_dirs=None, library_dirs=None, lang="c"):
355
- """Determine if the system header file named by 'header_file'
356
- exists and can be found by the preprocessor; return true if so,
357
- false otherwise.
358
- """
359
- return self.try_cpp(
360
- body="/* No body */", headers=[header], include_dirs=include_dirs
361
- )
362
-
363
-
364
- def dump_file(filename, head=None):
365
- """Dumps a file content into log.info.
366
-
367
- If head is not None, will be dumped before the file content.
368
- """
369
- if head is None:
370
- log.info('%s', filename)
371
- else:
372
- log.info(head)
373
- file = open(filename)
374
- try:
375
- log.info(file.read())
376
- finally:
377
- file.close()
spaces/CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Image Animation Using Thin Plate Spline Motion Model
3
- emoji: 👁
4
- colorFrom: indigo
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.48.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/WALT/mmdet/models/dense_heads/cascade_rpn_head.py DELETED
@@ -1,784 +0,0 @@
1
- from __future__ import division
2
- import copy
3
- import warnings
4
-
5
- import torch
6
- import torch.nn as nn
7
- from mmcv import ConfigDict
8
- from mmcv.cnn import normal_init
9
- from mmcv.ops import DeformConv2d, batched_nms
10
-
11
- from mmdet.core import (RegionAssigner, build_assigner, build_sampler,
12
- images_to_levels, multi_apply)
13
- from ..builder import HEADS, build_head
14
- from .base_dense_head import BaseDenseHead
15
- from .rpn_head import RPNHead
16
-
17
-
18
- class AdaptiveConv(nn.Module):
19
- """AdaptiveConv used to adapt the sampling location with the anchors.
20
-
21
- Args:
22
- in_channels (int): Number of channels in the input image
23
- out_channels (int): Number of channels produced by the convolution
24
- kernel_size (int or tuple): Size of the conv kernel. Default: 3
25
- stride (int or tuple, optional): Stride of the convolution. Default: 1
26
- padding (int or tuple, optional): Zero-padding added to both sides of
27
- the input. Default: 1
28
- dilation (int or tuple, optional): Spacing between kernel elements.
29
- Default: 3
30
- groups (int, optional): Number of blocked connections from input
31
- channels to output channels. Default: 1
32
- bias (bool, optional): If set True, adds a learnable bias to the
33
- output. Default: False.
34
- type (str, optional): Type of adaptive conv, can be either 'offset'
35
- (arbitrary anchors) or 'dilation' (uniform anchor).
36
- Default: 'dilation'.
37
- """
38
-
39
- def __init__(self,
40
- in_channels,
41
- out_channels,
42
- kernel_size=3,
43
- stride=1,
44
- padding=1,
45
- dilation=3,
46
- groups=1,
47
- bias=False,
48
- type='dilation'):
49
- super(AdaptiveConv, self).__init__()
50
- assert type in ['offset', 'dilation']
51
- self.adapt_type = type
52
-
53
- assert kernel_size == 3, 'Adaptive conv only supports kernel size 3'
54
- if self.adapt_type == 'offset':
55
- assert stride == 1 and padding == 1 and groups == 1, \
56
- 'Adaptive conv offset mode only supports padding: {1}, ' \
57
- f'stride: {1}, groups: {1}'
58
- self.conv = DeformConv2d(
59
- in_channels,
60
- out_channels,
61
- kernel_size,
62
- padding=padding,
63
- stride=stride,
64
- groups=groups,
65
- bias=bias)
66
- else:
67
- self.conv = nn.Conv2d(
68
- in_channels,
69
- out_channels,
70
- kernel_size,
71
- padding=dilation,
72
- dilation=dilation)
73
-
74
- def init_weights(self):
75
- """Init weights."""
76
- normal_init(self.conv, std=0.01)
77
-
78
- def forward(self, x, offset):
79
- """Forward function."""
80
- if self.adapt_type == 'offset':
81
- N, _, H, W = x.shape
82
- assert offset is not None
83
- assert H * W == offset.shape[1]
84
- # reshape [N, NA, 18] to (N, 18, H, W)
85
- offset = offset.permute(0, 2, 1).reshape(N, -1, H, W)
86
- offset = offset.contiguous()
87
- x = self.conv(x, offset)
88
- else:
89
- assert offset is None
90
- x = self.conv(x)
91
- return x
92
-
93
-
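
(A minimal sketch of the 'dilation' branch; it needs the mmcv import chain
available, but in this mode only a plain nn.Conv2d runs, and padding ==
dilation with a 3x3 kernel keeps the spatial size unchanged.)

    import torch

    conv = AdaptiveConv(256, 256, type='dilation', dilation=3)
    feat = torch.rand(2, 256, 32, 32)
    out = conv(feat, offset=None)  # -> torch.Size([2, 256, 32, 32])
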
94
- @HEADS.register_module()
95
- class StageCascadeRPNHead(RPNHead):
96
- """Stage of CascadeRPNHead.
97
-
98
- Args:
99
- in_channels (int): Number of channels in the input feature map.
100
- anchor_generator (dict): anchor generator config.
101
- adapt_cfg (dict): adaptation config.
102
- bridged_feature (bool, optional): whether to update the rpn feature.
103
- Default: False.
104
- with_cls (bool, optional): whether to use the classification branch.
105
- Default: True.
106
- sampling (bool, optional): whether to use sampling. Default: True.
107
- """
108
-
109
- def __init__(self,
110
- in_channels,
111
- anchor_generator=dict(
112
- type='AnchorGenerator',
113
- scales=[8],
114
- ratios=[1.0],
115
- strides=[4, 8, 16, 32, 64]),
116
- adapt_cfg=dict(type='dilation', dilation=3),
117
- bridged_feature=False,
118
- with_cls=True,
119
- sampling=True,
120
- **kwargs):
121
- self.with_cls = with_cls
122
- self.anchor_strides = anchor_generator['strides']
123
- self.anchor_scales = anchor_generator['scales']
124
- self.bridged_feature = bridged_feature
125
- self.adapt_cfg = adapt_cfg
126
- super(StageCascadeRPNHead, self).__init__(
127
- in_channels, anchor_generator=anchor_generator, **kwargs)
128
-
129
- # override sampling and sampler
130
- self.sampling = sampling
131
- if self.train_cfg:
132
- self.assigner = build_assigner(self.train_cfg.assigner)
133
- # use PseudoSampler when sampling is False
134
- if self.sampling and hasattr(self.train_cfg, 'sampler'):
135
- sampler_cfg = self.train_cfg.sampler
136
- else:
137
- sampler_cfg = dict(type='PseudoSampler')
138
- self.sampler = build_sampler(sampler_cfg, context=self)
139
-
140
- def _init_layers(self):
141
- """Init layers of a CascadeRPN stage."""
142
- self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels,
143
- **self.adapt_cfg)
144
- if self.with_cls:
145
- self.rpn_cls = nn.Conv2d(self.feat_channels,
146
- self.num_anchors * self.cls_out_channels,
147
- 1)
148
- self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
149
- self.relu = nn.ReLU(inplace=True)
150
-
151
- def init_weights(self):
152
- """Init weights of a CascadeRPN stage."""
153
- self.rpn_conv.init_weights()
154
- normal_init(self.rpn_reg, std=0.01)
155
- if self.with_cls:
156
- normal_init(self.rpn_cls, std=0.01)
157
-
158
- def forward_single(self, x, offset):
159
- """Forward function of single scale."""
160
- bridged_x = x
161
- x = self.relu(self.rpn_conv(x, offset))
162
- if self.bridged_feature:
163
- bridged_x = x # update feature
164
- cls_score = self.rpn_cls(x) if self.with_cls else None
165
- bbox_pred = self.rpn_reg(x)
166
- return bridged_x, cls_score, bbox_pred
167
-
168
- def forward(self, feats, offset_list=None):
169
- """Forward function."""
170
- if offset_list is None:
171
- offset_list = [None for _ in range(len(feats))]
172
- return multi_apply(self.forward_single, feats, offset_list)
173
-
174
- def _region_targets_single(self,
175
- anchors,
176
- valid_flags,
177
- gt_bboxes,
178
- gt_bboxes_ignore,
179
- gt_labels,
180
- img_meta,
181
- featmap_sizes,
182
- label_channels=1):
183
- """Get anchor targets based on region for single level."""
184
- assign_result = self.assigner.assign(
185
- anchors,
186
- valid_flags,
187
- gt_bboxes,
188
- img_meta,
189
- featmap_sizes,
190
- self.anchor_scales[0],
191
- self.anchor_strides,
192
- gt_bboxes_ignore=gt_bboxes_ignore,
193
- gt_labels=None,
194
- allowed_border=self.train_cfg.allowed_border)
195
- flat_anchors = torch.cat(anchors)
196
- sampling_result = self.sampler.sample(assign_result, flat_anchors,
197
- gt_bboxes)
198
-
199
- num_anchors = flat_anchors.shape[0]
200
- bbox_targets = torch.zeros_like(flat_anchors)
201
- bbox_weights = torch.zeros_like(flat_anchors)
202
- labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long)
203
- label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float)
204
-
205
- pos_inds = sampling_result.pos_inds
206
- neg_inds = sampling_result.neg_inds
207
- if len(pos_inds) > 0:
208
- if not self.reg_decoded_bbox:
209
- pos_bbox_targets = self.bbox_coder.encode(
210
- sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
211
- else:
212
- pos_bbox_targets = sampling_result.pos_gt_bboxes
213
- bbox_targets[pos_inds, :] = pos_bbox_targets
214
- bbox_weights[pos_inds, :] = 1.0
215
- if gt_labels is None:
216
- labels[pos_inds] = 1
217
- else:
218
- labels[pos_inds] = gt_labels[
219
- sampling_result.pos_assigned_gt_inds]
220
- if self.train_cfg.pos_weight <= 0:
221
- label_weights[pos_inds] = 1.0
222
- else:
223
- label_weights[pos_inds] = self.train_cfg.pos_weight
224
- if len(neg_inds) > 0:
225
- label_weights[neg_inds] = 1.0
226
-
227
- return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
228
- neg_inds)
229
-
230
- def region_targets(self,
231
- anchor_list,
232
- valid_flag_list,
233
- gt_bboxes_list,
234
- img_metas,
235
- featmap_sizes,
236
- gt_bboxes_ignore_list=None,
237
- gt_labels_list=None,
238
- label_channels=1,
239
- unmap_outputs=True):
240
- """See :func:`StageCascadeRPNHead.get_targets`."""
241
- num_imgs = len(img_metas)
242
- assert len(anchor_list) == len(valid_flag_list) == num_imgs
243
-
244
- # anchor number of multi levels
245
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
246
-
247
- # compute targets for each image
248
- if gt_bboxes_ignore_list is None:
249
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
250
- if gt_labels_list is None:
251
- gt_labels_list = [None for _ in range(num_imgs)]
252
- (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
253
- pos_inds_list, neg_inds_list) = multi_apply(
254
- self._region_targets_single,
255
- anchor_list,
256
- valid_flag_list,
257
- gt_bboxes_list,
258
- gt_bboxes_ignore_list,
259
- gt_labels_list,
260
- img_metas,
261
- featmap_sizes=featmap_sizes,
262
- label_channels=label_channels)
263
- # no valid anchors
264
- if any([labels is None for labels in all_labels]):
265
- return None
266
- # sampled anchors of all images
267
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
268
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
269
- # split targets to a list w.r.t. multiple levels
270
- labels_list = images_to_levels(all_labels, num_level_anchors)
271
- label_weights_list = images_to_levels(all_label_weights,
272
- num_level_anchors)
273
- bbox_targets_list = images_to_levels(all_bbox_targets,
274
- num_level_anchors)
275
- bbox_weights_list = images_to_levels(all_bbox_weights,
276
- num_level_anchors)
277
- return (labels_list, label_weights_list, bbox_targets_list,
278
- bbox_weights_list, num_total_pos, num_total_neg)
279
-
280
- def get_targets(self,
281
- anchor_list,
282
- valid_flag_list,
283
- gt_bboxes,
284
- img_metas,
285
- featmap_sizes,
286
- gt_bboxes_ignore=None,
287
- label_channels=1):
288
- """Compute regression and classification targets for anchors.
289
-
290
- Args:
291
- anchor_list (list[list]): Multi level anchors of each image.
292
- valid_flag_list (list[list]): Multi level valid flags of each
293
- image.
294
- gt_bboxes (list[Tensor]): Ground truth bboxes of each image.
295
- img_metas (list[dict]): Meta info of each image.
296
- featmap_sizes (list[Tensor]): Feature map size of each level.
297
- gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each image.
298
- label_channels (int): Channel of label.
299
-
300
- Returns:
301
- cls_reg_targets (tuple)
302
- """
303
- if isinstance(self.assigner, RegionAssigner):
304
- cls_reg_targets = self.region_targets(
305
- anchor_list,
306
- valid_flag_list,
307
- gt_bboxes,
308
- img_metas,
309
- featmap_sizes,
310
- gt_bboxes_ignore_list=gt_bboxes_ignore,
311
- label_channels=label_channels)
312
- else:
313
- cls_reg_targets = super(StageCascadeRPNHead, self).get_targets(
314
- anchor_list,
315
- valid_flag_list,
316
- gt_bboxes,
317
- img_metas,
318
- gt_bboxes_ignore_list=gt_bboxes_ignore,
319
- label_channels=label_channels)
320
- return cls_reg_targets
321
-
322
- def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):
323
- """ Get offest for deformable conv based on anchor shape
324
- NOTE: currently support deformable kernel_size=3 and dilation=1
325
-
326
- Args:
327
- anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of
328
- multi-level anchors
329
- anchor_strides (list[int]): anchor stride of each level
330
-
331
- Returns:
332
- offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv
333
- kernel.
334
- """
335
-
336
- def _shape_offset(anchors, stride, ks=3, dilation=1):
337
- # currently support kernel_size=3 and dilation=1
338
- assert ks == 3 and dilation == 1
339
- pad = (ks - 1) // 2
340
- idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)
341
- yy, xx = torch.meshgrid(idx, idx) # return order matters
342
- xx = xx.reshape(-1)
343
- yy = yy.reshape(-1)
344
- w = (anchors[:, 2] - anchors[:, 0]) / stride
345
- h = (anchors[:, 3] - anchors[:, 1]) / stride
346
- w = w / (ks - 1) - dilation
347
- h = h / (ks - 1) - dilation
348
- offset_x = w[:, None] * xx # (NA, ks**2)
349
- offset_y = h[:, None] * yy # (NA, ks**2)
350
- return offset_x, offset_y
351
-
352
- def _ctr_offset(anchors, stride, featmap_size):
353
- feat_h, feat_w = featmap_size
354
- assert len(anchors) == feat_h * feat_w
355
-
356
- x = (anchors[:, 0] + anchors[:, 2]) * 0.5
357
- y = (anchors[:, 1] + anchors[:, 3]) * 0.5
358
- # compute centers on feature map
359
- x = x / stride
360
- y = y / stride
361
- # compute predefine centers
362
- xx = torch.arange(0, feat_w, device=anchors.device)
363
- yy = torch.arange(0, feat_h, device=anchors.device)
364
- yy, xx = torch.meshgrid(yy, xx)
365
- xx = xx.reshape(-1).type_as(x)
366
- yy = yy.reshape(-1).type_as(y)
367
-
368
- offset_x = x - xx # (NA, )
369
- offset_y = y - yy # (NA, )
370
- return offset_x, offset_y
371
-
372
- num_imgs = len(anchor_list)
373
- num_lvls = len(anchor_list[0])
374
- dtype = anchor_list[0][0].dtype
375
- device = anchor_list[0][0].device
376
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
377
-
378
- offset_list = []
379
- for i in range(num_imgs):
380
- mlvl_offset = []
381
- for lvl in range(num_lvls):
382
- c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl],
383
- anchor_strides[lvl],
384
- featmap_sizes[lvl])
385
- s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl],
386
- anchor_strides[lvl])
387
-
388
- # offset = ctr_offset + shape_offset
389
- offset_x = s_offset_x + c_offset_x[:, None]
390
- offset_y = s_offset_y + c_offset_y[:, None]
391
-
392
- # offset order (y0, x0, y1, x1, ..., y8, x8)
393
- offset = torch.stack([offset_y, offset_x], dim=-1)
394
- offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2]
395
- mlvl_offset.append(offset)
396
- offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2]
397
- offset_list = images_to_levels(offset_list, num_level_anchors)
398
- return offset_list
399
-
400
- def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
401
- bbox_targets, bbox_weights, num_total_samples):
402
- """Loss function on single scale."""
403
- # classification loss
404
- if self.with_cls:
405
- labels = labels.reshape(-1)
406
- label_weights = label_weights.reshape(-1)
407
- cls_score = cls_score.permute(0, 2, 3,
408
- 1).reshape(-1, self.cls_out_channels)
409
- loss_cls = self.loss_cls(
410
- cls_score, labels, label_weights, avg_factor=num_total_samples)
411
- # regression loss
412
- bbox_targets = bbox_targets.reshape(-1, 4)
413
- bbox_weights = bbox_weights.reshape(-1, 4)
414
- bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
415
- if self.reg_decoded_bbox:
416
- # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
417
- # is applied directly on the decoded bounding boxes, it
418
- # decodes the already encoded coordinates to absolute format.
419
- anchors = anchors.reshape(-1, 4)
420
- bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
421
- loss_reg = self.loss_bbox(
422
- bbox_pred,
423
- bbox_targets,
424
- bbox_weights,
425
- avg_factor=num_total_samples)
426
- if self.with_cls:
427
- return loss_cls, loss_reg
428
- return None, loss_reg
429
-
430
- def loss(self,
431
- anchor_list,
432
- valid_flag_list,
433
- cls_scores,
434
- bbox_preds,
435
- gt_bboxes,
436
- img_metas,
437
- gt_bboxes_ignore=None):
438
- """Compute losses of the head.
439
-
440
- Args:
441
- anchor_list (list[list]): Multi level anchors of each image.
442
- cls_scores (list[Tensor]): Box scores for each scale level.
443
- Has shape (N, num_anchors * num_classes, H, W).
444
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
445
- level with shape (N, num_anchors * 4, H, W)
446
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
447
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
448
- img_metas (list[dict]): Meta information of each image, e.g.,
449
- image size, scaling factor, etc.
450
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
451
- boxes can be ignored when computing the loss. Default: None
452
-
453
- Returns:
454
- dict[str, Tensor]: A dictionary of loss components.
455
- """
456
- featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]
457
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
458
- cls_reg_targets = self.get_targets(
459
- anchor_list,
460
- valid_flag_list,
461
- gt_bboxes,
462
- img_metas,
463
- featmap_sizes,
464
- gt_bboxes_ignore=gt_bboxes_ignore,
465
- label_channels=label_channels)
466
- if cls_reg_targets is None:
467
- return None
468
- (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
469
- num_total_pos, num_total_neg) = cls_reg_targets
470
- if self.sampling:
471
- num_total_samples = num_total_pos + num_total_neg
472
- else:
473
- # 200 is hard-coded average factor,
474
- # which follows guided anchoring.
475
- num_total_samples = sum([label.numel()
476
- for label in labels_list]) / 200.0
477
-
478
- # change per image, per level anchor_list to per_level, per_image
479
- mlvl_anchor_list = list(zip(*anchor_list))
480
- # concat mlvl_anchor_list
481
- mlvl_anchor_list = [
482
- torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list
483
- ]
484
-
485
- losses = multi_apply(
486
- self.loss_single,
487
- cls_scores,
488
- bbox_preds,
489
- mlvl_anchor_list,
490
- labels_list,
491
- label_weights_list,
492
- bbox_targets_list,
493
- bbox_weights_list,
494
- num_total_samples=num_total_samples)
495
- if self.with_cls:
496
- return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1])
497
- return dict(loss_rpn_reg=losses[1])
498
-
499
- def get_bboxes(self,
500
- anchor_list,
501
- cls_scores,
502
- bbox_preds,
503
- img_metas,
504
- cfg,
505
- rescale=False):
506
- """Get proposal predict."""
507
- assert len(cls_scores) == len(bbox_preds)
508
- num_levels = len(cls_scores)
509
-
510
- result_list = []
511
- for img_id in range(len(img_metas)):
512
- cls_score_list = [
513
- cls_scores[i][img_id].detach() for i in range(num_levels)
514
- ]
515
- bbox_pred_list = [
516
- bbox_preds[i][img_id].detach() for i in range(num_levels)
517
- ]
518
- img_shape = img_metas[img_id]['img_shape']
519
- scale_factor = img_metas[img_id]['scale_factor']
520
- proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
521
- anchor_list[img_id], img_shape,
522
- scale_factor, cfg, rescale)
523
- result_list.append(proposals)
524
- return result_list
525
-
526
- def refine_bboxes(self, anchor_list, bbox_preds, img_metas):
527
- """Refine bboxes through stages."""
528
- num_levels = len(bbox_preds)
529
- new_anchor_list = []
530
- for img_id in range(len(img_metas)):
531
- mlvl_anchors = []
532
- for i in range(num_levels):
533
- bbox_pred = bbox_preds[i][img_id].detach()
534
- bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
535
- img_shape = img_metas[img_id]['img_shape']
536
- bboxes = self.bbox_coder.decode(anchor_list[img_id][i],
537
- bbox_pred, img_shape)
538
- mlvl_anchors.append(bboxes)
539
- new_anchor_list.append(mlvl_anchors)
540
- return new_anchor_list
541
-
542
- # TODO: temporary plan
543
- def _get_bboxes_single(self,
544
- cls_scores,
545
- bbox_preds,
546
- mlvl_anchors,
547
- img_shape,
548
- scale_factor,
549
- cfg,
550
- rescale=False):
551
- """Transform outputs for a single batch item into bbox predictions.
552
-
553
- Args:
554
- cls_scores (list[Tensor]): Box scores for each scale level.
555
- Has shape (num_anchors * num_classes, H, W).
556
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
557
- level with shape (num_anchors * 4, H, W).
558
- mlvl_anchors (list[Tensor]): Box reference for each scale level
559
- with shape (num_total_anchors, 4).
560
- img_shape (tuple[int]): Shape of the input image,
561
- (height, width, 3).
562
- scale_factor (ndarray): Scale factor of the image, arranged as
563
- (w_scale, h_scale, w_scale, h_scale).
564
- cfg (mmcv.Config): Test / postprocessing configuration,
565
- if None, test_cfg would be used.
566
- rescale (bool): If True, return boxes in original image space.
567
-
568
- Returns:
569
- Tensor: Labeled boxes have the shape of (n,5), where the
570
- first 4 columns are bounding box positions
571
- (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
572
- between 0 and 1.
573
- """
574
- cfg = self.test_cfg if cfg is None else cfg
575
- cfg = copy.deepcopy(cfg)
576
- # bboxes from different level should be independent during NMS,
577
- # level_ids are used as labels for batched NMS to separate them
578
- level_ids = []
579
- mlvl_scores = []
580
- mlvl_bbox_preds = []
581
- mlvl_valid_anchors = []
582
- for idx in range(len(cls_scores)):
583
- rpn_cls_score = cls_scores[idx]
584
- rpn_bbox_pred = bbox_preds[idx]
585
- assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
586
- rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
587
- if self.use_sigmoid_cls:
588
- rpn_cls_score = rpn_cls_score.reshape(-1)
589
- scores = rpn_cls_score.sigmoid()
590
- else:
591
- rpn_cls_score = rpn_cls_score.reshape(-1, 2)
592
- # We set FG labels to [0, num_class-1] and BG label to
593
- # num_class in RPN head since mmdet v2.5, which is unified to
594
- # be consistent with other head since mmdet v2.0. In mmdet v2.0
595
- # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
596
- scores = rpn_cls_score.softmax(dim=1)[:, 0]
597
- rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
598
- anchors = mlvl_anchors[idx]
599
- if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
600
- # sort is faster than topk
601
- # _, topk_inds = scores.topk(cfg.nms_pre)
602
- if torch.onnx.is_in_onnx_export():
603
- # sort op will be converted to TopK in onnx
604
- # and k<=3480 in TensorRT
605
- _, topk_inds = scores.topk(cfg.nms_pre)
606
- scores = scores[topk_inds]
607
- else:
608
- ranked_scores, rank_inds = scores.sort(descending=True)
609
- topk_inds = rank_inds[:cfg.nms_pre]
610
- scores = ranked_scores[:cfg.nms_pre]
611
- rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
612
- anchors = anchors[topk_inds, :]
613
- mlvl_scores.append(scores)
614
- mlvl_bbox_preds.append(rpn_bbox_pred)
615
- mlvl_valid_anchors.append(anchors)
616
- level_ids.append(
617
- scores.new_full((scores.size(0), ), idx, dtype=torch.long))
618
-
619
- scores = torch.cat(mlvl_scores)
620
- anchors = torch.cat(mlvl_valid_anchors)
621
- rpn_bbox_pred = torch.cat(mlvl_bbox_preds)
622
- proposals = self.bbox_coder.decode(
623
- anchors, rpn_bbox_pred, max_shape=img_shape)
624
- ids = torch.cat(level_ids)
625
-
626
- # Skip nonzero op while exporting to ONNX
627
- if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()):
628
- w = proposals[:, 2] - proposals[:, 0]
629
- h = proposals[:, 3] - proposals[:, 1]
630
- valid_inds = torch.nonzero(
631
- (w >= cfg.min_bbox_size)
632
- & (h >= cfg.min_bbox_size),
633
- as_tuple=False).squeeze()
634
- if valid_inds.sum().item() != len(proposals):
635
- proposals = proposals[valid_inds, :]
636
- scores = scores[valid_inds]
637
- ids = ids[valid_inds]
638
-
639
- # deprecate arguments warning
640
- if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
641
- warnings.warn(
642
- 'In rpn_proposal or test_cfg, '
643
- 'nms_thr has been moved to a dict named nms as '
644
- 'iou_threshold, max_num has been renamed as max_per_img, '
645
- 'name of original arguments and the way to specify '
646
- 'iou_threshold of NMS will be deprecated.')
647
- if 'nms' not in cfg:
648
- cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
649
- if 'max_num' in cfg:
650
- if 'max_per_img' in cfg:
651
- assert cfg.max_num == cfg.max_per_img, f'You ' \
652
- f'set max_num and ' \
653
- f'max_per_img at the same time, but get {cfg.max_num} ' \
654
- f'and {cfg.max_per_img} respectively' \
655
- 'Please delete max_num which will be deprecated.'
656
- else:
657
- cfg.max_per_img = cfg.max_num
658
- if 'nms_thr' in cfg:
659
- assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \
660
- f' iou_threshold in nms and ' \
661
- f'nms_thr at the same time, but get' \
662
- f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \
663
- f' respectively. Please delete the nms_thr ' \
664
- f'which will be deprecated.'
665
-
666
- dets, keep = batched_nms(proposals, scores, ids, cfg.nms)
667
- return dets[:cfg.max_per_img]
668
-
669
-
670
- @HEADS.register_module()
671
- class CascadeRPNHead(BaseDenseHead):
672
- """The CascadeRPNHead will predict more accurate region proposals, which is
673
- required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN
674
- consists of a sequence of RPNStage to progressively improve the accuracy of
675
- the detected proposals.
676
-
677
- More details can be found in ``https://arxiv.org/abs/1909.06720``.
678
-
679
- Args:
680
- num_stages (int): number of CascadeRPN stages.
681
- stages (list[dict]): list of configs to build the stages.
682
- train_cfg (list[dict]): list of configs at training time for each stage.
683
- test_cfg (dict): config at testing time.
684
- """
685
-
686
- def __init__(self, num_stages, stages, train_cfg, test_cfg):
687
- super(CascadeRPNHead, self).__init__()
688
- assert num_stages == len(stages)
689
- self.num_stages = num_stages
690
- self.stages = nn.ModuleList()
691
- for i in range(len(stages)):
692
- train_cfg_i = train_cfg[i] if train_cfg is not None else None
693
- stages[i].update(train_cfg=train_cfg_i)
694
- stages[i].update(test_cfg=test_cfg)
695
- self.stages.append(build_head(stages[i]))
696
- self.train_cfg = train_cfg
697
- self.test_cfg = test_cfg
698
-
699
- def init_weights(self):
700
- """Init weight of CascadeRPN."""
701
- for i in range(self.num_stages):
702
- self.stages[i].init_weights()
703
-
704
- def loss(self):
705
- """loss() is implemented in StageCascadeRPNHead."""
706
- pass
707
-
708
- def get_bboxes(self):
709
- """get_bboxes() is implemented in StageCascadeRPNHead."""
710
- pass
711
-
712
- def forward_train(self,
713
- x,
714
- img_metas,
715
- gt_bboxes,
716
- gt_labels=None,
717
- gt_bboxes_ignore=None,
718
- proposal_cfg=None):
719
- """Forward train function."""
720
- assert gt_labels is None, 'RPN does not require gt_labels'
721
-
722
- featmap_sizes = [featmap.size()[-2:] for featmap in x]
723
- device = x[0].device
724
- anchor_list, valid_flag_list = self.stages[0].get_anchors(
725
- featmap_sizes, img_metas, device=device)
726
-
727
- losses = dict()
728
-
729
- for i in range(self.num_stages):
730
- stage = self.stages[i]
731
-
732
- if stage.adapt_cfg['type'] == 'offset':
733
- offset_list = stage.anchor_offset(anchor_list,
734
- stage.anchor_strides,
735
- featmap_sizes)
736
- else:
737
- offset_list = None
738
- x, cls_score, bbox_pred = stage(x, offset_list)
739
- rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,
740
- bbox_pred, gt_bboxes, img_metas)
741
- stage_loss = stage.loss(*rpn_loss_inputs)
742
- for name, value in stage_loss.items():
743
- losses['s{}.{}'.format(i, name)] = value
744
-
745
- # refine boxes
746
- if i < self.num_stages - 1:
747
- anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
748
- img_metas)
749
- if proposal_cfg is None:
750
- return losses
751
- else:
752
- proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
753
- bbox_pred, img_metas,
754
- self.test_cfg)
755
- return losses, proposal_list
756
-
757
- def simple_test_rpn(self, x, img_metas):
758
- """Simple forward test function."""
759
- featmap_sizes = [featmap.size()[-2:] for featmap in x]
760
- device = x[0].device
761
- anchor_list, _ = self.stages[0].get_anchors(
762
- featmap_sizes, img_metas, device=device)
763
-
764
- for i in range(self.num_stages):
765
- stage = self.stages[i]
766
- if stage.adapt_cfg['type'] == 'offset':
767
- offset_list = stage.anchor_offset(anchor_list,
768
- stage.anchor_strides,
769
- featmap_sizes)
770
- else:
771
- offset_list = None
772
- x, cls_score, bbox_pred = stage(x, offset_list)
773
- if i < self.num_stages - 1:
774
- anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
775
- img_metas)
776
-
777
- proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
778
- bbox_pred, img_metas,
779
- self.test_cfg)
780
- return proposal_list
781
-
782
- def aug_test_rpn(self, x, img_metas):
783
- """Augmented forward test function."""
784
- raise NotImplementedError
 
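For context, the two heads deleted above are composed through mmdetection's config system. A plausible two-stage Cascade RPN head config might look like the sketch below; every concrete value (channels, strides, per-stage flags) is an illustrative assumption, not a setting taken from this repository.

# Hypothetical mmdetection-style config for a two-stage Cascade RPN head.
# All values are illustrative assumptions; consult the official Cascade RPN
# configs for authoritative settings.
rpn_head = dict(
    type='CascadeRPNHead',
    num_stages=2,
    stages=[
        dict(
            type='StageCascadeRPNHead',
            in_channels=256,
            feat_channels=256,
            anchor_generator=dict(
                type='AnchorGenerator',
                scales=[8],
                ratios=[1.0],
                strides=[4, 8, 16, 32, 64]),
            adapt_cfg=dict(type='dilation', dilation=3),  # dilated conv in stage 1
            bridged_feature=True,   # pass the adapted feature on to stage 2
            with_cls=False,         # stage 1 is regression-only
            sampling=False),
        dict(
            type='StageCascadeRPNHead',
            in_channels=256,
            feat_channels=256,
            adapt_cfg=dict(type='offset'),  # deformable conv guided by refined anchors
            bridged_feature=False,
            with_cls=True,
            sampling=True),
    ])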
spaces/CVPR/drawings-to-human/static/index.html DELETED
@@ -1,209 +0,0 @@
1
- <!DOCTYPE html>
2
- <html lang="en">
3
- <head>
4
- <meta charset="utf-8" />
5
- <link rel="icon" href="/static/favicon.png" />
6
- <meta name="viewport" content="width=device-width, initial-scale=1" />
7
- <script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
8
- <meta http-equiv="content-security-policy" content="">
9
- <link rel="stylesheet" href="/static/_app/immutable/assets/pages/__layout.svelte-cc9dd261.css">
10
- <link rel="stylesheet" href="/static/_app/immutable/assets/pages/index.svelte-7bf249dc.css">
11
- <link rel="modulepreload" href="/static/_app/immutable/start-62e3dfe2.js">
12
- <link rel="modulepreload" href="/static/_app/immutable/chunks/index-bcf2726a.js">
13
- <link rel="modulepreload" href="/static/_app/immutable/chunks/paths-d3bcbd10.js">
14
- <link rel="modulepreload" href="/static/_app/immutable/pages/__layout.svelte-d07d8fed.js">
15
- <link rel="modulepreload" href="/static/_app/immutable/pages/index.svelte-b5d75a5f.js">
16
- </head>
17
- <body class="dark:bg-[rgb(11,15,25)] bg-white dark:text-white text-black">
18
-
19
-
20
-
21
- <div class="max-w-screen-md mx-auto px-3 py-5 relative z-0"><article class="prose dark:prose-invert"><h1>Drawings to Human</h1>
22
- <p>This is an unofficial drawing tool to explore the generative human generator <a href="https://github.com/yumingj/Text2Human" target="_blank"><span>Text2Human</span></a>. Please check all the model features on this
23
- <a href="https://huggingface.co/spaces/CVPR/Text2Human" target="_blank">Space</a>.
24
- </p>
25
- <small><h4 id="thanks-to">Thanks to</h4>
26
- <p>Authors: <a href="https://yumingj.github.io/" target="_blank">Yuming Jiang</a>,
27
- <a href="https://williamyang1991.github.io/" target="_blank">Shuai Yang</a>,
28
- <a href="http://haonanqiu.com/" target="_blank">Haonan Qiu</a>,
29
- <a href="https://wywu.github.io/" target="_blank">Wayne Wu</a>,
30
- <a href="https://www.mmlab-ntu.com/person/ccloy/" target="_blank">Chen Change Loy</a>
31
- and <a href="https://liuziwei7.github.io/" target="_blank">Ziwei Liu</a><br></p>
32
- <p><a href="https://huggingface.co/hysts" target="_blank">@hysts</a> for the original Space implementation
33
- </p></small>
34
- <details><summary class="cursor-pointer"><small>More</small></summary>
35
- <p>The backend is powered by a <a href="https://gradio.app/" target="_blank">Gradio</a>
36
- application running on
37
- <a href="https://huggingface.co/spaces/CVPR/Text2Human" target="_blank">Spaces</a>. You can
38
- also check the source code and clone it locally if you want:
39
- </p>
40
-
41
- <p><code class="block whitespace-pre overflow-x-scroll">git clone https://huggingface.co/spaces/CVPR/Text2Human
42
- </code></p></details></article>
43
- <form><h4 class="font-bold mt-6 mb-2 leading-6 my-3">Set the Brush Type</h4>
44
- <div class="colors svelte-1oy4poo" name="colors"><div class="snap-always snap-start"><input name="color" type="radio" id="color-0" value="0" class="svelte-1oy4poo">
45
- <label for="color-0" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(0,0,0)"></rect></svg>
46
- <span class="svelte-1oy4poo">background</span></label>
47
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-1" value="1" class="svelte-1oy4poo">
48
- <label for="color-1" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(255,140,0)"></rect></svg>
49
- <span class="svelte-1oy4poo">bag</span></label>
50
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-2" value="2" class="svelte-1oy4poo">
51
- <label for="color-2" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(255,255,0)"></rect></svg>
52
- <span class="svelte-1oy4poo">belt</span></label>
53
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-3" value="3" class="svelte-1oy4poo">
54
- <label for="color-3" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(255,250,205)"></rect></svg>
55
- <span class="svelte-1oy4poo">dress</span></label>
56
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-4" value="4" class="svelte-1oy4poo">
57
- <label for="color-4" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(130,165,180)"></rect></svg>
58
- <span class="svelte-1oy4poo">earrings</span></label>
59
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-5" value="5" class="svelte-1oy4poo">
60
- <label for="color-5" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(0,100,0)"></rect></svg>
61
- <span class="svelte-1oy4poo">eyeglass</span></label>
62
- </div><div class="snap-always snap-start"><input name="color" checked type="radio" id="color-6" value="6" class="svelte-1oy4poo">
63
- <label for="color-6" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(16,78,139)"></rect></svg>
64
- <span class="svelte-1oy4poo">face</span></label>
65
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-7" value="7" class="svelte-1oy4poo">
66
- <label for="color-7" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(245,222,179)"></rect></svg>
67
- <span class="svelte-1oy4poo">footwear</span></label>
68
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-8" value="8" class="svelte-1oy4poo">
69
- <label for="color-8" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(213,140,88)"></rect></svg>
70
- <span class="svelte-1oy4poo">gloves</span></label>
71
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-9" value="9" class="svelte-1oy4poo">
72
- <label for="color-9" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(255,0,0)"></rect></svg>
73
- <span class="svelte-1oy4poo">hair</span></label>
74
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-10" value="10" class="svelte-1oy4poo">
75
- <label for="color-10" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(127,255,212)"></rect></svg>
76
- <span class="svelte-1oy4poo">headwear</span></label>
77
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-11" value="11" class="svelte-1oy4poo">
78
- <label for="color-11" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(70,130,180)"></rect></svg>
79
- <span class="svelte-1oy4poo">leggings</span></label>
80
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-12" value="12" class="svelte-1oy4poo">
81
- <label for="color-12" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(90,140,90)"></rect></svg>
82
- <span class="svelte-1oy4poo">necklace</span></label>
83
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-13" value="13" class="svelte-1oy4poo">
84
- <label for="color-13" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(50,205,50)"></rect></svg>
85
- <span class="svelte-1oy4poo">neckwear</span></label>
86
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-14" value="14" class="svelte-1oy4poo">
87
- <label for="color-14" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(220,220,220)"></rect></svg>
88
- <span class="svelte-1oy4poo">outer</span></label>
89
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-15" value="15" class="svelte-1oy4poo">
90
- <label for="color-15" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(211,211,211)"></rect></svg>
91
- <span class="svelte-1oy4poo">pants</span></label>
92
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-16" value="16" class="svelte-1oy4poo">
93
- <label for="color-16" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(50,205,174)"></rect></svg>
94
- <span class="svelte-1oy4poo">ring</span></label>
95
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-17" value="17" class="svelte-1oy4poo">
96
- <label for="color-17" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(185,210,205)"></rect></svg>
97
- <span class="svelte-1oy4poo">rompers</span></label>
98
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-18" value="18" class="svelte-1oy4poo">
99
- <label for="color-18" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(144,238,144)"></rect></svg>
100
- <span class="svelte-1oy4poo">skin</span></label>
101
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-19" value="19" class="svelte-1oy4poo">
102
- <label for="color-19" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(250,235,215)"></rect></svg>
103
- <span class="svelte-1oy4poo">skirt</span></label>
104
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-20" value="20" class="svelte-1oy4poo">
105
- <label for="color-20" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(160,140,88)"></rect></svg>
106
- <span class="svelte-1oy4poo">socks</span></label>
107
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-21" value="21" class="svelte-1oy4poo">
108
- <label for="color-21" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(225,141,151)"></rect></svg>
109
- <span class="svelte-1oy4poo">tie</span></label>
110
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-22" value="22" class="svelte-1oy4poo">
111
- <label for="color-22" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(255,250,250)"></rect></svg>
112
- <span class="svelte-1oy4poo">top</span></label>
113
- </div><div class="snap-always snap-start"><input name="color" type="radio" id="color-23" value="23" class="svelte-1oy4poo">
114
- <label for="color-23" class="svelte-1oy4poo"><svg width="20" height="20" viewBox="0 0 20 20" class="svelte-1oy4poo"><rect x="0" y="0" width="20" height="20" fill="rgb(50,155,250)"></rect></svg>
115
- <span class="svelte-1oy4poo">wrist wearing</span></label>
116
- </div></div>
117
- <h4 class="font-bold mt-6 mb-2 my-6 leading-6">Set the Brush Size</h4>
118
- <div class="brush svelte-1oy4poo"><input value="10" min="1" max="50" step="1" name="brush" type="range">
119
- <label class="pl-2 svelte-1oy4poo" for="brush">40</label></div>
120
- </form>
121
- <div><h4 class="font-bold mt-6 mb-2 my-6 leading-6">Select a Template</h4>
122
- <form class="svelte-1gwcbp"><div class="samples svelte-1gwcbp"><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-0" value="0" class="svelte-1gwcbp">
123
- <label for="sample-0" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Skirts-id_00004406-02_7_additional_segm.png" alt="/samples/WOMEN-Skirts-id_00004406-02_7_additional_segm.png" class="svelte-1gwcbp"></label>
124
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-1" value="1" class="svelte-1gwcbp">
125
- <label for="sample-1" class="svelte-1gwcbp"><img src="/static/samples/MEN-Pants-id_00002565-02_1_front_segm.png" alt="/samples/MEN-Pants-id_00002565-02_1_front_segm.png" class="svelte-1gwcbp"></label>
126
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-2" value="2" class="svelte-1gwcbp">
127
- <label for="sample-2" class="svelte-1gwcbp"><img src="/static/samples/MEN-Pants-id_00005213-02_4_full_segm.png" alt="/samples/MEN-Pants-id_00005213-02_4_full_segm.png" class="svelte-1gwcbp"></label>
128
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-3" value="3" class="svelte-1gwcbp">
129
- <label for="sample-3" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Blouses_Shirts-id_00002356-02_4_full_segm.png" alt="/samples/WOMEN-Blouses_Shirts-id_00002356-02_4_full_segm.png" class="svelte-1gwcbp"></label>
130
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-4" value="4" class="svelte-1gwcbp">
131
- <label for="sample-4" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Blouses_Shirts-id_00004090-03_7_additional_segm.png" alt="/samples/WOMEN-Blouses_Shirts-id_00004090-03_7_additional_segm.png" class="svelte-1gwcbp"></label>
132
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-5" value="5" class="svelte-1gwcbp">
133
- <label for="sample-5" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Cardigans-id_00000853-01_2_side_segm.png" alt="/samples/WOMEN-Cardigans-id_00000853-01_2_side_segm.png" class="svelte-1gwcbp"></label>
134
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-6" value="6" class="svelte-1gwcbp">
135
- <label for="sample-6" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Cardigans-id_00000899-02_1_front_segm.png" alt="/samples/WOMEN-Cardigans-id_00000899-02_1_front_segm.png" class="svelte-1gwcbp"></label>
136
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-7" value="7" class="svelte-1gwcbp">
137
- <label for="sample-7" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Cardigans-id_00006462-02_7_additional_segm.png" alt="/samples/WOMEN-Cardigans-id_00006462-02_7_additional_segm.png" class="svelte-1gwcbp"></label>
138
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-8" value="8" class="svelte-1gwcbp">
139
- <label for="sample-8" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Dresses-id_00000021-05_1_front_segm.png" alt="/samples/WOMEN-Dresses-id_00000021-05_1_front_segm.png" class="svelte-1gwcbp"></label>
140
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-9" value="9" class="svelte-1gwcbp">
141
- <label for="sample-9" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Dresses-id_00002430-04_1_front_segm.png" alt="/samples/WOMEN-Dresses-id_00002430-04_1_front_segm.png" class="svelte-1gwcbp"></label>
142
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-10" value="10" class="svelte-1gwcbp">
143
- <label for="sample-10" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Dresses-id_00002966-01_7_additional_segm.png" alt="/samples/WOMEN-Dresses-id_00002966-01_7_additional_segm.png" class="svelte-1gwcbp"></label>
144
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-11" value="11" class="svelte-1gwcbp">
145
- <label for="sample-11" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Dresses-id_00007332-01_3_back_segm.png" alt="/samples/WOMEN-Dresses-id_00007332-01_3_back_segm.png" class="svelte-1gwcbp"></label>
146
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-12" value="12" class="svelte-1gwcbp">
147
- <label for="sample-12" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Graphic_Tees-id_00007242-01_4_full_segm.png" alt="/samples/WOMEN-Graphic_Tees-id_00007242-01_4_full_segm.png" class="svelte-1gwcbp"></label>
148
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-13" value="13" class="svelte-1gwcbp">
149
- <label for="sample-13" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Jackets_Coats-id_00005263-06_1_front_segm.png" alt="/samples/WOMEN-Jackets_Coats-id_00005263-06_1_front_segm.png" class="svelte-1gwcbp"></label>
150
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-14" value="14" class="svelte-1gwcbp">
151
- <label for="sample-14" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Jackets_Coats-id_00006296-05_7_additional_segm.png" alt="/samples/WOMEN-Jackets_Coats-id_00006296-05_7_additional_segm.png" class="svelte-1gwcbp"></label>
152
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-15" value="15" class="svelte-1gwcbp">
153
- <label for="sample-15" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Rompers_Jumpsuits-id_00004575-02_1_front_segm.png" alt="/samples/WOMEN-Rompers_Jumpsuits-id_00004575-02_1_front_segm.png" class="svelte-1gwcbp"></label>
154
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-16" value="16" class="svelte-1gwcbp">
155
- <label for="sample-16" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Sweaters-id_00004667-01_4_full_segm.png" alt="/samples/WOMEN-Sweaters-id_00004667-01_4_full_segm.png" class="svelte-1gwcbp"></label>
156
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-17" value="17" class="svelte-1gwcbp">
157
- <label for="sample-17" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Tees_Tanks-id_00001620-02_4_full_segm.png" alt="/samples/WOMEN-Tees_Tanks-id_00001620-02_4_full_segm.png" class="svelte-1gwcbp"></label>
158
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-18" value="18" class="svelte-1gwcbp">
159
- <label for="sample-18" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Tees_Tanks-id_00005288-01_2_side_segm.png" alt="/samples/WOMEN-Tees_Tanks-id_00005288-01_2_side_segm.png" class="svelte-1gwcbp"></label>
160
- </div><div class="snap-always snap-start"><input type="radio" name="samples" id="sample-19" value="19" class="svelte-1gwcbp">
161
- <label for="sample-19" class="svelte-1gwcbp"><img src="/static/samples/WOMEN-Tees_Tanks-id_00006566-04_4_full_segm.png" alt="/samples/WOMEN-Tees_Tanks-id_00006566-04_4_full_segm.png" class="svelte-1gwcbp"></label>
162
- </div></div></form>
163
- </div>
164
- <div class="drawings py-3 -mx-3 svelte-237ry5"><div><div class="relative overflow-clip"><canvas class="canvas svelte-1k5plc8" width="256" height="512"></canvas>
165
- <canvas class="brush svelte-1k5plc8" width="10" height="10"></canvas>
166
- <span class="label svelte-1k5plc8">face</span>
167
- <button class="absolute bottom-0 left-0 p-3" disabled><svg xmlns="http://www.w3.org/2000/svg" width="20" viewBox="0 0 512 512" class=""><path fill="white" stroke="black" stroke-width="30" d="M480 256c0 123.4-100.5 223.9-223.9 223.9c-48.84 0-95.17-15.58-134.2-44.86c-14.12-10.59-16.97-30.66-6.375-44.81c10.59-14.12 30.62-16.94 44.81-6.375c27.84 20.91 61 31.94 95.88 31.94C344.3 415.8 416 344.1 416 256s-71.69-159.8-159.8-159.8c-37.46 0-73.09 13.49-101.3 36.64l45.12 45.14c17.01 17.02 4.955 46.1-19.1 46.1H35.17C24.58 224.1 16 215.5 16 204.9V59.04c0-24.04 29.07-36.08 46.07-19.07l47.6 47.63C149.9 52.71 201.5 32.11 256.1 32.11C379.5 32.11 480 132.6 480 256z"></path></svg></button></div>
168
- </div>
169
- <div class="relative overflow-clip flex flex-col justify-center items-center w-full h-full">
170
- </div>
171
-
172
- </div>
173
- <button class="svelte-237ry5">Generate Human
174
- </button>
175
- <button disabled class="svelte-237ry5">Save Result
176
- </button>
177
- <form><h4 class="font-bold mt-6 mb-2 my-6 leading-6">Texture Description</h4>
178
- <div class="sections svelte-uoay71"><select name="texture0" class="svelte-uoay71"><option disabled selected value="upper clothing texture">upper clothing texture</option><option value="pure color">pure color</option>`<option value="stripe/spline">stripe/spline</option>`<option value="plaid/lattice">plaid/lattice</option>`<option value="floral">floral</option>`<option value="denim">denim</option>`</select><select name="texture1" class="svelte-uoay71"><option disabled selected value="lower clothing texture">lower clothing texture</option><option value="pure color">pure color</option>`<option value="stripe/spline">stripe/spline</option>`<option value="plaid/lattice">plaid/lattice</option>`<option value="floral">floral</option>`<option value="denim">denim</option>`</select><select name="texture2" class="svelte-uoay71"><option disabled selected value="outer clothing texture">outer clothing texture</option><option value="pure color">pure color</option>`<option value="stripe/spline">stripe/spline</option>`<option value="plaid/lattice">plaid/lattice</option>`<option value="floral">floral</option>`<option value="denim">denim</option>`</select></div>
179
- <h4 class="font-bold mt-6 mb-2 my-6 leading-6">Random Seed</h4>
180
- <input type="Number" name="seed" placeholder="Integer Seed" class="svelte-uoay71" value="861014016">
181
- <button class="svelte-uoay71">Random
182
- </button>
183
- <h4 class="font-bold mt-6 mb-2 my-6 leading-6">Sample Steps</h4>
184
- <div class="flex"><input type="range" name="steps" min="10" max="300" step="1" class="svelte-uoay71" value="10">
185
- <label class="pl-2 svelte-uoay71" for="steps">10</label></div>
186
- </form>
187
- </div>
188
-
189
-
190
- <script type="module" data-sveltekit-hydrate="1lpy11h">
191
- import { start } from "/static/_app/immutable/start-62e3dfe2.js";
192
- start({
193
- target: document.querySelector('[data-sveltekit-hydrate="1lpy11h"]').parentNode,
194
- paths: {"base":"/static","assets":"/static"},
195
- session: {},
196
- route: true,
197
- spa: false,
198
- trailing_slash: "never",
199
- hydrate: {
200
- status: 200,
201
- error: null,
202
- nodes: [0, 2],
203
- params: {},
204
- routeId: ""
205
- }
206
- });
207
- </script>
208
- </body>
209
- </html>
 
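The radio inputs in the deleted page above hard-code 24 brush labels, each with a fixed RGB fill. Gathered into one table for reference (values transcribed from the markup; the Python form itself is only a convenience):

# Brush label -> RGB fill, transcribed from the <svg> rects in index.html.
PALETTE = {
    'background': (0, 0, 0),
    'bag': (255, 140, 0),
    'belt': (255, 255, 0),
    'dress': (255, 250, 205),
    'earrings': (130, 165, 180),
    'eyeglass': (0, 100, 0),
    'face': (16, 78, 139),
    'footwear': (245, 222, 179),
    'gloves': (213, 140, 88),
    'hair': (255, 0, 0),
    'headwear': (127, 255, 212),
    'leggings': (70, 130, 180),
    'necklace': (90, 140, 90),
    'neckwear': (50, 205, 50),
    'outer': (220, 220, 220),
    'pants': (211, 211, 211),
    'ring': (50, 205, 174),
    'rompers': (185, 210, 205),
    'skin': (144, 238, 144),
    'skirt': (250, 235, 215),
    'socks': (160, 140, 88),
    'tie': (225, 141, 151),
    'top': (255, 250, 250),
    'wrist wearing': (50, 155, 250),
}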
spaces/CVPR/regionclip-demo/detectron2/evaluation/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
3
- from .coco_evaluation import COCOEvaluator
4
- from .rotated_coco_evaluation import RotatedCOCOEvaluator
5
- from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
6
- from .lvis_evaluation import LVISEvaluator
7
- from .panoptic_evaluation import COCOPanopticEvaluator
8
- from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
9
- from .sem_seg_evaluation import SemSegEvaluator
10
- from .testing import print_csv_format, verify_results
11
- from .flickr30k_evaluation import FLICKR30KEvaluator
12
-
13
- __all__ = [k for k in globals().keys() if not k.startswith("_")]
 
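This __init__.py simply re-exports detectron2's evaluation API. A minimal usage sketch of those exports, assuming a model and data loader built elsewhere with detectron2's standard helpers and a registered dataset name (the name below is a placeholder):

from detectron2.evaluation import COCOEvaluator, inference_on_dataset

def evaluate(model, val_loader, dataset_name="my_dataset_val"):
    # dataset_name must be registered in detectron2's DatasetCatalog;
    # "my_dataset_val" is a placeholder, not a dataset from this repo.
    evaluator = COCOEvaluator(dataset_name, output_dir="./eval_out")
    return inference_on_dataset(model, val_loader, evaluator)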
spaces/Caoyunkang/Segment-Any-Anomaly/utils/metrics.py DELETED
@@ -1,219 +0,0 @@
1
- import numpy as np
2
- from skimage import measure
3
- from sklearn.metrics import auc
4
- from sklearn.metrics import precision_recall_curve
5
- from sklearn.metrics import roc_auc_score
6
- from sklearn.metrics import roc_curve
7
-
8
- def calculate_max_f1(gt, scores):
9
- precision, recall, thresholds = precision_recall_curve(gt, scores)
10
- a = 2 * precision * recall
11
- b = precision + recall
12
- f1s = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
13
- index = np.argmax(f1s)
14
- max_f1 = f1s[index]
15
- threshold = thresholds[index]
16
- return max_f1, threshold
17
-
18
- def metric_cal(scores, gt_list, gt_mask_list, cal_pro=False):
19
- # calculate image-level ROC AUC score
20
- img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
21
- gt_list = np.asarray(gt_list, dtype=int)
22
- fpr, tpr, _ = roc_curve(gt_list, img_scores)
23
- img_roc_auc = roc_auc_score(gt_list, img_scores)
24
- # print('INFO: image ROCAUC: %.3f' % (img_roc_auc))
25
-
26
- img_f1, img_threshold = calculate_max_f1(gt_list, img_scores)
27
-
28
- gt_mask = np.asarray(gt_mask_list, dtype=int)
29
- pxl_f1, pxl_threshold = calculate_max_f1(gt_mask.flatten(), scores.flatten())
30
-
31
- # calculate per-pixel level ROCAUC
32
- fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten())
33
- per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten())
34
-
35
-
36
- # calculate max-f1 region
37
- if cal_pro:
38
- # pro_auc_score = cal_pro_metric(gt_mask_list, scores, fpr_thresh=0.3)
39
- # calculate max-f1 region
40
- max_f1_region = calculate_max_f1_region(gt_mask_list, scores)
41
-
42
- else:
43
- # pro_auc_score = 0
44
- # calculate max-f1 region
45
- max_f1_region = 0
46
-
47
- result_dict = {'i_roc': img_roc_auc * 100, 'p_roc': per_pixel_rocauc * 100,
48
- 'i_f1': img_f1 * 100, 'i_thresh': img_threshold, 'p_f1': pxl_f1 * 100, 'p_thresh': pxl_threshold, 'r_f1': max_f1_region * 100}
49
-
50
- return result_dict
51
-
52
-
53
- def rescale(x):
54
- return (x - x.min()) / (x.max() - x.min())
55
-
56
-
57
- def cal_pro_metric(labeled_imgs, score_imgs, fpr_thresh=0.3, max_steps=200):
58
- labeled_imgs = np.array(labeled_imgs)
59
- labeled_imgs[labeled_imgs <= 0.45] = 0
60
- labeled_imgs[labeled_imgs > 0.45] = 1
61
- labeled_imgs = labeled_imgs.astype(np.bool)
62
-
63
- max_th = score_imgs.max()
64
- min_th = score_imgs.min()
65
- delta = (max_th - min_th) / max_steps
66
-
67
- ious_mean = []
68
- ious_std = []
69
- pros_mean = []
70
- pros_std = []
71
- threds = []
72
- fprs = []
73
- binary_score_maps = np.zeros_like(score_imgs, dtype=bool)
74
- for step in range(max_steps):
75
- thred = max_th - step * delta
76
- # segmentation
77
- binary_score_maps[score_imgs <= thred] = 0
78
- binary_score_maps[score_imgs > thred] = 1
79
-
80
- pro = [] # per region overlap
81
- iou = [] # per image iou
82
- # pro: find each connected gt region, compute the overlapped pixels between the gt region and predicted region
83
- # iou: for each image, compute the ratio, i.e. intersection/union between the gt and predicted binary map
84
- for i in range(len(binary_score_maps)): # for i th image
85
- # pro (per region level)
86
- label_map = measure.label(labeled_imgs[i], connectivity=2)
87
- props = measure.regionprops(label_map)
88
- for prop in props:
89
- x_min, y_min, x_max, y_max = prop.bbox
90
- cropped_pred_label = binary_score_maps[i][x_min:x_max, y_min:y_max]
91
- # cropped_mask = masks[i][x_min:x_max, y_min:y_max]
92
- cropped_mask = prop.filled_image # corrected!
93
- intersection = np.logical_and(cropped_pred_label, cropped_mask).astype(np.float32).sum()
94
- pro.append(intersection / prop.area)
95
- # iou (per image level)
96
- intersection = np.logical_and(binary_score_maps[i], labeled_imgs[i]).astype(np.float32).sum()
97
- union = np.logical_or(binary_score_maps[i], labeled_imgs[i]).astype(np.float32).sum()
98
- if labeled_imgs[i].any() > 0: # when the gt have no anomaly pixels, skip it
99
- iou.append(intersection / union)
100
- # against steps and average metrics on the testing data
101
- ious_mean.append(np.array(iou).mean())
102
- # print("per image mean iou:", np.array(iou).mean())
103
- ious_std.append(np.array(iou).std())
104
- pros_mean.append(np.array(pro).mean())
105
- pros_std.append(np.array(pro).std())
106
- # fpr for pro-auc
107
- masks_neg = ~labeled_imgs
108
- fpr = np.logical_and(masks_neg, binary_score_maps).sum() / masks_neg.sum()
109
- fprs.append(fpr)
110
- threds.append(thred)
111
-
112
- # as array
113
- threds = np.array(threds)
114
- pros_mean = np.array(pros_mean)
115
- pros_std = np.array(pros_std)
116
- fprs = np.array(fprs)
117
-
118
- # default 30% fpr vs pro, pro_auc
119
- idx = fprs <= fpr_thresh  # find the indices of fprs that are less than expect_fpr (default 0.3)
120
- fprs_selected = fprs[idx]
121
- fprs_selected = rescale(fprs_selected) # rescale fpr [0,0.3] -> [0, 1]
122
- pros_mean_selected = pros_mean[idx]
123
- pro_auc_score = auc(fprs_selected, pros_mean_selected)
124
- # print("pro auc ({}% FPR):".format(int(expect_fpr * 100)), pro_auc_score)
125
- return pro_auc_score
126
-
127
- def calculate_max_f1_region(labeled_imgs, score_imgs, pro_thresh=0.6, max_steps=200):
128
- labeled_imgs = np.array(labeled_imgs)
129
- # labeled_imgs[labeled_imgs <= 0.1] = 0
130
- # labeled_imgs[labeled_imgs > 0.1] = 1
131
- labeled_imgs = labeled_imgs.astype(bool)
132
-
133
- max_th = score_imgs.max()
134
- min_th = score_imgs.min()
135
- delta = (max_th - min_th) / max_steps
136
-
137
- f1_list = []
138
- recall_list = []
139
- precision_list = []
140
-
141
- binary_score_maps = np.zeros_like(score_imgs, dtype=bool)
142
- for step in range(max_steps):
143
- thred = max_th - step * delta
144
- # segmentation
145
- binary_score_maps[score_imgs <= thred] = 0
146
- binary_score_maps[score_imgs > thred] = 1
147
-
148
- pro = [] # per region overlap
149
-
150
- predict_region_number = 0
151
- gt_region_number = 0
152
-
153
- # pro: find each connected gt region, compute the overlapped pixels between the gt region and predicted region
154
- # iou: for each image, compute the ratio, i.e. intersection/union between the gt and predicted binary map
155
- for i in range(len(binary_score_maps)): # for i th image
156
- # pro (per region level)
157
- label_map = measure.label(labeled_imgs[i], connectivity=2)
158
- props = measure.regionprops(label_map)
159
-
160
- score_map = measure.label(binary_score_maps[i], connectivity=2)
161
- score_props = measure.regionprops(score_map)
162
-
163
- predict_region_number += len(score_props)
164
- gt_region_number += len(props)
165
-
166
- # if len(score_props) == 0 or len(props) == 0:
167
- # pro.append(0)
168
- # continue
169
-
170
- for score_prop in score_props:
171
- x_min_0, y_min_0, x_max_0, y_max_0 = score_prop.bbox
172
- cur_pros = [0]
173
- for prop in props:
174
- x_min_1, y_min_1, x_max_1, y_max_1 = prop.bbox
175
-
176
- x_min = min(x_min_0, x_min_1)
177
- y_min = min(y_min_0, y_min_1)
178
- x_max = max(x_max_0, x_max_1)
179
- y_max = max(y_max_0, y_max_1)
180
-
181
- cropped_pred_label = binary_score_maps[i][x_min:x_max, y_min:y_max]
182
- cropped_gt_label = labeled_imgs[i][x_min:x_max, y_min:y_max]
183
-
184
- # cropped_mask = masks[i][x_min:x_max, y_min:y_max]
185
- # cropped_mask = prop.filled_image # corrected!
186
- intersection = np.logical_and(cropped_pred_label, cropped_gt_label).astype(np.float32).sum()
187
- union = np.logical_or(cropped_pred_label, cropped_gt_label).astype(np.float32).sum()
188
- cur_pros.append(intersection / union)
189
-
190
- pro.append(max(cur_pros))
191
-
192
- pro = np.array(pro)
193
-
194
- if gt_region_number == 0 or predict_region_number == 0:
195
- print(f'gt_number: {gt_region_number}, pred_number: {predict_region_number}')
196
- recall = 0
197
- precision = 0
198
- f1 = 0
199
- else:
200
- recall = np.array(pro >= pro_thresh).astype(np.float32).sum() / gt_region_number
201
- precision = np.array(pro >= pro_thresh).astype(np.float32).sum() / predict_region_number
202
-
203
- if recall == 0 or precision == 0:
204
- f1 = 0
205
- else:
206
- f1 = 2 * recall * precision / (recall + precision)
207
-
208
-
209
- f1_list.append(f1)
210
- recall_list.append(recall)
211
- precision_list.append(precision)
212
-
213
- # as array
214
- f1_list = np.array(f1_list)
215
- max_f1 = f1_list.max()
216
- cor_recall = recall_list[f1_list.argmax()]
217
- cor_precision = precision_list[f1_list.argmax()]
218
- print(f'cor recall: {cor_recall}, cor precision: {cor_precision}')
219
- return max_f1
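
For reference, a minimal sketch of how the deleted metric above could be driven. It assumes numpy (np), skimage.measure (measure), sklearn.metrics.auc, and a rescale helper were imported at the top of the file, which this hunk does not show; the array shapes and random demo data are illustrative assumptions, not part of the original module.

import numpy as np

# Hypothetical driver: four binary ground-truth masks and matching anomaly score maps.
gt_masks = np.random.rand(4, 64, 64) > 0.9
score_maps = np.random.rand(4, 64, 64).astype(np.float32)

# Sweep 50 thresholds and report the best region-level F1 at PRO >= 0.6.
best_f1 = calculate_max_f1_region(gt_masks, score_maps, pro_thresh=0.6, max_steps=50)
print(f"max region F1: {best_f1:.3f}")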
 
spaces/CarlDennis/HYTTS/text/mandarin.py DELETED
@@ -1,170 +0,0 @@
- import os
- import re
- import sys
-
- import jieba
- import cn2an
- import logging
- from pypinyin import lazy_pinyin, BOPOMOFO
-
- logging.getLogger('jieba').setLevel(logging.WARNING)
-
-
- # List of (Latin alphabet, bopomofo) pairs:
- _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-     ('a', 'ㄟˉ'),
-     ('b', 'ㄅㄧˋ'),
-     ('c', 'ㄙㄧˉ'),
-     ('d', 'ㄉㄧˋ'),
-     ('e', 'ㄧˋ'),
-     ('f', 'ㄝˊㄈㄨˋ'),
-     ('g', 'ㄐㄧˋ'),
-     ('h', 'ㄝˇㄑㄩˋ'),
-     ('i', 'ㄞˋ'),
-     ('j', 'ㄐㄟˋ'),
-     ('k', 'ㄎㄟˋ'),
-     ('l', 'ㄝˊㄛˋ'),
-     ('m', 'ㄝˊㄇㄨˋ'),
-     ('n', 'ㄣˉ'),
-     ('o', 'ㄡˉ'),
-     ('p', 'ㄆㄧˉ'),
-     ('q', 'ㄎㄧㄡˉ'),
-     ('r', 'ㄚˋ'),
-     ('s', 'ㄝˊㄙˋ'),
-     ('t', 'ㄊㄧˋ'),
-     ('u', 'ㄧㄡˉ'),
-     ('v', 'ㄨㄧˉ'),
-     ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
-     ('x', 'ㄝˉㄎㄨˋㄙˋ'),
-     ('y', 'ㄨㄞˋ'),
-     ('z', 'ㄗㄟˋ')
- ]]
-
- # List of (bopomofo, romaji) pairs:
- _bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [
-     ('ㄅㄛ', 'p⁼wo'),
-     ('ㄆㄛ', 'pʰwo'),
-     ('ㄇㄛ', 'mwo'),
-     ('ㄈㄛ', 'fwo'),
-     ('ㄅ', 'p⁼'),
-     ('ㄆ', 'pʰ'),
-     ('ㄇ', 'm'),
-     ('ㄈ', 'f'),
-     ('ㄉ', 't⁼'),
-     ('ㄊ', 'tʰ'),
-     ('ㄋ', 'n'),
-     ('ㄌ', 'l'),
-     ('ㄍ', 'k⁼'),
-     ('ㄎ', 'kʰ'),
-     ('ㄏ', 'h'),
-     ('ㄐ', 'ʧ⁼'),
-     ('ㄑ', 'ʧʰ'),
-     ('ㄒ', 'ʃ'),
-     ('ㄓ', 'ʦ`⁼'),
-     ('ㄔ', 'ʦ`ʰ'),
-     ('ㄕ', 's`'),
-     ('ㄖ', 'ɹ`'),
-     ('ㄗ', 'ʦ⁼'),
-     ('ㄘ', 'ʦʰ'),
-     ('ㄙ', 's'),
-     ('ㄚ', 'a'),
-     ('ㄛ', 'o'),
-     ('ㄜ', 'ə'),
-     ('ㄝ', 'e'),
-     ('ㄞ', 'ai'),
-     ('ㄟ', 'ei'),
-     ('ㄠ', 'au'),
-     ('ㄡ', 'ou'),
-     ('ㄧㄢ', 'yeNN'),
-     ('ㄢ', 'aNN'),
-     ('ㄧㄣ', 'iNN'),
-     ('ㄣ', 'əNN'),
-     ('ㄤ', 'aNg'),
-     ('ㄧㄥ', 'iNg'),
-     ('ㄨㄥ', 'uNg'),
-     ('ㄩㄥ', 'yuNg'),
-     ('ㄥ', 'əNg'),
-     ('ㄦ', 'əɻ'),
-     ('ㄧ', 'i'),
-     ('ㄨ', 'u'),
-     ('ㄩ', 'ɥ'),
-     ('ˉ', '→'),
-     ('ˊ', '↑'),
-     ('ˇ', '↓↑'),
-     ('ˋ', '↓'),
-     ('˙', ''),
-     (',', ','),
-     ('。', '.'),
-     ('!', '!'),
-     ('?', '?'),
-     ('—', '-')
- ]]
-
- # List of (romaji, ipa) pairs:
- _romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-     ('ʃy', 'ʃ'),
-     ('ʧʰy', 'ʧʰ'),
-     ('ʧ⁼y', 'ʧ⁼'),
-     ('NN', 'n'),
-     ('Ng', 'ŋ'),
-     ('y', 'j'),
-     ('h', 'x')
- ]]
-
-
- def number_to_chinese(text):
-     numbers = re.findall(r'\d+(?:\.?\d+)?', text)
-     for number in numbers:
-         text = text.replace(number, cn2an.an2cn(number), 1)
-     return text
-
-
- def chinese_to_bopomofo(text):
-     text = text.replace('、', ',').replace(';', ',').replace(':', ',')
-     words = jieba.lcut(text, cut_all=False)
-     text = ''
-     for word in words:
-         bopomofos = lazy_pinyin(word, BOPOMOFO)
-         if not re.search('[\u4e00-\u9fff]', word):
-             text += word
-             continue
-         for i in range(len(bopomofos)):
-             if re.match('[\u3105-\u3129]', bopomofos[i][-1]):
-                 bopomofos[i] += 'ˉ'
-         if text != '':
-             text += ' '
-         text += ''.join(bopomofos)
-     return text
-
-
- def latin_to_bopomofo(text):
-     for regex, replacement in _latin_to_bopomofo:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def bopomofo_to_romaji(text):
-     for regex, replacement in _bopomofo_to_romaji:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def chinese_to_romaji(text):
-     text = number_to_chinese(text)
-     text = chinese_to_bopomofo(text)
-     text = latin_to_bopomofo(text)
-     text = bopomofo_to_romaji(text)
-     text = re.sub('i[aoe]', lambda x: 'y' + x.group(0)[1:], text)
-     text = re.sub('u[aoəe]', lambda x: 'w' + x.group(0)[1:], text)
-     text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', lambda x: x.group(1) +
-                   'ɹ`' + x.group(2), text).replace('ɻ', 'ɹ`')
-     text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)',
-                   lambda x: x.group(1) + 'ɹ' + x.group(2), text)
-     return text
-
-
- def chinese_to_lazy_ipa(text):
-     text = chinese_to_romaji(text)
-     for regex, replacement in _romaji_to_ipa:
-         text = re.sub(regex, replacement, text)
-     return text
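
A usage sketch for the pipeline above, assuming jieba, cn2an, and pypinyin are installed; the sample sentence is an illustrative assumption, not part of the original file.

# Hypothetical example: digits are first expanded to Chinese numerals, then the
# text flows through bopomofo -> romaji -> lazy IPA with tone arrows.
sample = '我有2只猫'
print(chinese_to_lazy_ipa(sample))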
 
spaces/CikeyQI/meme-api/meme_generator/memes/beat_head/__init__.py DELETED
@@ -1,47 +0,0 @@
- from pathlib import Path
- from typing import List
-
- from PIL.Image import Image as IMG
- from pil_utils import BuildImage
-
- from meme_generator import add_meme
- from meme_generator.exception import TextOverLength
- from meme_generator.utils import save_gif
-
- img_dir = Path(__file__).parent / "images"
-
-
- def beat_head(images: List[BuildImage], texts: List[str], args):
-     text = texts[0] if texts else "怎么说话的你"
-     img = images[0].convert("RGBA")
-     locs = [(160, 121, 76, 76), (172, 124, 69, 69), (208, 166, 52, 52)]
-     frames: List[IMG] = []
-     for i in range(3):
-         x, y, w, h = locs[i]
-         head = img.resize((w, h), keep_ratio=True).circle()
-         frame = BuildImage.open(img_dir / f"{i}.png")
-         frame.paste(head, (x, y), below=True)
-         try:
-             frame.draw_text(
-                 (175, 28, 316, 82),
-                 text,
-                 max_fontsize=50,
-                 min_fontsize=10,
-                 allow_wrap=True,
-             )
-         except ValueError:
-             raise TextOverLength(text)
-
-         frames.append(frame.image)
-     return save_gif(frames, 0.05)
-
-
- add_meme(
-     "beat_head",
-     beat_head,
-     min_images=1,
-     max_images=1,
-     min_texts=0,
-     max_texts=1,
-     keywords=["拍头"],
- )
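
A rough sketch of how a registered meme like this is typically invoked; the get_meme lookup, the call signature, and the avatar path are assumptions about the surrounding meme_generator project, not guaranteed by this file.

from meme_generator import get_meme

# Hypothetical caller: render the meme with one avatar and the default text.
meme = get_meme("beat_head")
result = meme(images=["avatar.png"], texts=[], args={})  # assumed to return a BytesIO holding the GIF
with open("beat_head.gif", "wb") as f:
    f.write(result.getvalue())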
 
spaces/Cong723/gpt-academic-public/crazy_functions/高级功能函数模板.py DELETED
@@ -1,29 +0,0 @@
- from toolbox import CatchException, update_ui
- from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
- import datetime
- @CatchException
- def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     """
-     txt             text the user typed into the input box, e.g. a passage to translate or a path to files awaiting processing
-     llm_kwargs      GPT model parameters such as temperature and top_p; normally passed through unchanged
-     plugin_kwargs   parameters for the plugin model; currently unused
-     chatbot         handle to the chat display box, used to show output to the user
-     history         chat history, i.e. the preceding context
-     system_prompt   silent system prompt for GPT
-     web_port        port the application is currently running on
-     """
-     history = []    # clear the history to avoid input overflow
-     chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!"))
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI promptly, since the GPT request below takes a while
-     for i in range(5):
-         currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
-         currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
-         i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。'
-         gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-             inputs=i_say, inputs_show_user=i_say,
-             llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
-             sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。"
-         )
-         chatbot[-1] = (i_say, gpt_say)
-         history.append(i_say); history.append(gpt_say)
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
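
The template above boils down to a small recipe for new plugins: a @CatchException generator that clears history, appends to chatbot, and yields update_ui after every mutation. A minimal sketch following that recipe; my_plugin and its echo behavior are purely illustrative assumptions.

from toolbox import CatchException, update_ui

@CatchException
def my_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []  # clear the history to avoid context overflow
    chatbot.append((txt, f"Echo: {txt}"))  # hypothetical response shown to the user
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI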
 
spaces/CosmoAI/ChitChat/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: ChitChat
- emoji: 🔥
- colorFrom: blue
- colorTo: pink
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/collect_env.py DELETED
@@ -1,14 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
- import PIL
-
- from torch.utils.collect_env import get_pretty_env_info
-
-
- def get_pil_version():
-     return "\n Pillow ({})".format(PIL.__version__)
-
-
- def collect_env_info():
-     env_str = get_pretty_env_info()
-     env_str += get_pil_version()
-     return env_str
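
A minimal usage sketch for the helper above; the __main__ guard is an illustrative assumption, since the module itself only defines the two functions.

# Hypothetical usage: print the PyTorch environment report plus the Pillow version line.
if __name__ == "__main__":
    print(collect_env_info())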