parquet-converter committed on
Commit b4815c2 · 1 Parent(s): 8e84f43

Update parquet files (step 17 of 296)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/theb/README.md +0 -14
  2. spaces/17TheWord/RealESRGAN/tests/test_dataset.py +0 -151
  3. spaces/17TheWord/vits-models/modules.py +0 -388
  4. spaces/1gistliPinn/ChatGPT4/Examples/BMW Coding E-SYS V.3.24.3 Plken And PIN Utorrent LINK.md +0 -7
  5. spaces/1line/AutoGPT/CODE_OF_CONDUCT.md +0 -40
  6. spaces/1line/AutoGPT/main.py +0 -1
  7. spaces/1phancelerku/anime-remove-background/Download APK for GTA Vice City and Experience the 80s on Your Phone.md +0 -211
  8. spaces/1phancelerku/anime-remove-background/Download My Talking Tom Mod Apk Versi Lama untuk Android dan iOS.md +0 -104
  9. spaces/44ov41za8i/FreeVC/speaker_encoder/params_data.py +0 -29
  10. spaces/52Hz/SRMNet_AWGN_denoising/main_test_SRMNet.py +0 -97
  11. spaces/AI-Dashboards/Graph.Visualization.Plotly.Sunbursts.Treemaps.WebGL/README.md +0 -12
  12. spaces/AIConsultant/MusicGen/audiocraft/data/zip.py +0 -76
  13. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/loss.py +0 -307
  14. spaces/AISloth/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline/README.md +0 -14
  15. spaces/Abhiboken12/travelling_ai/app.py +0 -34
  16. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Theb.py +0 -28
  17. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/random.py +0 -21
  18. spaces/AlekseyCalvin/dreambooth-training3/README.md +0 -14
  19. spaces/AlexWang/lama/saicinpainting/training/modules/spatial_transform.py +0 -49
  20. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/解析项目源代码.py +0 -266
  21. spaces/Amrrs/fashion-aggregator-duplicated/README.md +0 -13
  22. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/dataset/style_dataset.py +0 -18
  23. spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/README.md +0 -39
  24. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py +0 -2
  25. spaces/Andy1621/uniformer_image_segmentation/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py +0 -9
  26. spaces/Anitha0531/SpeechtoText/README.md +0 -13
  27. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/__init__.py +0 -41
  28. spaces/Ariharasudhan/YoloV5/README.md +0 -13
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/wheel.py +0 -1082
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/abc.py +0 -137
  31. spaces/Avinash-12035/MyGenAIChatBot/README.md +0 -12
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/config.py +0 -87
  33. spaces/BAAI/vid2vid-zero/app.py +0 -72
  34. spaces/BaitMan/abroader-otters/greeting.md +0 -6
  35. spaces/Banbri/zcvzcv/src/components/ui/command.tsx +0 -155
  36. spaces/Bart92/RVC_HF/diffq/diffq.py +0 -286
  37. spaces/Bart92/RVC_HF/infer/lib/rmvpe.py +0 -717
  38. spaces/Benson/text-generation/Examples/Descargar Fid Q Ielewe Mitaa Mp3.md +0 -49
  39. spaces/Benson/text-generation/Examples/Descargar Gratis Bitcoin Bot.md +0 -77
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/args.py +0 -648
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/tasks.py +0 -387
  42. spaces/Billius/runwayml-stable-diffusion-v1-5-04-07-2023/README.md +0 -13
  43. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/cityscapes.py +0 -333
  44. spaces/CVPR/LIVE/thrust/thrust/count.h +0 -235
  45. spaces/CVPR/LIVE/thrust/thrust/random/uniform_real_distribution.h +0 -274
  46. spaces/CVPR/drawings-to-human/static/_app/immutable/pages/index.svelte-b5d75a5f.js +0 -19
  47. spaces/CVPR/regionclip-demo/detectron2/layers/batch_norm.py +0 -243
  48. spaces/CamCam17/Alexwww-davide-comic-book-characters/Dockerfile +0 -17
  49. spaces/CikeyQI/meme-api/meme_generator/memes/loading/__init__.py +0 -36
  50. spaces/CofAI/chat.b4/g4f/Provider/Providers/helpers/theb.py +0 -48
spaces/101-5/gpt4free/g4f/.v1/gpt4free/theb/README.md DELETED
@@ -1,14 +0,0 @@
- ### Example: `theb` (use like openai pypi package) <a name="example-theb"></a>
-
- ```python
- # import library
- from gpt4free import theb
-
- # simple streaming completion
-
- while True:
-     x = input()
-     for token in theb.Completion.create(x):
-         print(token, end='', flush=True)
-     print("")
- ```
spaces/17TheWord/RealESRGAN/tests/test_dataset.py DELETED
@@ -1,151 +0,0 @@
- import pytest
- import yaml
-
- from realesrgan.data.realesrgan_dataset import RealESRGANDataset
- from realesrgan.data.realesrgan_paired_dataset import RealESRGANPairedDataset
-
-
- def test_realesrgan_dataset():
-
-     with open('tests/data/test_realesrgan_dataset.yml', mode='r') as f:
-         opt = yaml.load(f, Loader=yaml.FullLoader)
-
-     dataset = RealESRGANDataset(opt)
-     assert dataset.io_backend_opt['type'] == 'disk'  # io backend
-     assert len(dataset) == 2  # whether to read correct meta info
-     assert dataset.kernel_list == [
-         'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'
-     ]  # correct initialization the degradation configurations
-     assert dataset.betag_range2 == [0.5, 4]
-
-     # test __getitem__
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 400, 400)
-     assert result['kernel1'].shape == (21, 21)
-     assert result['kernel2'].shape == (21, 21)
-     assert result['sinc_kernel'].shape == (21, 21)
-     assert result['gt_path'] == 'tests/data/gt/baboon.png'
-
-     # ------------------ test lmdb backend -------------------- #
-     opt['dataroot_gt'] = 'tests/data/gt.lmdb'
-     opt['io_backend']['type'] = 'lmdb'
-
-     dataset = RealESRGANDataset(opt)
-     assert dataset.io_backend_opt['type'] == 'lmdb'  # io backend
-     assert len(dataset.paths) == 2  # whether to read correct meta info
-     assert dataset.kernel_list == [
-         'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'
-     ]  # correct initialization the degradation configurations
-     assert dataset.betag_range2 == [0.5, 4]
-
-     # test __getitem__
-     result = dataset.__getitem__(1)
-     # check returned keys
-     expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 400, 400)
-     assert result['kernel1'].shape == (21, 21)
-     assert result['kernel2'].shape == (21, 21)
-     assert result['sinc_kernel'].shape == (21, 21)
-     assert result['gt_path'] == 'comic'
-
-     # ------------------ test with sinc_prob = 0 -------------------- #
-     opt['dataroot_gt'] = 'tests/data/gt.lmdb'
-     opt['io_backend']['type'] = 'lmdb'
-     opt['sinc_prob'] = 0
-     opt['sinc_prob2'] = 0
-     opt['final_sinc_prob'] = 0
-     dataset = RealESRGANDataset(opt)
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 400, 400)
-     assert result['kernel1'].shape == (21, 21)
-     assert result['kernel2'].shape == (21, 21)
-     assert result['sinc_kernel'].shape == (21, 21)
-     assert result['gt_path'] == 'baboon'
-
-     # ------------------ lmdb backend should have paths ends with lmdb -------------------- #
-     with pytest.raises(ValueError):
-         opt['dataroot_gt'] = 'tests/data/gt'
-         opt['io_backend']['type'] = 'lmdb'
-         dataset = RealESRGANDataset(opt)
-
-
- def test_realesrgan_paired_dataset():
-
-     with open('tests/data/test_realesrgan_paired_dataset.yml', mode='r') as f:
-         opt = yaml.load(f, Loader=yaml.FullLoader)
-
-     dataset = RealESRGANPairedDataset(opt)
-     assert dataset.io_backend_opt['type'] == 'disk'  # io backend
-     assert len(dataset) == 2  # whether to read correct meta info
-
-     # test __getitem__
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 128, 128)
-     assert result['lq'].shape == (3, 32, 32)
-     assert result['gt_path'] == 'tests/data/gt/baboon.png'
-     assert result['lq_path'] == 'tests/data/lq/baboon.png'
-
-     # ------------------ test lmdb backend -------------------- #
-     opt['dataroot_gt'] = 'tests/data/gt.lmdb'
-     opt['dataroot_lq'] = 'tests/data/lq.lmdb'
-     opt['io_backend']['type'] = 'lmdb'
-
-     dataset = RealESRGANPairedDataset(opt)
-     assert dataset.io_backend_opt['type'] == 'lmdb'  # io backend
-     assert len(dataset) == 2  # whether to read correct meta info
-
-     # test __getitem__
-     result = dataset.__getitem__(1)
-     # check returned keys
-     expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 128, 128)
-     assert result['lq'].shape == (3, 32, 32)
-     assert result['gt_path'] == 'comic'
-     assert result['lq_path'] == 'comic'
-
-     # ------------------ test paired_paths_from_folder -------------------- #
-     opt['dataroot_gt'] = 'tests/data/gt'
-     opt['dataroot_lq'] = 'tests/data/lq'
-     opt['io_backend'] = dict(type='disk')
-     opt['meta_info'] = None
-
-     dataset = RealESRGANPairedDataset(opt)
-     assert dataset.io_backend_opt['type'] == 'disk'  # io backend
-     assert len(dataset) == 2  # whether to read correct meta info
-
-     # test __getitem__
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 128, 128)
-     assert result['lq'].shape == (3, 32, 32)
-
-     # ------------------ test normalization -------------------- #
-     dataset.mean = [0.5, 0.5, 0.5]
-     dataset.std = [0.5, 0.5, 0.5]
-     # test __getitem__
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 128, 128)
-     assert result['lq'].shape == (3, 32, 32)
spaces/17TheWord/vits-models/modules.py DELETED
@@ -1,388 +0,0 @@
- import math
- import numpy as np
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
- from torch.nn.utils import weight_norm, remove_weight_norm
-
- import commons
- from commons import init_weights, get_padding
- from transforms import piecewise_rational_quadratic_transform
-
-
- LRELU_SLOPE = 0.1
-
-
- class LayerNorm(nn.Module):
-   def __init__(self, channels, eps=1e-5):
-     super().__init__()
-     self.channels = channels
-     self.eps = eps
-
-     self.gamma = nn.Parameter(torch.ones(channels))
-     self.beta = nn.Parameter(torch.zeros(channels))
-
-   def forward(self, x):
-     x = x.transpose(1, -1)
-     x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-     return x.transpose(1, -1)
-
-
- class ConvReluNorm(nn.Module):
-   def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-     super().__init__()
-     self.in_channels = in_channels
-     self.hidden_channels = hidden_channels
-     self.out_channels = out_channels
-     self.kernel_size = kernel_size
-     self.n_layers = n_layers
-     self.p_dropout = p_dropout
-     assert n_layers > 1, "Number of layers should be larger than 0."
-
-     self.conv_layers = nn.ModuleList()
-     self.norm_layers = nn.ModuleList()
-     self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-     self.norm_layers.append(LayerNorm(hidden_channels))
-     self.relu_drop = nn.Sequential(
-         nn.ReLU(),
-         nn.Dropout(p_dropout))
-     for _ in range(n_layers-1):
-       self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-       self.norm_layers.append(LayerNorm(hidden_channels))
-     self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-     self.proj.weight.data.zero_()
-     self.proj.bias.data.zero_()
-
-   def forward(self, x, x_mask):
-     x_org = x
-     for i in range(self.n_layers):
-       x = self.conv_layers[i](x * x_mask)
-       x = self.norm_layers[i](x)
-       x = self.relu_drop(x)
-     x = x_org + self.proj(x)
-     return x * x_mask
-
-
- class DDSConv(nn.Module):
-   """
-   Dialted and Depth-Separable Convolution
-   """
-   def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-     super().__init__()
-     self.channels = channels
-     self.kernel_size = kernel_size
-     self.n_layers = n_layers
-     self.p_dropout = p_dropout
-
-     self.drop = nn.Dropout(p_dropout)
-     self.convs_sep = nn.ModuleList()
-     self.convs_1x1 = nn.ModuleList()
-     self.norms_1 = nn.ModuleList()
-     self.norms_2 = nn.ModuleList()
-     for i in range(n_layers):
-       dilation = kernel_size ** i
-       padding = (kernel_size * dilation - dilation) // 2
-       self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-           groups=channels, dilation=dilation, padding=padding
-       ))
-       self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-       self.norms_1.append(LayerNorm(channels))
-       self.norms_2.append(LayerNorm(channels))
-
-   def forward(self, x, x_mask, g=None):
-     if g is not None:
-       x = x + g
-     for i in range(self.n_layers):
-       y = self.convs_sep[i](x * x_mask)
-       y = self.norms_1[i](y)
-       y = F.gelu(y)
-       y = self.convs_1x1[i](y)
-       y = self.norms_2[i](y)
-       y = F.gelu(y)
-       y = self.drop(y)
-       x = x + y
-     return x * x_mask
-
-
- class WN(torch.nn.Module):
-   def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-     super(WN, self).__init__()
-     assert(kernel_size % 2 == 1)
-     self.hidden_channels =hidden_channels
-     self.kernel_size = kernel_size,
-     self.dilation_rate = dilation_rate
-     self.n_layers = n_layers
-     self.gin_channels = gin_channels
-     self.p_dropout = p_dropout
-
-     self.in_layers = torch.nn.ModuleList()
-     self.res_skip_layers = torch.nn.ModuleList()
-     self.drop = nn.Dropout(p_dropout)
-
-     if gin_channels != 0:
-       cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
-       self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-     for i in range(n_layers):
-       dilation = dilation_rate ** i
-       padding = int((kernel_size * dilation - dilation) / 2)
-       in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
-                                  dilation=dilation, padding=padding)
-       in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-       self.in_layers.append(in_layer)
-
-       # last one is not necessary
-       if i < n_layers - 1:
-         res_skip_channels = 2 * hidden_channels
-       else:
-         res_skip_channels = hidden_channels
-
-       res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-       res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-       self.res_skip_layers.append(res_skip_layer)
-
-   def forward(self, x, x_mask, g=None, **kwargs):
-     output = torch.zeros_like(x)
-     n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
-     if g is not None:
-       g = self.cond_layer(g)
-
-     for i in range(self.n_layers):
-       x_in = self.in_layers[i](x)
-       if g is not None:
-         cond_offset = i * 2 * self.hidden_channels
-         g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
-       else:
-         g_l = torch.zeros_like(x_in)
-
-       acts = commons.fused_add_tanh_sigmoid_multiply(
-           x_in,
-           g_l,
-           n_channels_tensor)
-       acts = self.drop(acts)
-
-       res_skip_acts = self.res_skip_layers[i](acts)
-       if i < self.n_layers - 1:
-         res_acts = res_skip_acts[:,:self.hidden_channels,:]
-         x = (x + res_acts) * x_mask
-         output = output + res_skip_acts[:,self.hidden_channels:,:]
-       else:
-         output = output + res_skip_acts
-     return output * x_mask
-
-   def remove_weight_norm(self):
-     if self.gin_channels != 0:
-       torch.nn.utils.remove_weight_norm(self.cond_layer)
-     for l in self.in_layers:
-       torch.nn.utils.remove_weight_norm(l)
-     for l in self.res_skip_layers:
-       torch.nn.utils.remove_weight_norm(l)
-
-
- class ResBlock1(torch.nn.Module):
-   def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-     super(ResBlock1, self).__init__()
-     self.convs1 = nn.ModuleList([
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                            padding=get_padding(kernel_size, dilation[0]))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                            padding=get_padding(kernel_size, dilation[1]))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                            padding=get_padding(kernel_size, dilation[2])))
-     ])
-     self.convs1.apply(init_weights)
-
-     self.convs2 = nn.ModuleList([
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                            padding=get_padding(kernel_size, 1))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                            padding=get_padding(kernel_size, 1))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                            padding=get_padding(kernel_size, 1)))
-     ])
-     self.convs2.apply(init_weights)
-
-   def forward(self, x, x_mask=None):
-     for c1, c2 in zip(self.convs1, self.convs2):
-       xt = F.leaky_relu(x, LRELU_SLOPE)
-       if x_mask is not None:
-         xt = xt * x_mask
-       xt = c1(xt)
-       xt = F.leaky_relu(xt, LRELU_SLOPE)
-       if x_mask is not None:
-         xt = xt * x_mask
-       xt = c2(xt)
-       x = xt + x
-     if x_mask is not None:
-       x = x * x_mask
-     return x
-
-   def remove_weight_norm(self):
-     for l in self.convs1:
-       remove_weight_norm(l)
-     for l in self.convs2:
-       remove_weight_norm(l)
-
-
- class ResBlock2(torch.nn.Module):
-   def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-     super(ResBlock2, self).__init__()
-     self.convs = nn.ModuleList([
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                            padding=get_padding(kernel_size, dilation[0]))),
-         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                            padding=get_padding(kernel_size, dilation[1])))
-     ])
-     self.convs.apply(init_weights)
-
-   def forward(self, x, x_mask=None):
-     for c in self.convs:
-       xt = F.leaky_relu(x, LRELU_SLOPE)
-       if x_mask is not None:
-         xt = xt * x_mask
-       xt = c(xt)
-       x = xt + x
-     if x_mask is not None:
-       x = x * x_mask
-     return x
-
-   def remove_weight_norm(self):
-     for l in self.convs:
-       remove_weight_norm(l)
-
-
- class Log(nn.Module):
-   def forward(self, x, x_mask, reverse=False, **kwargs):
-     if not reverse:
-       y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-       logdet = torch.sum(-y, [1, 2])
-       return y, logdet
-     else:
-       x = torch.exp(x) * x_mask
-       return x
-
-
- class Flip(nn.Module):
-   def forward(self, x, *args, reverse=False, **kwargs):
-     x = torch.flip(x, [1])
-     if not reverse:
-       logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-       return x, logdet
-     else:
-       return x
-
-
- class ElementwiseAffine(nn.Module):
-   def __init__(self, channels):
-     super().__init__()
-     self.channels = channels
-     self.m = nn.Parameter(torch.zeros(channels,1))
-     self.logs = nn.Parameter(torch.zeros(channels,1))
-
-   def forward(self, x, x_mask, reverse=False, **kwargs):
-     if not reverse:
-       y = self.m + torch.exp(self.logs) * x
-       y = y * x_mask
-       logdet = torch.sum(self.logs * x_mask, [1,2])
-       return y, logdet
-     else:
-       x = (x - self.m) * torch.exp(-self.logs) * x_mask
-       return x
-
-
- class ResidualCouplingLayer(nn.Module):
-   def __init__(self,
-       channels,
-       hidden_channels,
-       kernel_size,
-       dilation_rate,
-       n_layers,
-       p_dropout=0,
-       gin_channels=0,
-       mean_only=False):
-     assert channels % 2 == 0, "channels should be divisible by 2"
-     super().__init__()
-     self.channels = channels
-     self.hidden_channels = hidden_channels
-     self.kernel_size = kernel_size
-     self.dilation_rate = dilation_rate
-     self.n_layers = n_layers
-     self.half_channels = channels // 2
-     self.mean_only = mean_only
-
-     self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-     self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
-     self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-     self.post.weight.data.zero_()
-     self.post.bias.data.zero_()
-
-   def forward(self, x, x_mask, g=None, reverse=False):
-     x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-     h = self.pre(x0) * x_mask
-     h = self.enc(h, x_mask, g=g)
-     stats = self.post(h) * x_mask
-     if not self.mean_only:
-       m, logs = torch.split(stats, [self.half_channels]*2, 1)
-     else:
-       m = stats
-       logs = torch.zeros_like(m)
-
-     if not reverse:
-       x1 = m + x1 * torch.exp(logs) * x_mask
-       x = torch.cat([x0, x1], 1)
-       logdet = torch.sum(logs, [1,2])
-       return x, logdet
-     else:
-       x1 = (x1 - m) * torch.exp(-logs) * x_mask
-       x = torch.cat([x0, x1], 1)
-       return x
-
-
- class ConvFlow(nn.Module):
-   def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
-     super().__init__()
-     self.in_channels = in_channels
-     self.filter_channels = filter_channels
-     self.kernel_size = kernel_size
-     self.n_layers = n_layers
-     self.num_bins = num_bins
-     self.tail_bound = tail_bound
-     self.half_channels = in_channels // 2
-
-     self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
-     self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
-     self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
-     self.proj.weight.data.zero_()
-     self.proj.bias.data.zero_()
-
-   def forward(self, x, x_mask, g=None, reverse=False):
-     x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-     h = self.pre(x0)
-     h = self.convs(h, x_mask, g=g)
-     h = self.proj(h) * x_mask
-
-     b, c, t = x0.shape
-     h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
-     unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
-     unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
-     unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
-     x1, logabsdet = piecewise_rational_quadratic_transform(x1,
-         unnormalized_widths,
-         unnormalized_heights,
-         unnormalized_derivatives,
-         inverse=reverse,
-         tails='linear',
-         tail_bound=self.tail_bound
-     )
-
-     x = torch.cat([x0, x1], 1) * x_mask
-     logdet = torch.sum(logabsdet * x_mask, [1,2])
-     if not reverse:
-       return x, logdet
-     else:
-       return x
spaces/1gistliPinn/ChatGPT4/Examples/BMW Coding E-SYS V.3.24.3 Plken And PIN Utorrent LINK.md DELETED
@@ -1,7 +0,0 @@
- <br />
- <p>We propose nonadaptive protocols that can convert any classical message into a secure quantum bit string when used with a quantum channel. This task, which we call superdense coding, goes beyond the standard dense coding by demonstrating the possibility of using one qubit per transmitted photon to secure a long bit string (for example, a secret key for quantum key distribution). The task of superdense coding can also be achieved by using entanglement to secure a bit string.</p>
- <h2>BMW Coding E-SYS V.3.24.3 Plken And PIN Utorrent</h2><br /><p><b><b>DOWNLOAD</b> &#9745; <a href="https://imgfil.com/2uy28G">https://imgfil.com/2uy28G</a></b></p><br /><br />
- <p>We have recently proposed a non-adaptive protocol for secure quantum key distribution, which outperforms existing non-adaptive protocols. A significant drawback of non-adaptive protocols is that they can readily be broken by the eavesdropper by performing an optimal attack, which is in the same spirit as existing adaptive protocols. Here, we show that non-adaptive protocols may also be useful for other applications. In particular, we propose a non-adaptive protocol for dense coding with a single sender and two receivers. This protocol is based on a quantum error correcting code and is optimal in the sense that the achievable rate is the maximum one for a given squeezing parameter. The encoding uses two perfect single-photon entanglement sources, which are required in the protocol and which can be realized in a linear optical setup. As a byproduct of the non-adaptive approach, the protocol also allows one to completely control the quantum states at the input of the channel. This opens up new possibilities in the study of decoherence in quantum information systems.less</p>
- <p>We report the first demonstration of quantum Darwinism: the redundant encoding of information about a decohering system in its environment. For a system to be both decoherent and open, it must acquire memories that become increasingly redundant as the system is shrunk, then protect the redundancy for an extended period. We demonstrate the first effective implementation of the decoherence–protection cycle for quantum Darwinism. As a prototype, we have encoded information about the quantum state of a superconducting flux qubit into its resistance, a form of a memory highly resistant to radiation noise--a decoherence mechanism of a type that can be engineered in a variety of systems. This quantum memory, when transferred to a second qubit, protects the initial code from decoherence up to an appreciable length scale, due to quantum Darwinism.</p> 899543212b<br />
- <br />
- <br />
spaces/1line/AutoGPT/CODE_OF_CONDUCT.md DELETED
@@ -1,40 +0,0 @@
- # Code of Conduct for auto-gpt
-
- ## 1. Purpose
-
- The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
-
- ## 2. Scope
-
- This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
-
- ## 3. Our Standards
-
- We encourage the following behavior:
-
- * Being respectful and considerate to others
- * Actively seeking diverse perspectives
- * Providing constructive feedback and assistance
- * Demonstrating empathy and understanding
-
- We discourage the following behavior:
-
- * Harassment or discrimination of any kind
- * Disrespectful, offensive, or inappropriate language or content
- * Personal attacks or insults
- * Unwarranted criticism or negativity
-
- ## 4. Reporting and Enforcement
-
- If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.
-
- Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.
-
- ## 5. Acknowledgements
-
- This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
-
- ## 6. Contact
-
- If you have any questions or concerns, please contact the project maintainers.
-
spaces/1line/AutoGPT/main.py DELETED
@@ -1 +0,0 @@
- from autogpt import main
spaces/1phancelerku/anime-remove-background/Download APK for GTA Vice City and Experience the 80s on Your Phone.md DELETED
@@ -1,211 +0,0 @@
-
- <h1>Download APK for GTA Vice City: How to Play One of the Best Games Ever on Your Android Device</h1>
- <p>If you are a fan of open-world action-adventure games, you probably have heard of GTA Vice City. This game is one of the most iconic and influential titles in the history of gaming, and it is still loved by millions of players around the world. But did you know that you can play GTA Vice City on your Android device? In this article, we will show you how to download APK for GTA Vice City and enjoy this classic game on your smartphone or tablet.</p>
- <h2>download apk for gta vice city</h2><br /><p><b><b>Download Zip</b> &#10037;&#10037;&#10037; <a href="https://jinyurl.com/2uNM2r">https://jinyurl.com/2uNM2r</a></b></p><br /><br />
- <h2>What is GTA Vice City?</h2>
- <h3>A brief introduction to the game and its features</h3>
- <p>GTA Vice City is a game developed by Rockstar Games and released in 2002 for PlayStation 2, Xbox, and PC. It is the sixth installment in the Grand Theft Auto series, and it is set in a fictional city based on Miami in the 1980s. The game follows the story of Tommy Vercetti, a former mobster who is sent to Vice City by his boss to establish a criminal empire. The game allows you to explore a vast and colorful open world, where you can drive various vehicles, use different weapons, interact with various characters, complete missions, and cause mayhem.</p>
- <h3>Why is GTA Vice City so popular and beloved?</h3>
- <p>GTA Vice City is widely regarded as one of the best games ever made, and it has received critical acclaim and commercial success. Some of the reasons why GTA Vice City is so popular and beloved are:</p>
- <ul>
- <li>It has a captivating and immersive storyline, with memorable characters, dialogues, and voice acting.</li>
- <li>It has a rich and vibrant atmosphere, with a stunning graphics, sound, and music that capture the essence of the 1980s culture.</li>
- <li>It has a fun and addictive gameplay, with a lot of variety, freedom, and replay value.</li>
- <li>It has a huge fan base, with a lot of mods, cheats, guides, and fan-made content available online.</li>
- </ul>
- <h2>How to download APK for GTA Vice City?</h2>
- <h3>The official way: buy the game from Google Play Store</h3>
- <p>The easiest and safest way to download APK for GTA Vice City is to buy the game from Google Play Store. The game was officially released for Android devices in 2012, and it costs $4.99. The game is compatible with most Android devices running Android 7.0 or higher, and it requires about 1.5 GB of free space. To buy the game from Google Play Store, you need to:</p>
- <p>How to download apk for gta vice city on pc<br />
- Download apk for gta vice city android free<br />
- Download apk for gta vice city mod unlimited money<br />
- Download apk for gta vice city stories<br />
- Download apk for gta vice city 10th anniversary edition<br />
- Download apk for gta vice city cheats<br />
- Download apk for gta vice city lite<br />
- Download apk for gta vice city highly compressed<br />
- Download apk for gta vice city obb file<br />
- Download apk for gta vice city offline<br />
- Download apk for gta vice city latest version<br />
- Download apk for gta vice city windows 10<br />
- Download apk for gta vice city full game<br />
- Download apk for gta vice city deluxe<br />
- Download apk for gta vice city 2023<br />
- Download apk for gta vice city with sound<br />
- Download apk for gta vice city original<br />
- Download apk for gta vice city hd graphics<br />
- Download apk for gta vice city data file<br />
- Download apk for gta vice city no verification<br />
- Download apk for gta vice city in hindi<br />
- Download apk for gta vice city rockstar games<br />
- Download apk for gta vice city ultimate trainer<br />
- Download apk for gta vice city cleo mod<br />
- Download apk for gta vice city 200mb<br />
- Download apk for gta vice city real life mod<br />
- Download apk for gta vice city zombie mod<br />
- Download apk for gta vice city online multiplayer<br />
- Download apk for gta vice city malayalam version<br />
- Download apk for gta vice city radio stations<br />
- Download apk for gta vice city remastered<br />
- Download apk for gta vice city 4k resolution mod<br />
- Download apk for gta vice city all missions unlocked<br />
- Download apk for gta vice city bike mod<br />
- Download apk for gta vice city best graphics mod<br />
- Download apk for gta vice city car mod pack<br />
- Download apk for gta vice city direct download link<br />
- Download apk for gta vice city english version<br />
- Download apk for gta vice city fast and furious mod<br />
- Download apk for gta vice city gamepad support mod<br />
- Download apk for gta vice city helicopter mod<br />
- Download apk for gta vice city iron man mod<br />
- Download apk for gta vice city jetpack cheat code mod<br />
- Download apk for gta vice city keyboard and mouse support mod <br />
- Download apk for gta vice city low mb download size <br />
- Download apk for gta vice city new cars and bikes mod <br />
- Download apk for gta vice city no root required <br />
- Download apk for gta vice city psp emulator <br />
- Download apk for gta vice city spiderman mod</p>
- <ol>
- <li>Open Google Play Store on your device and search for "GTA Vice City".</li>
- <li>Select the game from the results and tap on "Buy".</li>
- <li>Enter your payment details and confirm your purchase.</li>
- <li>Wait for the game to download and install on your device.</li>
- <li>Launch the game from your app drawer or home screen.</li>
- </ol>
- <h3>The unofficial way: download the APK file from a third-party source</h3>
- <p>If you don't want to pay for the game or if your device is not compatible with Google Play Store, you can also download APK for GTA Vice City from a third-party source. However, this method is not recommended, as it may expose your device to malware <p>The unofficial way: download the APK file from a third-party source</p>
- <p>If you don't want to pay for the game or if your device is not compatible with Google Play Store, you can also download APK for GTA Vice City from a third-party source. However, this method is not recommended, as it may expose your device to malware, viruses, or legal issues. If you still want to try this method, you need to follow these steps:</p>
- <ol>
- <li>Find a reliable and trustworthy website that offers the APK file for GTA Vice City. You can search online or use some of the links provided below . Make sure to read the reviews and ratings of the website before downloading anything.</li>
- <li>Download the APK file and the OBB data file from the website. The APK file is the application file that installs the game on your device, while the OBB data file contains the game data and assets. The size of these files may vary depending on the website, but they are usually around 1.5 GB in total.</li>
- <li>Enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps that are not from Google Play Store.</li>
- <li>Locate the downloaded APK file and OBB data file on your device using a file manager app. You can use any file manager app that you prefer, such as ES File Explorer or ZArchiver.</li>
- <li>Install the APK file by tapping on it and following the instructions on the screen. Do not launch the game yet.</li>
- <li>Extract the OBB data file using a file extractor app, such as ZArchiver. You will get a folder named "com.rockstargames.gtavc". Copy this folder and paste it in the Android/OBB directory on your device's internal storage.</li>
- <li>Launch the game from your app drawer or home screen and enjoy GTA Vice City on your Android device.</li>
- </ol>
- <h4>Pros and cons of using the unofficial way</h4>
- <p>Using the unofficial way to download APK for GTA Vice City has some pros and cons that you should be aware of before trying it. Here are some of them:</p>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>You can get the game for free without paying anything.</td>
- <td>You may violate the intellectual property rights of Rockstar Games and face legal consequences.</td>
- </tr>
- <tr>
- <td>You can play the game on devices that are not compatible with Google Play Store.</td>
- <td>You may encounter bugs, glitches, crashes, or performance issues while playing the game.</td>
- </tr>
- <tr>
- <td>You can access some mods, cheats, or hacks that are not available on the official version.</td>
- <td>You may risk infecting your device with malware, viruses, or spyware that can harm your data or privacy.</td>
- </tr>
- <tr>
- <td>You can update the game manually whenever a new version is available.</td>
- <td>You may not receive any official support or updates from Rockstar Games or Google Play Store.</td>
- </tr>
- </table>
- <h4>How to install the APK file on your device</h4>
- <p>If you have followed the steps above correctly, you should have installed the APK file and the OBB data file on your device successfully. However, if you face any problems or errors while installing or launching the game, you can try some of these solutions:</p>
- <ul>
- <li>Make sure that you have enough free space on your device's internal storage. You need at least 1.5 GB of free space to install and run GTA Vice City smoothly.</li>
- <li>Make sure that you have downloaded the correct APK file and OBB data file for your device's architecture and Android version. You can check your device's specifications using an app like CPU-Z or Droid Hardware Info.</li>
- <li>Make sure that you have copied and pasted the OBB data folder in the right location on your device's internal storage. The folder name should be "com.rockstargames.gtavc" and it should be inside the Android/OBB directory.</li>
- <li>Make sure that you have enabled the installation of apps from unknown sources on your device's settings. You can disable it after installing GTA Vice City if you want to increase your security.</li>
- <li>Make sure that you have granted all the necessary permissions to GTA Vice City when launching it for the first time. The game may require access to your storage, network, location, microphone, or other features to function properly.</li>
- <li>If none of these solutions work, you may need to uninstall GTA Vice City and download it again from a different website or source. You may also need to restart your device or clear its cache before <p>If none of these solutions work, you may need to uninstall GTA Vice City and download it again from a different website or source. You may also need to restart your device or clear its cache before installing the game.</p>
- <h2>How to play GTA Vice City on your Android device?</h2>
- <h3>The basic controls and gameplay</h3>
- <p>Once you have installed GTA Vice City on your Android device, you can start playing it and enjoy the open-world action-adventure experience. The game has a simple and intuitive control scheme that adapts to your device's screen size and orientation. You can customize the controls to your liking by going to the Settings > Controls menu. Here are some of the basic controls and gameplay features of GTA Vice City:</p>
- <ul>
- <li>To move your character, use the virtual joystick on the left side of the screen. To run, tap and hold the joystick. To jump, swipe up on the joystick.</li>
- <li>To look around, swipe on the right side of the screen. To aim, tap and hold on the right side of the screen. To shoot, tap on the fire button on the bottom right corner of the screen.</li>
- <li>To enter or exit a vehicle, tap on the vehicle icon on the bottom left corner of the screen. To drive a vehicle, use the same joystick as for walking. To accelerate, tap and hold the gas pedal on the bottom right corner of the screen. To brake or reverse, tap and hold the brake pedal on the bottom left corner of the screen. To steer, tilt your device left or right.</li>
- <li>To access the map, pause menu, or mission menu, tap on the icons on the top right corner of the screen. To view your stats, inventory, or options, swipe down from the top of the screen.</li>
- <li>To switch between different weapons, tap on the weapon icon on the top left corner of the screen. To reload your weapon, swipe down on the weapon icon.</li>
- <li>To interact with other characters or objects, tap on them when they are highlighted. To answer a phone call or start a mission, tap on the green phone icon or blue marker respectively.</li>
- </ul>
- <h3>The best tips and tricks to enjoy the game</h3>
- <p>GTA Vice City is a game that offers a lot of fun and excitement, but it can also be challenging and frustrating at times. To help you enjoy the game more and avoid some common pitfalls, here are some of the best tips and tricks that you should know:</p>
- <ul>
- <li>Save your game frequently. You can save your game at any safe house that you own or rent by entering it and tapping on the cassette icon. Saving your game will also restore your health and armor.</li>
- <li>Collect hidden packages, rampages, and unique jumps. These are special items or challenges that are scattered around Vice City. They will reward you with money, weapons, vehicles, or bonuses when you find them or complete them.</li>
- <li>Use taxis, buses, or helicopters to travel faster. These are convenient ways to get around Vice City without having to drive yourself. You can hail a taxi by tapping on it when it is nearby. You can board a bus by entering it when it stops at a bus stop. You can fly a helicopter by finding one at a helipad or airport.</li>
- <li>Buy properties and businesses. These are valuable assets that will generate income for you over time. You can buy properties and businesses by going to their locations and tapping on the for sale sign. Some properties and businesses will also unlock new missions or features for you.</li>
- <li>Pay attention to the radio stations and news reports. These are entertaining sources of information that will keep you updated on what is happening in Vice City. They will also give you hints, tips, or warnings about certain events or situations.</li>
- </ul>
- <h4>How to use cheats and mods</h4>
- <p>If you want to spice up your gameplay or make things easier for yourself, you can use cheats and mods in GTA Vice City. Cheats are codes that you can enter during gameplay to activate various effects, such as unlimited health, ammo, money, or weapons. Mods are modifications that you can install to change or enhance certain aspects of the game, such as graphics, sound, gameplay, or content.</p>
- <p>To use cheats in GTA Vice City, you need to have a keyboard app installed on your device. You can use any keyboard app that you prefer, such as Gboard or SwiftKey. To enter a cheat code, simply open the keyboard app during gameplay and type in the code. You will see a confirmation message if the cheat is activated successfully. You can find a list of cheat codes online . Be careful when using cheats, as they may cause glitches or prevent you from completing some missions or achievements.</p>
- <p>To use mods in GTA Vice City, To use mods in GTA Vice City, you need to have a file manager app and a file extractor app installed on your device. You can use any apps that you prefer, such as ES File Explorer and ZArchiver. To install a mod, you need to follow these steps: <ol>
- <li>Find a reliable and trustworthy website that offers mods for GTA Vice City. You can search online or use some of the links provided below . Make sure to read the reviews and ratings of the website and the mod before downloading anything.</li>
- <li>Download the mod file from the website. The mod file may be in ZIP, RAR, or APK format, depending on the type of mod. The size of the mod file may vary depending on the mod, but they are usually around 100 MB or less.</li>
- <li>Locate the downloaded mod file on your device using a file manager app. You can use any file manager app that you prefer, such as ES File Explorer or ZArchiver.</li>
- <li>Extract the mod file using a file extractor app, such as ZArchiver. You will get a folder or a file that contains the mod data and instructions.</li>
- <li>Follow the instructions provided by the mod creator to install the mod on your device. The instructions may vary depending on the type of mod, but they usually involve copying and pasting some files or folders into the GTA Vice City directory on your device's internal storage.</li>
- <li>Launch GTA Vice City from your app drawer or home screen and enjoy the modded game.</li>
- </ol>
- <p>Be careful when using mods, as they may cause conflicts or errors with the original game or other mods. You may also need to backup your game data before installing any mods, in case something goes wrong. You can find a guide on how to backup your game data online .</p>
- <h4>How to save your progress and avoid bugs</h4>
- <p>GTA Vice City is a game that can be very fun and rewarding, but it can also be very frustrating and annoying if you lose your progress or encounter bugs. To prevent this from happening, you should follow some of these tips:</p>
- <ul>
- <li>Save your game frequently. You can save your game at any safe house that you own or rent by entering it and tapping on the cassette icon. Saving your game will also restore your health and armor.</li>
- <li>Avoid using cheats or mods that may interfere with the game's functionality or stability. Some cheats or mods may cause glitches, crashes, or corruption of your game data.</li>
- <li>Clear your game cache regularly. This will help improve your game's performance and reduce lag or loading times. To clear your game cache, go to Settings > Apps > GTA Vice City > Storage > Clear Cache.</li>
- <li>Update your game and device regularly. This will help fix any bugs or issues that may affect your game's quality or compatibility. To update your game, go to Google Play Store and check for any updates available. To update your device, go to Settings > System > System Update and check for any updates available.</li>
- <li>Contact Rockstar Games or Google Play Store for any technical support or feedback. If you have any problems or questions regarding GTA Vice City, you can contact Rockstar Games through their website or Google Play Store through their help center . They may be able to help you resolve your issues or provide you with useful information.</li>
- </ul>
- <h2>Conclusion</h2>
- <h3>A summary of the main points and a call to action</h3>
- <p>GTA Vice City is one of the best games ever made, and it is still enjoyable and relevant today. You can play GTA Vice City on your Android device by downloading APK for GTA Vice City from Google Play Store or a third-party source. However, you should be aware of the pros and cons of each method, and follow some tips and tricks to install and play the game smoothly and safely. GTA Vice City is a game that offers a lot of fun and excitement, but it also requires some skill and patience. If you are ready to take on the challenge of becoming a criminal mastermind in Vice City, download APK for GTA Vice City today and start playing!</p>
- <p>If you liked this article, please share it with your friends and family who are also fans of GTA Vice City. You can also leave us a comment below and let us know what you think about GTA Vice City on Android devices. We would love to hear from you!</p>
- <h2>Frequently Asked Questions</h2>
- <h3>Q: How much does GTA Vice City cost on Google Play Store?</h3>
- <p>A: GTA Vice City costs $4.99 on Google Play Store.</p>
- <h3>Q: What are the minimum requirements to play GTA Vice City on Android devices?</h3>
- <p>A: The minimum requirements to play GTA Vice City on Android devices <p>A: The minimum requirements to play GTA Vice City on Android devices are:</p>
- <ul>
- <li>Android 7.0 or higher</li>
- <li>1.5 GB of free space</li>
- <li>1 GB of RAM</li>
- <li>Dual-core 1.2 GHz processor</li>
- <li>Adreno 220, Mali 400, PowerVR SGX540, or Tegra 3 GPU</li>
- </ul>
- <h3>Q: How can I restore my purchases or progress if I delete GTA Vice City or change my device?</h3>
- <p>A: If you have bought GTA Vice City from Google Play Store, you can restore your purchases or progress by logging in to the same Google account that you used to buy the game. You can also use Google Play Games to sync your game data across different devices. To do this, you need to:</p>
- <ol>
- <li>Open Google Play Games on your device and sign in with your Google account.</li>
- <li>Tap on the menu icon and select Settings.</li>
- <li>Enable the option "Automatically sign in to supported games".</li>
- <li>Launch GTA Vice City and tap on the Google Play Games icon on the top right corner of the screen.</li>
- <li>Select your Google account and follow the instructions on the screen.</li>
- </ol>
- <p>If you have downloaded GTA Vice City from a third-party source, you may not be able to restore your purchases or progress easily. You may need to backup your game data manually using a file manager app or a backup app before deleting or changing your device. You can find a guide on how to backup your game data online .</p>
- <h3>Q: How can I play GTA Vice City with a controller or a keyboard and mouse?</h3>
- <p>A: GTA Vice City supports various types of controllers and input devices, such as Bluetooth controllers, USB controllers, keyboards, and mice. To play GTA Vice City with a controller or a keyboard and mouse, you need to:</p>
- <ol>
- <li>Connect your controller or input device to your Android device using Bluetooth, USB, or an adapter.</li>
- <li>Launch GTA Vice City and go to Settings > Controls.</li>
- <li>Select the option "Controller" or "Keyboard & Mouse" depending on your device.</li>
- <li>Customize the buttons or keys according to your preference.</li>
- <li>Enjoy playing GTA Vice City with your controller or input device.</li>
- </ol>
- <h3>Q: How can I change the language or subtitles of GTA Vice City?</h3>
- <p>A: GTA Vice City supports various languages and subtitles, such as English, French, German, Italian, Spanish, Russian, Japanese, and more. To change the language or subtitles of GTA Vice City, you need to:</p>
- <ol>
- <li>Launch GTA Vice City and go to Settings > Display.</li>
- <li>Select the option "Language" or "Subtitles" depending on what you want to change.</li>
- <li>Select the language that you prefer from the list.</li>
- <li>Enjoy playing GTA Vice City in your chosen language or with subtitles.</li>
- </ol>
- <h3>Q: How can I contact Rockstar Games or Google Play Store for any technical support or feedback?</h3>
- <p>A: If you have any problems or questions regarding GTA Vice City, you can contact Rockstar Games or Google Play Store for any technical support or feedback. Here are some of the ways that you can contact them:</p>
- <ul>
- <li>To contact Rockstar Games, you can visit their website and fill out a support request form. You can also email them at [email protected] or call them at +1-866-922-8694.</li>
- <li>To contact Google Play Store, you can visit their help center and browse through their articles and FAQs. You can also email them at [email protected] or call them at +1-855-836-3987.</li>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download My Talking Tom Mod Apk Versi Lama untuk Android dan iOS.md DELETED
@@ -1,104 +0,0 @@
- <br />
- <h1>Download My Talking Tom Mod Apk Versi Lama: A Fun and Free Game for Cat Lovers</h1>
- <p>Do you love cats? Do you want to have a virtual pet that you can take care of and play with? If yes, then you should try My Talking Tom, a popular simulation game where you can adopt and care for a cute cat named Tom. But wait, there's more! You can also download My Talking Tom Mod Apk Versi Lama, a modified version of the game that offers more benefits and features. In this article, we will tell you everything you need to know about My Talking Tom and its mod apk versi lama. Read on to find out more!</p>
- <h2>download my talking tom mod apk versi lama</h2><br /><p><b><b>Download File</b> &rArr; <a href="https://jinyurl.com/2uNRTt">https://jinyurl.com/2uNRTt</a></b></p><br /><br />
- <h2>What is My Talking Tom?</h2>
- <h3>A simulation game where you can adopt and care for a virtual cat named Tom</h3>
- <p>My Talking Tom is a simulation game developed by Outfit7 Limited, a company that specializes in creating games featuring talking animals. The game was released in 2013 for Android and iOS devices. The game allows you to adopt a kitten named Tom and take care of him as he grows up. You can feed him, bathe him, dress him, and play with him. You can also customize his appearance and his home according to your preferences. The game has over 500 million downloads on Google Play Store and has received positive reviews from users.</p>
- <h3>Features of My Talking Tom</h3>
- <h4>Feed, bathe, dress, and play with Tom</h4>
- <p>One of the main features of My Talking Tom is that you can interact with Tom in various ways. You can feed him different types of food, such as fruits, vegetables, pizza, cake, and more. You can also bathe him, brush his teeth, and put him to bed. You can also dress him up with different outfits, hats, glasses, and accessories. You can also play with him by tickling him, poking him, or petting him. You can also make him happy by giving him toys, such as balls, planes, cars, and more.</p>
- <h4>Customize Tom's appearance and home</h4>
- <p>Another feature of My Talking Tom is that you can customize Tom's appearance and home according to your preferences. You can change his fur color, eye color, shape of his ears, nose, mouth, and tail. You can also change his home's wallpaper, furniture, decorations, and more. You can create your own unique style for Tom and his home.</p>
- <h4>Interact with Tom and hear him repeat your words</h4>
- <p>A fun feature of My Talking Tom is that you can interact with Tom and hear him repeat your words in a funny voice. You can talk to him using your device's microphone or type your messages on the screen. You can also record videos of your conversations with Tom and share them with your friends on social media.</p>
- <h4>Explore different mini-games and earn coins</h4>
- <p>Another feature of My Talking Tom is that you can explore different mini-games and earn coins. You can play games such as Flappy Tom, Bubble Shooter, Planet Hop, and more. You can also watch videos and complete tasks to earn more coins. You can use the coins to buy food, clothes, toys, and other items for Tom.</p>
- <h2>What is My Talking Tom Mod Apk Versi Lama?</h2>
- <h3>A modified version of the game that offers more benefits</h3>
- <p>My Talking Tom Mod Apk Versi Lama is a modified version of the game that offers more benefits and features than the original game. It is also known as My Talking Tom Old Version Mod Apk or My Talking Tom Hack Apk. It is a free and safe download that does not require root access or any other permissions. It is compatible with most Android devices and does not affect the performance of your device.</p>
- <h3>Advantages of My Talking Tom Mod Apk Versi Lama</h3>
- <h4>No ads to interrupt your gameplay</h4>
- <p>One of the advantages of My Talking Tom Mod Apk Versi Lama is that it removes all the ads from the game. You can enjoy playing with Tom without any annoying interruptions or distractions. You can also save your data and battery life by not having to watch ads.</p>
- <h4>Free access to premium items and features</h4>
- <p>Another advantage of My Talking Tom Mod Apk Versi Lama is that it gives you free access to premium items and features that are normally locked or require real money to purchase. You can get unlimited food, clothes, toys, and other items for Tom without spending a dime. You can also unlock all the levels, mini-games, and achievements in the game without any hassle.</p>
- <h4>Unlimited money and diamonds to buy anything you want</h4>
- <p>Another advantage of My Talking Tom Mod Apk Versi Lama is that it gives you unlimited money and diamonds to buy anything you want in the game. You can buy any food, clothes, toys, and other items for Tom without worrying about running out of money or diamonds. You can also upgrade Tom's home and appearance to your liking without any limitations.</p>
- <h4>Smaller file size and no internet connection required</h4>
- <p>Another advantage of My Talking Tom Mod Apk Versi Lama is that it has a smaller file size and does not require an internet connection to play. You can download the game faster and save your storage space on your device. You can also play the game offline without any problems.</p>
- <h2>How to Download and Install My Talking Tom Mod Apk Versi Lama?</h2>
- <h3>A simple guide to get the game on your device</h3>
- <p>If you want to download and install My Talking Tom Mod Apk Versi Lama on your device, you need to follow some simple steps. The process is easy and does not take much time. However, you need to make sure that you download the mod apk file from a trusted source and not from any random website that may contain viruses or malware.</p>
- <h3>Steps to download and install My Talking Tom Mod Apk Versi Lama</h3>
- <h4>Enable unknown sources on your device settings</h4>
- <p>The first step to download and install My Talking Tom Mod Apk Versi Lama is to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to your device settings > security > unknown sources > enable.</p>
- <h4>Download the mod apk file from a trusted source</h4>
- <p>The second step to download and install My Talking Tom Mod Apk Versi Lama is to download the mod apk file from a trusted source. You can find many websites that offer the mod apk file for free, but you need to be careful and choose a reliable one. One of the best sources to download the mod apk file is [this website]. This website provides a safe and fast download link for the mod apk file. You can also read more information about the mod apk file on this website.</p>
- <h4>Install the mod apk file on your device</h4>
- <p>The third step to download and install My Talking Tom Mod Apk Versi Lama is to install the mod apk file on your device. To do this, locate the downloaded mod apk file on your device storage and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on install and wait for a few seconds until the installation is complete.</p>
- <h4>Launch the game and enjoy</h4>
- <p>The final step to download and install My Talking Tom Mod Apk Versi Lama is to launch the game and enjoy. To do this, go to your app drawer and tap on the game icon. You will see the game's main screen with Tom and his home. You can start playing with Tom and enjoy all the benefits and features of the mod apk versi lama. You can also access the game's settings and options by tapping on the menu button on the top left corner of the screen.</p>
- <h2>Conclusion</h2>
- <p>My Talking Tom is a fun and free game for cat lovers. You can adopt and care for a virtual cat named Tom and interact with him in various ways. You can also download My Talking Tom Mod Apk Versi Lama, a modified version of the game that offers more benefits and features than the original game. You can get rid of ads, access premium items and features, get unlimited money and diamonds, and play the game offline. You can download and install My Talking Tom Mod Apk Versi Lama by following some simple steps. You can also visit [this website] to get more information about the mod apk file. Download My Talking Tom Mod Apk Versi Lama today and have fun with your furry friend!</p>
- <h2>FAQs</h2>
- <h4>Q: Is My Talking Tom Mod Apk Versi Lama safe to download and install?</h4>
- <p>A: Yes, My Talking Tom Mod Apk Versi Lama is safe to download and install. It does not contain any viruses or malware that can harm your device or data. However, you need to make sure that you download the mod apk file from a trusted source and not from any random website.</p>
- <h4>Q: Do I need to root my device to use My Talking Tom Mod Apk Versi Lama?</h4>
- <p>A: No, you do not need to root your device to use My Talking Tom Mod Apk Versi Lama. The mod apk file does not require any root access or any other permissions to work. You can use it on any Android device without any problems.</p>
- <h4>Q: Will I lose my progress if I uninstall My Talking Tom Mod Apk Versi Lama?</h4>
- <p>A: No, you will not lose your progress if you uninstall My Talking Tom Mod Apk Versi Lama. The mod apk file saves your progress on your device's storage and not on the cloud. You can reinstall the mod apk file anytime and resume your game from where you left off.</p>
- <h4>Q: Can I play My Talking Tom Mod Apk Versi Lama with my friends?</h4>
- <p>A: Yes, you can play My Talking Tom Mod Apk Versi Lama with your friends. The mod apk file supports multiplayer mode, where you can connect with your friends online and visit their homes, play mini-games, and chat with them.</p>
- <h4>Q: How can I update My Talking Tom Mod Apk Versi Lama?</h4>
- <p>A: You can update My Talking Tom Mod Apk Versi Lama by visiting [this website] and downloading the latest version of the mod apk file. You can also check for updates on the game's settings and options menu. You can install the updated mod apk file over the existing one without losing your progress.</p>
- <br />
- <br />

spaces/44ov41za8i/FreeVC/speaker_encoder/params_data.py DELETED
@@ -1,29 +0,0 @@
-
- ## Mel-filterbank
- mel_window_length = 25  # In milliseconds
- mel_window_step = 10    # In milliseconds
- mel_n_channels = 40
-
-
- ## Audio
- sampling_rate = 16000
- # Number of spectrogram frames in a partial utterance
- partials_n_frames = 160     # 1600 ms
- # Number of spectrogram frames at inference
- inference_n_frames = 80     # 800 ms
-
-
- ## Voice Activity Detection
- # Window size of the VAD. Must be either 10, 20 or 30 milliseconds.
- # This sets the granularity of the VAD. Should not need to be changed.
- vad_window_length = 30  # In milliseconds
- # Number of frames to average together when performing the moving average smoothing.
- # The larger this value, the larger the VAD variations must be to not get smoothed out.
- vad_moving_average_width = 8
- # Maximum number of consecutive silent frames a segment can have.
- vad_max_silence_length = 6
-
-
- ## Audio volume normalization
- audio_norm_target_dBFS = -30
-

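The frame counts above are tied to the 10 ms mel window step. A minimal sanity check of the duration comments, assuming the module is importable as `speaker_encoder.params_data` (the import path is an assumption based on the file location):

```python
# Verify that the frame-count comments match the 10 ms hop size.
# Assumes the package layout speaker_encoder/params_data.py is importable.
from speaker_encoder import params_data as p

assert p.partials_n_frames * p.mel_window_step == 1600   # 160 frames x 10 ms
assert p.inference_n_frames * p.mel_window_step == 800   # 80 frames x 10 ms
```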
spaces/52Hz/SRMNet_AWGN_denoising/main_test_SRMNet.py DELETED
@@ -1,97 +0,0 @@
- import argparse
- import cv2
- import glob
- import os
- import shutil
- import torch
- from collections import OrderedDict
- from skimage import img_as_ubyte
- from PIL import Image
- import torchvision.transforms.functional as TF
- import torch.nn.functional as F
- from natsort import natsorted
- from model.SRMNet import SRMNet
-
- def clean_folder(folder):
-     for filename in os.listdir(folder):
-         file_path = os.path.join(folder, filename)
-         try:
-             if os.path.isfile(file_path) or os.path.islink(file_path):
-                 os.unlink(file_path)
-             elif os.path.isdir(file_path):
-                 shutil.rmtree(file_path)
-         except Exception as e:
-             print('Failed to delete %s. Reason: %s' % (file_path, e))
-
- def main():
-     parser = argparse.ArgumentParser(description='Demo Image Denoising')
-     parser.add_argument('--input_dir', default='test', type=str, help='Input images')
-     parser.add_argument('--result_dir', default='result', type=str, help='Directory for results')
-     parser.add_argument('--weights',
-                         default='experiments/pretrained_models/AWGN_denoising_SRMNet.pth', type=str,
-                         help='Path to weights')
-
-     args = parser.parse_args()
-
-     inp_dir = args.input_dir
-     out_dir = args.result_dir
-
-     os.makedirs(out_dir, exist_ok=True)
-
-     files = natsorted(glob.glob(os.path.join(inp_dir, '*')))
-
-     if len(files) == 0:
-         raise Exception(f"No files found at {inp_dir}")
-
-     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-     # Load the corresponding model architecture and weights
-     model = SRMNet()
-     model = model.to(device)
-     model.eval()
-     load_checkpoint(model, args.weights)
-
-     mul = 16
-     for file_ in files:
-         img = Image.open(file_).convert('RGB')
-         input_ = TF.to_tensor(img).unsqueeze(0).to(device)
-
-         # Pad the input if H or W is not a multiple of 16
-         h, w = input_.shape[2], input_.shape[3]
-         H, W = ((h + mul) // mul) * mul, ((w + mul) // mul) * mul
-         padh = H - h if h % mul != 0 else 0
-         padw = W - w if w % mul != 0 else 0
-         input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')
-         with torch.no_grad():
-             restored = model(input_)
-
-         restored = torch.clamp(restored, 0, 1)
-         restored = restored[:, :, :h, :w]
-         restored = restored.permute(0, 2, 3, 1).cpu().detach().numpy()
-         restored = img_as_ubyte(restored[0])
-
-         f = os.path.splitext(os.path.split(file_)[-1])[0]
-         save_img((os.path.join(out_dir, f + '.png')), restored)
-     clean_folder(inp_dir)
-
- def save_img(filepath, img):
-     cv2.imwrite(filepath, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
-
-
- def load_checkpoint(model, weights):
-     checkpoint = torch.load(weights, map_location=torch.device('cpu'))
-     try:
-         model.load_state_dict(checkpoint["state_dict"])
-     except Exception:
-         state_dict = checkpoint["state_dict"]
-         new_state_dict = OrderedDict()
-         for k, v in state_dict.items():
-             name = k[7:]  # remove the `module.` prefix added by DataParallel
-             new_state_dict[name] = v
-         model.load_state_dict(new_state_dict)
-
-
- if __name__ == '__main__':
-     main()

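The pad-then-crop step inside `main()` is the reusable part: encoder-decoder models such as SRMNet need spatial dimensions divisible by their downsampling factor (16 here). A standalone sketch of that pattern, independent of the model (shapes are arbitrary examples):

```python
import torch
import torch.nn.functional as F

def pad_to_multiple(x, mul=16):
    """Reflect-pad an NCHW tensor so H and W become multiples of `mul`."""
    h, w = x.shape[2], x.shape[3]
    padh = (mul - h % mul) % mul  # 0 when h is already a multiple
    padw = (mul - w % mul) % mul
    return F.pad(x, (0, padw, 0, padh), 'reflect'), (h, w)

x = torch.randn(1, 3, 123, 250)          # arbitrary, non-multiple H and W
x_pad, (h, w) = pad_to_multiple(x)
assert x_pad.shape[2] % 16 == 0 and x_pad.shape[3] % 16 == 0
restored = x_pad[:, :, :h, :w]           # crop the (model) output back
assert restored.shape == x.shape
```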
spaces/AI-Dashboards/Graph.Visualization.Plotly.Sunbursts.Treemaps.WebGL/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: 🧠Visualization Plotly Treemaps WebGL🩺
- emoji: 🩺Vis🧠
- colorFrom: indigo
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.17.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/AIConsultant/MusicGen/audiocraft/data/zip.py DELETED
@@ -1,76 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
- """Utility for reading some info from inside a zip file.
- """
-
- import typing
- import zipfile
-
- from dataclasses import dataclass
- from functools import lru_cache
- from typing_extensions import Literal
-
-
- DEFAULT_SIZE = 32
- MODE = Literal['r', 'w', 'x', 'a']
-
-
- @dataclass(order=True)
- class PathInZip:
-     """Holds a path to a file within a zip file.
-
-     Args:
-         path (str): The convention is <path_to_zip>:<relative_path_inside_zip>.
-             Let's assume there is a zip file /some/location/foo.zip
-             and inside of it is a json file located at /data/file1.json.
-             Then we expect path = "/some/location/foo.zip:/data/file1.json".
-     """
-
-     INFO_PATH_SEP = ':'
-     zip_path: str
-     file_path: str
-
-     def __init__(self, path: str) -> None:
-         split_path = path.split(self.INFO_PATH_SEP)
-         assert len(split_path) == 2
-         self.zip_path, self.file_path = split_path
-
-     @classmethod
-     def from_paths(cls, zip_path: str, file_path: str):
-         return cls(zip_path + cls.INFO_PATH_SEP + file_path)
-
-     def __str__(self) -> str:
-         return self.zip_path + self.INFO_PATH_SEP + self.file_path
-
-
- def _open_zip(path: str, mode: MODE = 'r'):
-     return zipfile.ZipFile(path, mode)
-
-
- _cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip)
-
-
- def set_zip_cache_size(max_size: int):
-     """Sets the maximal LRU caching for zip file opening.
-
-     Args:
-         max_size (int): the maximal LRU cache.
-     """
-     global _cached_open_zip
-     _cached_open_zip = lru_cache(max_size)(_open_zip)
-
-
- def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO:
-     """Opens a file stored inside a zip and returns a file-like object.
-
-     Args:
-         path_in_zip (PathInZip): A PathInZip object representing the file to return a file-like object of.
-         mode (str): The mode in which to open the file.
-     Returns:
-         A file-like object for PathInZip.
-     """
-     zf = _cached_open_zip(path_in_zip.zip_path)
-     return zf.open(path_in_zip.file_path)

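A short usage sketch of `PathInZip` and `open_file_in_zip`. The zip path and member name are made up for illustration; the example builds its own throwaway archive so it is self-contained (and the temp path contains no `:`, which the separator convention requires):

```python
import os
import tempfile
import zipfile

# Build a small zip to read from (hypothetical member name).
tmp_dir = tempfile.mkdtemp()
zip_path = os.path.join(tmp_dir, 'foo.zip')
with zipfile.ZipFile(zip_path, 'w') as zf:
    zf.writestr('data/file1.json', '{"key": "value"}')

# "<path_to_zip>:<relative_path_inside_zip>", as the docstring describes.
path = PathInZip(f'{zip_path}:data/file1.json')
with open_file_in_zip(path) as f:
    print(f.read())  # b'{"key": "value"}'
```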
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/loss.py DELETED
@@ -1,307 +0,0 @@
- import torch
- import torch.distributed.nn
- from torch import distributed as dist, nn as nn
- from torch.nn import functional as F
- import numpy as np
- from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
-
- try:
-     import horovod.torch as hvd
- except ImportError:
-     hvd = None
-
-
- def gather_features(
-         audio_features,
-         text_features,
-         audio_features_mlp=None,
-         text_features_mlp=None,
-         local_loss=False,
-         gather_with_grad=False,
-         rank=0,
-         world_size=1,
-         use_horovod=False,
-         mlp_loss=False
- ):
-     if use_horovod:
-         assert hvd is not None, 'Please install horovod'
-         if gather_with_grad:
-             all_audio_features = hvd.allgather(audio_features)
-             all_text_features = hvd.allgather(text_features)
-             if mlp_loss:
-                 all_audio_features_mlp = hvd.allgather(audio_features_mlp)
-                 all_text_features_mlp = hvd.allgather(text_features_mlp)
-         else:
-             with torch.no_grad():
-                 all_audio_features = hvd.allgather(audio_features)
-                 all_text_features = hvd.allgather(text_features)
-                 if mlp_loss:
-                     all_audio_features_mlp = hvd.allgather(audio_features_mlp)
-                     all_text_features_mlp = hvd.allgather(text_features_mlp)
-             if not local_loss:
-                 # ensure grads for local rank when all_* features don't have a gradient
-                 gathered_audio_features = list(all_audio_features.chunk(world_size, dim=0))
-                 gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
-                 gathered_audio_features[rank] = audio_features
-                 gathered_text_features[rank] = text_features
-                 all_audio_features = torch.cat(gathered_audio_features, dim=0)
-                 all_text_features = torch.cat(gathered_text_features, dim=0)
-                 if mlp_loss:
-                     gathered_audio_features_mlp = list(all_audio_features_mlp.chunk(world_size, dim=0))
-                     gathered_text_features_mlp = list(all_text_features_mlp.chunk(world_size, dim=0))
-                     gathered_audio_features_mlp[rank] = audio_features_mlp
-                     gathered_text_features_mlp[rank] = text_features_mlp
-                     all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
-                     all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
-     else:
-         # We gather tensors from all gpus
-         if gather_with_grad:
-             all_audio_features = torch.cat(torch.distributed.nn.all_gather(audio_features), dim=0)
-             all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
-             if mlp_loss:
-                 all_audio_features_mlp = torch.cat(torch.distributed.nn.all_gather(audio_features_mlp), dim=0)
-                 all_text_features_mlp = torch.cat(torch.distributed.nn.all_gather(text_features_mlp), dim=0)
-         else:
-             gathered_audio_features = [torch.zeros_like(audio_features) for _ in range(world_size)]
-             gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
-             dist.all_gather(gathered_audio_features, audio_features)
-             dist.all_gather(gathered_text_features, text_features)
-             if mlp_loss:
-                 gathered_audio_features_mlp = [torch.zeros_like(audio_features_mlp) for _ in range(world_size)]
-                 gathered_text_features_mlp = [torch.zeros_like(text_features_mlp) for _ in range(world_size)]
-                 dist.all_gather(gathered_audio_features_mlp, audio_features_mlp)
-                 dist.all_gather(gathered_text_features_mlp, text_features_mlp)
-             if not local_loss:
-                 # ensure grads for local rank when all_* features don't have a gradient
-                 gathered_audio_features[rank] = audio_features
-                 gathered_text_features[rank] = text_features
-                 if mlp_loss:
-                     gathered_audio_features_mlp[rank] = audio_features_mlp
-                     gathered_text_features_mlp[rank] = text_features_mlp
-
-             all_audio_features = torch.cat(gathered_audio_features, dim=0)
-             all_text_features = torch.cat(gathered_text_features, dim=0)
-             if mlp_loss:
-                 all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
-                 all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
-     if mlp_loss:
-         return all_audio_features, all_text_features, all_audio_features_mlp, all_text_features_mlp
-     else:
-         return all_audio_features, all_text_features
-
- class ClipLoss(nn.Module):
-
-     def __init__(
-             self,
-             local_loss=False,
-             gather_with_grad=False,
-             cache_labels=False,
-             rank=0,
-             world_size=1,
-             use_horovod=False,
-             mlp_loss=False,
-             weight_loss_kappa=0,
-     ):
-         super().__init__()
-         self.local_loss = local_loss
-         self.gather_with_grad = gather_with_grad
-         self.cache_labels = cache_labels
-         self.rank = rank
-         self.world_size = world_size
-         self.use_horovod = use_horovod
-         self.mlp_loss = mlp_loss
-         self.weighted_loss = bool(weight_loss_kappa != 0)
-         self.weight_loss_kappa = weight_loss_kappa
-         # cache state
-         self.prev_num_logits = 0
-         self.labels = {}
-
-     def forward(self, audio_features, text_features, logit_scale_a, logit_scale_t=None, audio_features_mlp=None, text_features_mlp=None):
-         device = audio_features.device
-         if self.mlp_loss:
-             if self.world_size > 1:
-                 all_audio_features, all_text_features, all_audio_features_mlp, all_text_features_mlp = gather_features(
-                     audio_features=audio_features, text_features=text_features,
-                     audio_features_mlp=audio_features_mlp, text_features_mlp=text_features_mlp,
-                     local_loss=self.local_loss, gather_with_grad=self.gather_with_grad,
-                     rank=self.rank, world_size=self.world_size, use_horovod=self.use_horovod,
-                     mlp_loss=self.mlp_loss
-                 )
-                 if self.local_loss:
-                     a_logits_per_audio = logit_scale_a * audio_features @ all_text_features_mlp.T
-                     a_logits_per_text = logit_scale_a * text_features_mlp @ all_audio_features.T
-                     t_logits_per_audio = logit_scale_t * audio_features_mlp @ all_text_features.T
-                     t_logits_per_text = logit_scale_t * text_features @ all_audio_features_mlp.T
-                 else:
-                     a_logits_per_audio = logit_scale_a * all_audio_features @ all_text_features_mlp.T
-                     a_logits_per_text = a_logits_per_audio.T
-                     t_logits_per_audio = logit_scale_t * all_audio_features_mlp @ all_text_features.T
-                     t_logits_per_text = t_logits_per_audio.T
-             else:
-                 a_logits_per_audio = logit_scale_a * audio_features @ text_features_mlp.T
-                 a_logits_per_text = logit_scale_a * text_features_mlp @ audio_features.T
-                 t_logits_per_audio = logit_scale_t * audio_features_mlp @ text_features.T
-                 t_logits_per_text = logit_scale_t * text_features @ audio_features_mlp.T
-
-             # calculated ground-truth and cache if enabled
-             num_logits = a_logits_per_audio.shape[0]
-             if self.prev_num_logits != num_logits or device not in self.labels:
-                 labels = torch.arange(num_logits, device=device, dtype=torch.long)
-                 if self.world_size > 1 and self.local_loss:
-                     labels = labels + num_logits * self.rank
-                 if self.cache_labels:
-                     self.labels[device] = labels
-                     self.prev_num_logits = num_logits
-             else:
-                 labels = self.labels[device]
-
-             if not self.weighted_loss:
-                 total_loss = (
-                     F.cross_entropy(a_logits_per_audio, labels) +
-                     F.cross_entropy(a_logits_per_text, labels) +
-                     F.cross_entropy(t_logits_per_audio, labels) +
-                     F.cross_entropy(t_logits_per_text, labels)
-                 ) / 4
-             else:
-                 audio_weight = (audio_features @ audio_features.T).detach()
-                 audio_weight = (torch.exp(torch.sum(audio_weight, axis=1) / (self.weight_loss_kappa * len(audio_weight)))).detach()
-                 text_weight = (text_features @ text_features.T).detach()
-                 text_weight = (torch.exp(torch.sum(text_weight, axis=1) / (self.weight_loss_kappa * len(text_features)))).detach()
-                 total_loss = (
-                     F.cross_entropy(a_logits_per_audio, labels, weight=audio_weight) +
-                     F.cross_entropy(a_logits_per_text, labels, weight=audio_weight) +
-                     F.cross_entropy(t_logits_per_audio, labels, weight=text_weight) +
-                     F.cross_entropy(t_logits_per_text, labels, weight=text_weight)
-                 ) / 4
-         else:
-             if self.world_size > 1:
-                 all_audio_features, all_text_features = gather_features(
-                     audio_features=audio_features, text_features=text_features,
-                     local_loss=self.local_loss, gather_with_grad=self.gather_with_grad,
-                     rank=self.rank, world_size=self.world_size, use_horovod=self.use_horovod,
-                     mlp_loss=self.mlp_loss
-                 )
-
-                 if self.local_loss:
-                     logits_per_audio = logit_scale_a * audio_features @ all_text_features.T
-                     logits_per_text = logit_scale_a * text_features @ all_audio_features.T
-                 else:
-                     logits_per_audio = logit_scale_a * all_audio_features @ all_text_features.T
-                     logits_per_text = logits_per_audio.T
-             else:
-                 logits_per_audio = logit_scale_a * audio_features @ text_features.T
-                 logits_per_text = logit_scale_a * text_features @ audio_features.T
-
-             # calculated ground-truth and cache if enabled
-             num_logits = logits_per_audio.shape[0]
-             if self.prev_num_logits != num_logits or device not in self.labels:
-                 labels = torch.arange(num_logits, device=device, dtype=torch.long)
-                 if self.world_size > 1 and self.local_loss:
-                     labels = labels + num_logits * self.rank
-                 if self.cache_labels:
-                     self.labels[device] = labels
-                     self.prev_num_logits = num_logits
-             else:
-                 labels = self.labels[device]
-             if not self.weighted_loss:
-                 total_loss = (
-                     F.cross_entropy(logits_per_audio, labels) +
-                     F.cross_entropy(logits_per_text, labels)
-                 ) / 2
-             else:
-                 audio_weight = (all_audio_features @ all_audio_features.T).detach()
-                 audio_weight = (torch.exp(torch.sum(audio_weight, axis=1) / (self.weight_loss_kappa * len(all_audio_features)))).detach()
-                 text_weight = (all_text_features @ all_text_features.T).detach()
-                 text_weight = (torch.exp(torch.sum(text_weight, axis=1) / (self.weight_loss_kappa * len(all_text_features)))).detach()
-                 total_loss = (
-                     F.cross_entropy(logits_per_audio, labels, weight=text_weight) +
-                     F.cross_entropy(logits_per_text, labels, weight=audio_weight)
-                 ) / 2
-         return total_loss
-
- def lp_gather_features(
-         pred,
-         target,
-         world_size=1,
-         use_horovod=False
- ):
-     if use_horovod:
-         assert hvd is not None, 'Please install horovod'
-         with torch.no_grad():
-             all_preds = hvd.allgather(pred)
-             all_targets = hvd.allgather(target)
-     else:
-         gathered_preds = [torch.zeros_like(pred) for _ in range(world_size)]
-         gathered_targets = [torch.zeros_like(target) for _ in range(world_size)]
-
-         dist.all_gather(gathered_preds, pred)
-         dist.all_gather(gathered_targets, target)
-         all_preds = torch.cat(gathered_preds, dim=0)
-         all_targets = torch.cat(gathered_targets, dim=0)
-
-     return all_preds, all_targets
-
-
- def get_map(pred, target):
-     pred = torch.sigmoid(pred).numpy()
-     target = target.numpy()
-     return np.mean(average_precision_score(target, pred, average=None))
-
- def get_acc(pred, target):
-     pred = torch.argmax(pred, 1).numpy()
-     target = torch.argmax(target, 1).numpy()
-     return accuracy_score(target, pred)
-
- def get_mauc(pred, target):
-     pred = torch.sigmoid(pred).numpy()
-     target = target.numpy()
-     return np.mean(roc_auc_score(target, pred, average=None))
-
-
- class LPMetrics(object):
-     def __init__(self, metric_names=['map', 'acc', 'mauc']):
-         self.metrics = []
-         for name in metric_names:
-             self.metrics.append(self.get_metric(name))
-         self.metric_names = metric_names
-
-     def get_metric(self, name):
-         if name == 'map':
-             return get_map
-         elif name == 'acc':
-             return get_acc
-         elif name == 'mauc':
-             return get_mauc
-         else:
-             raise ValueError('the metric should be at least one of [map, acc, mauc]')
-
-     def evaluate_mertics(self, pred, target):
-         metric_dict = {}
-         for i in range(len(self.metric_names)):
-             metric_dict[self.metric_names[i]] = self.metrics[i](pred, target)
-         return metric_dict
-
-
- def calc_celoss(pred, target):
-     target = torch.argmax(target, 1).long()
-     return nn.CrossEntropyLoss()(pred, target)
-
-
- class LPLoss(nn.Module):
-
-     def __init__(self, loss_name):
-         super().__init__()
-         if loss_name == 'bce':
-             self.loss_func = nn.BCEWithLogitsLoss()
-         elif loss_name == 'ce':
-             self.loss_func = calc_celoss
-         elif loss_name == 'mse':
-             self.loss_func = nn.MSELoss()
-         else:
-             raise ValueError('the loss func should be at least one of [bce, ce, mse]')
-
-     def forward(self, pred, target):
-         loss = self.loss_func(pred, target)
-         return loss
-

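A minimal single-process sketch of `ClipLoss` in its simplest configuration (`world_size=1`, `mlp_loss=False`, unweighted). The batch size, embedding dimension, and the CLIP-style 1/0.07 temperature are illustrative, not prescribed by this file:

```python
import torch
import torch.nn.functional as F

loss_fn = ClipLoss()  # defaults: local_loss=False, world_size=1, mlp_loss=False

batch, dim = 8, 512
audio = F.normalize(torch.randn(batch, dim), dim=-1)  # unit-norm embeddings
text = F.normalize(torch.randn(batch, dim), dim=-1)
logit_scale = torch.tensor(1 / 0.07)                  # assumed temperature

loss = loss_fn(audio, text, logit_scale_a=logit_scale)
print(loss)  # symmetric cross-entropy over the batch diagonal
```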
spaces/AISloth/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: ChatGPTwithAPI
- emoji: 🚀
- colorFrom: red
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.20.0
- app_file: app.py
- pinned: false
- license: mit
- duplicated_from: awacke1/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/Abhiboken12/travelling_ai/app.py DELETED
@@ -1,34 +0,0 @@
- import os
- import gradio as gr
- from langchain.chat_models import ChatOpenAI
- from langchain import LLMChain, PromptTemplate
- from langchain.memory import ConversationBufferMemory
-
- OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
-
- template = """You are a helpful assistant to answer all user queries.
- {chat_history}
- User: {user_message}
- Chatbot:"""
-
- prompt = PromptTemplate(
-     input_variables=["chat_history", "user_message"], template=template
- )
-
- memory = ConversationBufferMemory(memory_key="chat_history")
-
- llm_chain = LLMChain(
-     llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
-     prompt=prompt,
-     verbose=True,
-     memory=memory,
- )
-
- def get_text_response(user_message, history):
-     response = llm_chain.predict(user_message=user_message)
-     return response
-
- demo = gr.ChatInterface(get_text_response)
-
- if __name__ == "__main__":
-     demo.launch()  # To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.

spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Theb.py DELETED
@@ -1,28 +0,0 @@
- import os
- import json
- import time
- import subprocess
-
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://theb.ai'
- model = ['gpt-3.5-turbo']
- supports_stream = True
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
-     path = os.path.dirname(os.path.realpath(__file__))
-     config = json.dumps({
-         'messages': messages,
-         'model': model}, separators=(',', ':'))
-
-     cmd = ['python3', f'{path}/helpers/theb.py', config]
-
-     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-     for line in iter(p.stdout.readline, b''):
-         yield line.decode('utf-8')
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

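A hedged usage sketch of the generator above. It assumes the bundled `helpers/theb.py` script exists at the relative path the module expects and that the upstream service is reachable; neither is guaranteed here:

```python
# _create_completion yields decoded stdout lines from the helper subprocess,
# so the reply streams chunk by chunk.
messages = [{'role': 'user', 'content': 'Say hello'}]
for chunk in _create_completion('gpt-3.5-turbo', messages, stream=True):
    print(chunk, end='', flush=True)
```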
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/random.py DELETED
@@ -1,21 +0,0 @@
- from __future__ import annotations
-
- import random
- from typing import TYPE_CHECKING, List
-
- from . import order_registry as OrderRegistry
- from .base import BaseOrder
-
- if TYPE_CHECKING:
-     from agentverse.environments import BaseEnvironment
-
-
- @OrderRegistry.register("random")
- class RandomOrder(BaseOrder):
-     """
-     Order for random conversation
-     The agents speak in a random order
-     """
-
-     def get_next_agent_idx(self, environment: BaseEnvironment) -> List[int]:
-         return [random.randint(0, len(environment.agents) - 1)]

spaces/AlekseyCalvin/dreambooth-training3/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Dreambooth Training
- emoji: ☁️
- colorFrom: pink
- colorTo: red
- sdk: gradio
- sdk_version: 3.11
- app_file: app.py
- pinned: false
- license: mit
- duplicated_from: multimodalart/dreambooth-training
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/AlexWang/lama/saicinpainting/training/modules/spatial_transform.py DELETED
@@ -1,49 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from kornia.geometry.transform import rotate
-
-
- class LearnableSpatialTransformWrapper(nn.Module):
-     def __init__(self, impl, pad_coef=0.5, angle_init_range=80, train_angle=True):
-         super().__init__()
-         self.impl = impl
-         self.angle = torch.rand(1) * angle_init_range
-         if train_angle:
-             self.angle = nn.Parameter(self.angle, requires_grad=True)
-         self.pad_coef = pad_coef
-
-     def forward(self, x):
-         if torch.is_tensor(x):
-             return self.inverse_transform(self.impl(self.transform(x)), x)
-         elif isinstance(x, tuple):
-             x_trans = tuple(self.transform(elem) for elem in x)
-             y_trans = self.impl(x_trans)
-             return tuple(self.inverse_transform(elem, orig_x) for elem, orig_x in zip(y_trans, x))
-         else:
-             raise ValueError(f'Unexpected input type {type(x)}')
-
-     def transform(self, x):
-         height, width = x.shape[2:]
-         pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef)
-         x_padded = F.pad(x, [pad_w, pad_w, pad_h, pad_h], mode='reflect')
-         x_padded_rotated = rotate(x_padded, angle=self.angle.to(x_padded))
-         return x_padded_rotated
-
-     def inverse_transform(self, y_padded_rotated, orig_x):
-         height, width = orig_x.shape[2:]
-         pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef)
-
-         y_padded = rotate(y_padded_rotated, angle=-self.angle.to(y_padded_rotated))
-         y_height, y_width = y_padded.shape[2:]
-         y = y_padded[:, :, pad_h : y_height - pad_h, pad_w : y_width - pad_w]
-         return y
-
-
- if __name__ == '__main__':
-     layer = LearnableSpatialTransformWrapper(nn.Identity())
-     x = torch.arange(2 * 3 * 15 * 15).view(2, 3, 15, 15).float()
-     y = layer(x)
-     assert x.shape == y.shape
-     assert torch.allclose(x[:, :, 1:, 1:][:, :, :-1, :-1], y[:, :, 1:, 1:][:, :, :-1, :-1])
-     print('all ok')

spaces/Ameaou/academic-chatgpt3.1/crazy_functions/解析项目源代码.py DELETED
@@ -1,266 +0,0 @@
- from toolbox import update_ui
- from toolbox import CatchException, report_execption, write_results_to_file
-
- def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
-     import os, copy
-     from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-     from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-     msg = '正常'
-     inputs_array = []
-     inputs_show_user_array = []
-     history_array = []
-     sys_prompt_array = []
-     report_part_1 = []
-
-     assert len(file_manifest) <= 512, "源文件太多(超过512个), 请缩减输入文件的数量。或者,您也可以选择删除此行警告,并修改代码拆分file_manifest列表,从而实现分批次处理。"
-     ############################## <第一步,逐个文件分析,多线程> ##################################
-     for index, fp in enumerate(file_manifest):
-         # 读取文件
-         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-             file_content = f.read()
-         prefix = "接下来请你逐文件分析下面的工程" if index == 0 else ""
-         i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
-         i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
-         # 装载请求内容
-         inputs_array.append(i_say)
-         inputs_show_user_array.append(i_say_show_user)
-         history_array.append([])
-         sys_prompt_array.append("你是一个程序架构分析师,正在分析一个源代码项目。你的回答必须简单明了。")
-
-     # 文件读取完成,对每一个源代码文件,生成一个请求线程,发送到chatgpt进行分析
-     gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
-         inputs_array = inputs_array,
-         inputs_show_user_array = inputs_show_user_array,
-         history_array = history_array,
-         sys_prompt_array = sys_prompt_array,
-         llm_kwargs = llm_kwargs,
-         chatbot = chatbot,
-         show_user_at_complete = True
-     )
-
-     # 全部文件解析完成,结果写入文件,准备对工程源代码进行汇总分析
-     report_part_1 = copy.deepcopy(gpt_response_collection)
-     history_to_return = report_part_1
-     res = write_results_to_file(report_part_1)
-     chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。"))
-     yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
-
-     ############################## <第二步,综合,单线程,分组+迭代处理> ##################################
-     batchsize = 16 # 16个文件为一组
-     report_part_2 = []
-     previous_iteration_files = []
-     last_iteration_result = ""
-     while True:
-         if len(file_manifest) == 0: break
-         this_iteration_file_manifest = file_manifest[:batchsize]
-         this_iteration_gpt_response_collection = gpt_response_collection[:batchsize*2]
-         file_rel_path = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)]
-         # 把“请对下面的程序文件做一个概述” 替换成 精简的 "文件名:{all_file[index]}"
-         for index, content in enumerate(this_iteration_gpt_response_collection):
-             if index % 2 == 0: this_iteration_gpt_response_collection[index] = f"{file_rel_path[index//2]}" # 只保留文件名节省token
-         previous_iteration_files.extend([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
-         previous_iteration_files_string = ', '.join(previous_iteration_files)
-         current_iteration_focus = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
-         i_say = f'根据以上分析,对程序的整体功能和构架重新做出概括。然后用一张markdown表格整理每个文件的功能(包括{previous_iteration_files_string})。'
-         inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。'
-         this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection)
-         this_iteration_history.append(last_iteration_result)
-         result = yield from request_gpt_model_in_new_thread_with_ui_alive(
-             inputs=i_say, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot,
-             history=this_iteration_history, # 迭代之前的分析
-             sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。")
-         report_part_2.extend([i_say, result])
-         last_iteration_result = result
-
-         file_manifest = file_manifest[batchsize:]
-         gpt_response_collection = gpt_response_collection[batchsize*2:]
-
-     ############################## <END> ##################################
-     history_to_return.extend(report_part_2)
-     res = write_results_to_file(history_to_return)
-     chatbot.append(("完成了吗?", res))
-     yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
-
-
- @CatchException
- def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob
-     file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
-                     [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
-                     [f for f in glob.glob('./request_llm/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
-     project_folder = './'
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
- @CatchException
- def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
-
- @CatchException
- def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \
-                     # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
- @CatchException
- def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
-
- @CatchException
- def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
-
- @CatchException
- def 解析一个Rect项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
-
- @CatchException
- def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/go.mod', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
-
- @CatchException
- def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
-
- @CatchException
- def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     history = []    # 清空历史,以免输入溢出
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \
-                     [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-         return
-     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

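The second phase of 解析源代码新 is an iterative fold: per-file summaries are consumed 16 at a time, and each round's result is appended to the next round's history. Stripped of the UI plumbing, the control flow is roughly the sketch below, where `summarize` is a hypothetical stand-in for the LLM request:

```python
def fold_summaries(per_file_summaries, summarize, batchsize=16):
    """Reduce many per-file summaries into one project overview,
    `batchsize` items at a time, carrying the running result forward."""
    running = ""
    while per_file_summaries:
        batch = per_file_summaries[:batchsize]
        per_file_summaries = per_file_summaries[batchsize:]
        # Each round sees the current batch plus the previous round's output.
        running = summarize(batch + [running])  # hypothetical LLM call
    return running
```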
spaces/Amrrs/fashion-aggregator-duplicated/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Fashion Aggregator
3
- emoji: 👕
4
- colorFrom: purple
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.9
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: ryparmar/fashion-aggregator
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/dataset/style_dataset.py DELETED
@@ -1,18 +0,0 @@
1
- from torch.utils.data import Dataset
2
- from torchvision import datasets
3
- import torchvision.transforms as T
4
- from torch.utils.data import DataLoader
5
-
6
- class StyleDataset(Dataset):
7
- def __init__(self, datadir, batch_size, sampler, image_side_length=256, num_workers=2):
8
- transform = T.Compose([
9
- T.Resize(size=(image_side_length * 2, image_side_length * 2)),
10
- T.RandomCrop(image_side_length),
11
- T.ToTensor(),
12
- ])
13
-
14
- dataset = datasets.ImageFolder(datadir, transform=transform)
15
- dataloader = DataLoader(dataset, batch_size=batch_size, sampler=sampler(len(dataset)),
16
- num_workers=num_workers)
17
-
18
- return dataloader
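With the loader exposed as an attribute, usage looks roughly like the sketch below. The folder path is hypothetical, and `sampler` is any factory that takes the dataset length, as the constructor's `sampler(len(dataset))` call implies:

```python
import torch

loader = StyleDataset(
    'data/style_images',  # hypothetical ImageFolder root
    batch_size=8,
    sampler=lambda n: torch.utils.data.SubsetRandomSampler(range(n)),
).dataloader

images, labels = next(iter(loader))
print(images.shape)  # torch.Size([8, 3, 256, 256]) with the default side length
```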
spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/README.md DELETED
@@ -1,39 +0,0 @@
- # Dynamic Multi-scale Filters for Semantic Segmentation
-
- ## Introduction
-
- <!-- [ALGORITHM] -->
-
- ```latex
- @InProceedings{He_2019_ICCV,
- author = {He, Junjun and Deng, Zhongying and Qiao, Yu},
- title = {Dynamic Multi-Scale Filters for Semantic Segmentation},
- booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
- month = {October},
- year = {2019}
- }
- ```
-
- ## Results and models
-
- ### Cityscapes
-
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
- | DMNet | R-50-D8 | 512x1024 | 40000 | 7.0 | 3.66 | 77.78 | 79.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes_20201214_115717-5e88fa33.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes-20201214_115717.log.json) |
- | DMNet | R-101-D8 | 512x1024 | 40000 | 10.6 | 2.54 | 78.37 | 79.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes_20201214_115716-abc9d111.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes-20201214_115716.log.json) |
- | DMNet | R-50-D8 | 769x769 | 40000 | 7.9 | 1.57 | 78.49 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes_20201214_115717-2a2628d7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes-20201214_115717.log.json) |
- | DMNet | R-101-D8 | 769x769 | 40000 | 12.0 | 1.01 | 77.62 | 78.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes_20201214_115718-b650de90.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes-20201214_115718.log.json) |
- | DMNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.07 | 80.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes_20201214_115716-987f51e3.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes-20201214_115716.log.json) |
- | DMNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.64 | 80.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes_20201214_115705-b1ff208a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes-20201214_115705.log.json) |
- | DMNet | R-50-D8 | 769x769 | 80000 | - | - | 79.22 | 80.55 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes_20201214_115718-7ea9fa12.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes-20201214_115718.log.json) |
- | DMNet | R-101-D8 | 769x769 | 80000 | - | - | 79.19 | 80.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes_20201214_115716-a7fbc2ab.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes-20201214_115716.log.json) |
-
- ### ADE20K
-
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
- | DMNet | R-50-D8 | 512x512 | 80000 | 9.4 | 20.95 | 42.37 | 43.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k_20201214_115705-a8626293.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k-20201214_115705.log.json) |
- | DMNet | R-101-D8 | 512x512 | 80000 | 13.0 | 13.88 | 45.34 | 46.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k_20201214_115704-c656c3fb.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k-20201214_115704.log.json) |
- | DMNet | R-50-D8 | 512x512 | 160000 | - | - | 43.15 | 44.17 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k_20201214_115706-25fb92c2.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k-20201214_115706.log.json) |
- | DMNet | R-101-D8 | 512x512 | 160000 | - | - | 45.42 | 46.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k_20201214_115705-73f9a8d7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k-20201214_115705.log.json) |
 
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './pspnet_r50-d8_512x1024_40k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/Andy1621/uniformer_image_segmentation/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
- _base_ = '../fcn/fcn_r101-d8_512x1024_80k_cityscapes.py'
- model = dict(
-     pretrained='open-mmlab://resnest101',
-     backbone=dict(
-         type='ResNeSt',
-         stem_channels=128,
-         radix=2,
-         reduction_factor=4,
-         avg_down_stride=True))
 
spaces/Anitha0531/SpeechtoText/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: SpeechtoText
- emoji: 🌖
- colorFrom: gray
- colorTo: purple
- sdk: gradio
- sdk_version: 3.35.2
- app_file: app.py
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/__init__.py DELETED
@@ -1,41 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- from .alexnet import AlexNet
- # yapf: disable
- from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
-                      PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS,
-                      ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule,
-                      ConvTranspose2d, ConvTranspose3d, ConvWS2d,
-                      DepthwiseSeparableConvModule, GeneralizedAttention,
-                      HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d,
-                      NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish,
-                      build_activation_layer, build_conv_layer,
-                      build_norm_layer, build_padding_layer, build_plugin_layer,
-                      build_upsample_layer, conv_ws_2d, is_norm)
- from .builder import MODELS, build_model_from_cfg
- # yapf: enable
- from .resnet import ResNet, make_res_layer
- from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit,
-                     NormalInit, PretrainedInit, TruncNormalInit, UniformInit,
-                     XavierInit, bias_init_with_prob, caffe2_xavier_init,
-                     constant_init, fuse_conv_bn, get_model_complexity_info,
-                     initialize, kaiming_init, normal_init, trunc_normal_init,
-                     uniform_init, xavier_init)
- from .vgg import VGG, make_vgg_layer
-
- __all__ = [
-     'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer',
-     'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
-     'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
-     'bias_init_with_prob', 'ConvModule', 'build_activation_layer',
-     'build_conv_layer', 'build_norm_layer', 'build_padding_layer',
-     'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d',
-     'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish',
-     'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS',
-     'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale',
-     'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d',
-     'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d',
-     'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d',
-     'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
-     'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
-     'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg'
- ]
 
spaces/Ariharasudhan/YoloV5/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Yolov5
- emoji: 🐨
- colorFrom: purple
- colorTo: blue
- sdk: gradio
- sdk_version: 3.44.1
- app_file: app.py
- pinned: false
- license: gpl-3.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/wheel.py DELETED
@@ -1,1082 +0,0 @@
- # -*- coding: utf-8 -*-
- #
- # Copyright (C) 2013-2020 Vinay Sajip.
- # Licensed to the Python Software Foundation under a contributor agreement.
- # See LICENSE.txt and CONTRIBUTORS.txt.
- #
- from __future__ import unicode_literals
-
- import base64
- import codecs
- import datetime
- from email import message_from_file
- import hashlib
- import json
- import logging
- import os
- import posixpath
- import re
- import shutil
- import sys
- import tempfile
- import zipfile
-
- from . import __version__, DistlibException
- from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
- from .database import InstalledDistribution
- from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
-                        LEGACY_METADATA_FILENAME)
- from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
-                    cached_property, get_cache_base, read_exports, tempdir,
-                    get_platform)
- from .version import NormalizedVersion, UnsupportedVersionError
-
- logger = logging.getLogger(__name__)
-
- cache = None  # created when needed
-
- if hasattr(sys, 'pypy_version_info'):  # pragma: no cover
-     IMP_PREFIX = 'pp'
- elif sys.platform.startswith('java'):  # pragma: no cover
-     IMP_PREFIX = 'jy'
- elif sys.platform == 'cli':  # pragma: no cover
-     IMP_PREFIX = 'ip'
- else:
-     IMP_PREFIX = 'cp'
-
- VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
- if not VER_SUFFIX:  # pragma: no cover
-     VER_SUFFIX = '%s%s' % sys.version_info[:2]
- PYVER = 'py' + VER_SUFFIX
- IMPVER = IMP_PREFIX + VER_SUFFIX
-
- ARCH = get_platform().replace('-', '_').replace('.', '_')
-
- ABI = sysconfig.get_config_var('SOABI')
- if ABI and ABI.startswith('cpython-'):
-     ABI = ABI.replace('cpython-', 'cp').split('-')[0]
- else:
-     def _derive_abi():
-         parts = ['cp', VER_SUFFIX]
-         if sysconfig.get_config_var('Py_DEBUG'):
-             parts.append('d')
-         if IMP_PREFIX == 'cp':
-             vi = sys.version_info[:2]
-             if vi < (3, 8):
-                 wpm = sysconfig.get_config_var('WITH_PYMALLOC')
-                 if wpm is None:
-                     wpm = True
-                 if wpm:
-                     parts.append('m')
-                 if vi < (3, 3):
-                     us = sysconfig.get_config_var('Py_UNICODE_SIZE')
-                     if us == 4 or (us is None and sys.maxunicode == 0x10FFFF):
-                         parts.append('u')
-         return ''.join(parts)
-     ABI = _derive_abi()
-     del _derive_abi
-
- FILENAME_RE = re.compile(r'''
- (?P<nm>[^-]+)
- -(?P<vn>\d+[^-]*)
- (-(?P<bn>\d+[^-]*))?
- -(?P<py>\w+\d+(\.\w+\d+)*)
- -(?P<bi>\w+)
- -(?P<ar>\w+(\.\w+)*)
- \.whl$
- ''', re.IGNORECASE | re.VERBOSE)
-
- NAME_VERSION_RE = re.compile(r'''
- (?P<nm>[^-]+)
- -(?P<vn>\d+[^-]*)
- (-(?P<bn>\d+[^-]*))?$
- ''', re.IGNORECASE | re.VERBOSE)
-
- SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
- SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
- SHEBANG_PYTHON = b'#!python'
- SHEBANG_PYTHONW = b'#!pythonw'
-
- if os.sep == '/':
-     to_posix = lambda o: o
- else:
-     to_posix = lambda o: o.replace(os.sep, '/')
-
- if sys.version_info[0] < 3:
-     import imp
- else:
-     imp = None
-     import importlib.machinery
-     import importlib.util
-
- def _get_suffixes():
-     if imp:
-         return [s[0] for s in imp.get_suffixes()]
-     else:
-         return importlib.machinery.EXTENSION_SUFFIXES
-
- def _load_dynamic(name, path):
-     # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
-     if imp:
-         return imp.load_dynamic(name, path)
-     else:
-         spec = importlib.util.spec_from_file_location(name, path)
-         module = importlib.util.module_from_spec(spec)
-         sys.modules[name] = module
-         spec.loader.exec_module(module)
-         return module
-
- class Mounter(object):
-     def __init__(self):
-         self.impure_wheels = {}
-         self.libs = {}
-
-     def add(self, pathname, extensions):
-         self.impure_wheels[pathname] = extensions
-         self.libs.update(extensions)
-
-     def remove(self, pathname):
-         extensions = self.impure_wheels.pop(pathname)
-         for k, v in extensions:
-             if k in self.libs:
-                 del self.libs[k]
-
-     def find_module(self, fullname, path=None):
-         if fullname in self.libs:
-             result = self
-         else:
-             result = None
-         return result
-
-     def load_module(self, fullname):
-         if fullname in sys.modules:
-             result = sys.modules[fullname]
-         else:
-             if fullname not in self.libs:
-                 raise ImportError('unable to find extension for %s' % fullname)
-             result = _load_dynamic(fullname, self.libs[fullname])
-             result.__loader__ = self
-             parts = fullname.rsplit('.', 1)
-             if len(parts) > 1:
-                 result.__package__ = parts[0]
-         return result
-
- _hook = Mounter()
-
-
- class Wheel(object):
-     """
-     Class to build and install from Wheel files (PEP 427).
-     """
-
-     wheel_version = (1, 1)
-     hash_kind = 'sha256'
-
-     def __init__(self, filename=None, sign=False, verify=False):
-         """
-         Initialise an instance using a (valid) filename.
-         """
-         self.sign = sign
-         self.should_verify = verify
-         self.buildver = ''
-         self.pyver = [PYVER]
-         self.abi = ['none']
-         self.arch = ['any']
-         self.dirname = os.getcwd()
-         if filename is None:
-             self.name = 'dummy'
-             self.version = '0.1'
-             self._filename = self.filename
-         else:
-             m = NAME_VERSION_RE.match(filename)
-             if m:
-                 info = m.groupdict('')
-                 self.name = info['nm']
-                 # Reinstate the local version separator
-                 self.version = info['vn'].replace('_', '-')
-                 self.buildver = info['bn']
-                 self._filename = self.filename
-             else:
-                 dirname, filename = os.path.split(filename)
-                 m = FILENAME_RE.match(filename)
-                 if not m:
-                     raise DistlibException('Invalid name or '
-                                            'filename: %r' % filename)
-                 if dirname:
-                     self.dirname = os.path.abspath(dirname)
-                 self._filename = filename
-                 info = m.groupdict('')
-                 self.name = info['nm']
-                 self.version = info['vn']
-                 self.buildver = info['bn']
-                 self.pyver = info['py'].split('.')
-                 self.abi = info['bi'].split('.')
-                 self.arch = info['ar'].split('.')
-
-     @property
-     def filename(self):
-         """
-         Build and return a filename from the various components.
-         """
-         if self.buildver:
-             buildver = '-' + self.buildver
-         else:
-             buildver = ''
-         pyver = '.'.join(self.pyver)
-         abi = '.'.join(self.abi)
-         arch = '.'.join(self.arch)
-         # replace - with _ as a local version separator
-         version = self.version.replace('-', '_')
-         return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
-                                          pyver, abi, arch)
-
-     @property
-     def exists(self):
-         path = os.path.join(self.dirname, self.filename)
-         return os.path.isfile(path)
-
-     @property
-     def tags(self):
-         for pyver in self.pyver:
-             for abi in self.abi:
-                 for arch in self.arch:
-                     yield pyver, abi, arch
-
-     @cached_property
-     def metadata(self):
-         pathname = os.path.join(self.dirname, self.filename)
-         name_ver = '%s-%s' % (self.name, self.version)
-         info_dir = '%s.dist-info' % name_ver
-         wrapper = codecs.getreader('utf-8')
-         with ZipFile(pathname, 'r') as zf:
-             wheel_metadata = self.get_wheel_metadata(zf)
-             wv = wheel_metadata['Wheel-Version'].split('.', 1)
-             file_version = tuple([int(i) for i in wv])
-             # if file_version < (1, 1):
-             #     fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME,
-             #            LEGACY_METADATA_FILENAME]
-             # else:
-             #     fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
-             fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
-             result = None
-             for fn in fns:
-                 try:
-                     metadata_filename = posixpath.join(info_dir, fn)
-                     with zf.open(metadata_filename) as bf:
-                         wf = wrapper(bf)
-                         result = Metadata(fileobj=wf)
-                         if result:
-                             break
-                 except KeyError:
-                     pass
-             if not result:
-                 raise ValueError('Invalid wheel, because metadata is '
-                                  'missing: looked in %s' % ', '.join(fns))
-         return result
-
-     def get_wheel_metadata(self, zf):
-         name_ver = '%s-%s' % (self.name, self.version)
-         info_dir = '%s.dist-info' % name_ver
-         metadata_filename = posixpath.join(info_dir, 'WHEEL')
-         with zf.open(metadata_filename) as bf:
-             wf = codecs.getreader('utf-8')(bf)
-             message = message_from_file(wf)
-         return dict(message)
-
-     @cached_property
-     def info(self):
-         pathname = os.path.join(self.dirname, self.filename)
-         with ZipFile(pathname, 'r') as zf:
-             result = self.get_wheel_metadata(zf)
-         return result
-
-     def process_shebang(self, data):
-         m = SHEBANG_RE.match(data)
-         if m:
-             end = m.end()
-             shebang, data_after_shebang = data[:end], data[end:]
-             # Preserve any arguments after the interpreter
-             if b'pythonw' in shebang.lower():
-                 shebang_python = SHEBANG_PYTHONW
-             else:
-                 shebang_python = SHEBANG_PYTHON
-             m = SHEBANG_DETAIL_RE.match(shebang)
-             if m:
-                 args = b' ' + m.groups()[-1]
-             else:
-                 args = b''
-             shebang = shebang_python + args
-             data = shebang + data_after_shebang
-         else:
-             cr = data.find(b'\r')
-             lf = data.find(b'\n')
-             if cr < 0 or cr > lf:
-                 term = b'\n'
-             else:
-                 if data[cr:cr + 2] == b'\r\n':
-                     term = b'\r\n'
-                 else:
-                     term = b'\r'
-             data = SHEBANG_PYTHON + term + data
-         return data
-
-     def get_hash(self, data, hash_kind=None):
-         if hash_kind is None:
-             hash_kind = self.hash_kind
-         try:
-             hasher = getattr(hashlib, hash_kind)
-         except AttributeError:
-             raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
-         result = hasher(data).digest()
-         result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
-         return hash_kind, result
-
-     def write_record(self, records, record_path, archive_record_path):
-         records = list(records)  # make a copy, as mutated
-         records.append((archive_record_path, '', ''))
-         with CSVWriter(record_path) as writer:
-             for row in records:
-                 writer.writerow(row)
-
-     def write_records(self, info, libdir, archive_paths):
-         records = []
-         distinfo, info_dir = info
-         hasher = getattr(hashlib, self.hash_kind)
-         for ap, p in archive_paths:
-             with open(p, 'rb') as f:
-                 data = f.read()
-             digest = '%s=%s' % self.get_hash(data)
-             size = os.path.getsize(p)
-             records.append((ap, digest, size))
-
-         p = os.path.join(distinfo, 'RECORD')
-         ap = to_posix(os.path.join(info_dir, 'RECORD'))
-         self.write_record(records, p, ap)
-         archive_paths.append((ap, p))
-
-     def build_zip(self, pathname, archive_paths):
-         with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
-             for ap, p in archive_paths:
-                 logger.debug('Wrote %s to %s in wheel', p, ap)
-                 zf.write(p, ap)
-
-     def build(self, paths, tags=None, wheel_version=None):
-         """
-         Build a wheel from files in specified paths, and use any specified tags
-         when determining the name of the wheel.
-         """
-         if tags is None:
-             tags = {}
-
-         libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
-         if libkey == 'platlib':
-             is_pure = 'false'
-             default_pyver = [IMPVER]
-             default_abi = [ABI]
-             default_arch = [ARCH]
-         else:
-             is_pure = 'true'
-             default_pyver = [PYVER]
-             default_abi = ['none']
-             default_arch = ['any']
-
-         self.pyver = tags.get('pyver', default_pyver)
-         self.abi = tags.get('abi', default_abi)
-         self.arch = tags.get('arch', default_arch)
-
-         libdir = paths[libkey]
-
-         name_ver = '%s-%s' % (self.name, self.version)
-         data_dir = '%s.data' % name_ver
-         info_dir = '%s.dist-info' % name_ver
-
-         archive_paths = []
-
-         # First, stuff which is not in site-packages
-         for key in ('data', 'headers', 'scripts'):
-             if key not in paths:
-                 continue
-             path = paths[key]
-             if os.path.isdir(path):
-                 for root, dirs, files in os.walk(path):
-                     for fn in files:
-                         p = fsdecode(os.path.join(root, fn))
-                         rp = os.path.relpath(p, path)
-                         ap = to_posix(os.path.join(data_dir, key, rp))
-                         archive_paths.append((ap, p))
-                         if key == 'scripts' and not p.endswith('.exe'):
-                             with open(p, 'rb') as f:
-                                 data = f.read()
-                             data = self.process_shebang(data)
-                             with open(p, 'wb') as f:
-                                 f.write(data)
-
-         # Now, stuff which is in site-packages, other than the
-         # distinfo stuff.
-         path = libdir
-         distinfo = None
-         for root, dirs, files in os.walk(path):
-             if root == path:
-                 # At the top level only, save distinfo for later
-                 # and skip it for now
-                 for i, dn in enumerate(dirs):
-                     dn = fsdecode(dn)
-                     if dn.endswith('.dist-info'):
-                         distinfo = os.path.join(root, dn)
-                         del dirs[i]
-                         break
-                 assert distinfo, '.dist-info directory expected, not found'
-
-             for fn in files:
-                 # comment out next suite to leave .pyc files in
-                 if fsdecode(fn).endswith(('.pyc', '.pyo')):
-                     continue
-                 p = os.path.join(root, fn)
-                 rp = to_posix(os.path.relpath(p, path))
-                 archive_paths.append((rp, p))
-
-         # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
-         files = os.listdir(distinfo)
-         for fn in files:
-             if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
-                 p = fsdecode(os.path.join(distinfo, fn))
-                 ap = to_posix(os.path.join(info_dir, fn))
-                 archive_paths.append((ap, p))
-
-         wheel_metadata = [
-             'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
-             'Generator: distlib %s' % __version__,
-             'Root-Is-Purelib: %s' % is_pure,
-         ]
-         for pyver, abi, arch in self.tags:
-             wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
-         p = os.path.join(distinfo, 'WHEEL')
-         with open(p, 'w') as f:
-             f.write('\n'.join(wheel_metadata))
-         ap = to_posix(os.path.join(info_dir, 'WHEEL'))
-         archive_paths.append((ap, p))
-
-         # sort the entries by archive path. Not needed by any spec, but it
-         # keeps the archive listing and RECORD tidier than they would otherwise
-         # be. Use the number of path segments to keep directory entries together,
-         # and keep the dist-info stuff at the end.
-         def sorter(t):
-             ap = t[0]
-             n = ap.count('/')
-             if '.dist-info' in ap:
-                 n += 10000
-             return (n, ap)
-         archive_paths = sorted(archive_paths, key=sorter)
-
-         # Now, at last, RECORD.
-         # Paths in here are archive paths - nothing else makes sense.
-         self.write_records((distinfo, info_dir), libdir, archive_paths)
-         # Now, ready to build the zip file
-         pathname = os.path.join(self.dirname, self.filename)
-         self.build_zip(pathname, archive_paths)
-         return pathname
-
-     def skip_entry(self, arcname):
-         """
-         Determine whether an archive entry should be skipped when verifying
-         or installing.
-         """
-         # The signature file won't be in RECORD,
-         # and we don't currently don't do anything with it
-         # We also skip directories, as they won't be in RECORD
-         # either. See:
-         #
-         # https://github.com/pypa/wheel/issues/294
-         # https://github.com/pypa/wheel/issues/287
-         # https://github.com/pypa/wheel/pull/289
-         #
-         return arcname.endswith(('/', '/RECORD.jws'))
-
-     def install(self, paths, maker, **kwargs):
-         """
-         Install a wheel to the specified paths. If kwarg ``warner`` is
-         specified, it should be a callable, which will be called with two
-         tuples indicating the wheel version of this software and the wheel
-         version in the file, if there is a discrepancy in the versions.
-         This can be used to issue any warnings to raise any exceptions.
-         If kwarg ``lib_only`` is True, only the purelib/platlib files are
-         installed, and the headers, scripts, data and dist-info metadata are
-         not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
-         bytecode will try to use file-hash based invalidation (PEP-552) on
-         supported interpreter versions (CPython 2.7+).
-
-         The return value is a :class:`InstalledDistribution` instance unless
-         ``options.lib_only`` is True, in which case the return value is ``None``.
-         """
-
-         dry_run = maker.dry_run
-         warner = kwargs.get('warner')
-         lib_only = kwargs.get('lib_only', False)
-         bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
-
-         pathname = os.path.join(self.dirname, self.filename)
-         name_ver = '%s-%s' % (self.name, self.version)
-         data_dir = '%s.data' % name_ver
-         info_dir = '%s.dist-info' % name_ver
-
-         metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
-         wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
-         record_name = posixpath.join(info_dir, 'RECORD')
-
-         wrapper = codecs.getreader('utf-8')
-
-         with ZipFile(pathname, 'r') as zf:
-             with zf.open(wheel_metadata_name) as bwf:
-                 wf = wrapper(bwf)
-                 message = message_from_file(wf)
-             wv = message['Wheel-Version'].split('.', 1)
-             file_version = tuple([int(i) for i in wv])
-             if (file_version != self.wheel_version) and warner:
-                 warner(self.wheel_version, file_version)
-
-             if message['Root-Is-Purelib'] == 'true':
-                 libdir = paths['purelib']
-             else:
-                 libdir = paths['platlib']
-
-             records = {}
-             with zf.open(record_name) as bf:
-                 with CSVReader(stream=bf) as reader:
-                     for row in reader:
-                         p = row[0]
-                         records[p] = row
-
-             data_pfx = posixpath.join(data_dir, '')
-             info_pfx = posixpath.join(info_dir, '')
-             script_pfx = posixpath.join(data_dir, 'scripts', '')
-
-             # make a new instance rather than a copy of maker's,
-             # as we mutate it
-             fileop = FileOperator(dry_run=dry_run)
-             fileop.record = True  # so we can rollback if needed
-
-             bc = not sys.dont_write_bytecode  # Double negatives. Lovely!
-
-             outfiles = []  # for RECORD writing
-
-             # for script copying/shebang processing
-             workdir = tempfile.mkdtemp()
-             # set target dir later
-             # we default add_launchers to False, as the
-             # Python Launcher should be used instead
-             maker.source_dir = workdir
-             maker.target_dir = None
-             try:
-                 for zinfo in zf.infolist():
-                     arcname = zinfo.filename
-                     if isinstance(arcname, text_type):
-                         u_arcname = arcname
-                     else:
-                         u_arcname = arcname.decode('utf-8')
-                     if self.skip_entry(u_arcname):
-                         continue
-                     row = records[u_arcname]
-                     if row[2] and str(zinfo.file_size) != row[2]:
-                         raise DistlibException('size mismatch for '
-                                                '%s' % u_arcname)
-                     if row[1]:
-                         kind, value = row[1].split('=', 1)
-                         with zf.open(arcname) as bf:
-                             data = bf.read()
-                         _, digest = self.get_hash(data, kind)
-                         if digest != value:
-                             raise DistlibException('digest mismatch for '
-                                                    '%s' % arcname)
-
-                     if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
-                         logger.debug('lib_only: skipping %s', u_arcname)
-                         continue
-                     is_script = (u_arcname.startswith(script_pfx)
-                                  and not u_arcname.endswith('.exe'))
-
-                     if u_arcname.startswith(data_pfx):
-                         _, where, rp = u_arcname.split('/', 2)
-                         outfile = os.path.join(paths[where], convert_path(rp))
-                     else:
-                         # meant for site-packages.
-                         if u_arcname in (wheel_metadata_name, record_name):
-                             continue
-                         outfile = os.path.join(libdir, convert_path(u_arcname))
-                     if not is_script:
-                         with zf.open(arcname) as bf:
-                             fileop.copy_stream(bf, outfile)
-                         # Issue #147: permission bits aren't preserved. Using
-                         # zf.extract(zinfo, libdir) should have worked, but didn't,
-                         # see https://www.thetopsites.net/article/53834422.shtml
-                         # So ... manually preserve permission bits as given in zinfo
-                         if os.name == 'posix':
-                             # just set the normal permission bits
-                             os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF)
-                         outfiles.append(outfile)
-                         # Double check the digest of the written file
-                         if not dry_run and row[1]:
-                             with open(outfile, 'rb') as bf:
-                                 data = bf.read()
-                             _, newdigest = self.get_hash(data, kind)
-                             if newdigest != digest:
-                                 raise DistlibException('digest mismatch '
-                                                        'on write for '
-                                                        '%s' % outfile)
-                         if bc and outfile.endswith('.py'):
-                             try:
-                                 pyc = fileop.byte_compile(outfile,
-                                                           hashed_invalidation=bc_hashed_invalidation)
-                                 outfiles.append(pyc)
-                             except Exception:
-                                 # Don't give up if byte-compilation fails,
-                                 # but log it and perhaps warn the user
-                                 logger.warning('Byte-compilation failed',
-                                                exc_info=True)
-                     else:
-                         fn = os.path.basename(convert_path(arcname))
-                         workname = os.path.join(workdir, fn)
-                         with zf.open(arcname) as bf:
-                             fileop.copy_stream(bf, workname)
-
-                         dn, fn = os.path.split(outfile)
-                         maker.target_dir = dn
-                         filenames = maker.make(fn)
-                         fileop.set_executable_mode(filenames)
-                         outfiles.extend(filenames)
-
-                 if lib_only:
-                     logger.debug('lib_only: returning None')
-                     dist = None
-                 else:
-                     # Generate scripts
-
-                     # Try to get pydist.json so we can see if there are
-                     # any commands to generate. If this fails (e.g. because
-                     # of a legacy wheel), log a warning but don't give up.
-                     commands = None
-                     file_version = self.info['Wheel-Version']
-                     if file_version == '1.0':
-                         # Use legacy info
-                         ep = posixpath.join(info_dir, 'entry_points.txt')
-                         try:
-                             with zf.open(ep) as bwf:
-                                 epdata = read_exports(bwf)
-                             commands = {}
-                             for key in ('console', 'gui'):
-                                 k = '%s_scripts' % key
-                                 if k in epdata:
-                                     commands['wrap_%s' % key] = d = {}
-                                     for v in epdata[k].values():
-                                         s = '%s:%s' % (v.prefix, v.suffix)
-                                         if v.flags:
-                                             s += ' [%s]' % ','.join(v.flags)
-                                         d[v.name] = s
-                         except Exception:
-                             logger.warning('Unable to read legacy script '
-                                            'metadata, so cannot generate '
-                                            'scripts')
-                     else:
-                         try:
-                             with zf.open(metadata_name) as bwf:
-                                 wf = wrapper(bwf)
-                                 commands = json.load(wf).get('extensions')
-                                 if commands:
-                                     commands = commands.get('python.commands')
-                         except Exception:
-                             logger.warning('Unable to read JSON metadata, so '
-                                            'cannot generate scripts')
-                     if commands:
-                         console_scripts = commands.get('wrap_console', {})
-                         gui_scripts = commands.get('wrap_gui', {})
-                         if console_scripts or gui_scripts:
-                             script_dir = paths.get('scripts', '')
-                             if not os.path.isdir(script_dir):
-                                 raise ValueError('Valid script path not '
-                                                  'specified')
-                             maker.target_dir = script_dir
-                             for k, v in console_scripts.items():
-                                 script = '%s = %s' % (k, v)
-                                 filenames = maker.make(script)
-                                 fileop.set_executable_mode(filenames)
-
-                             if gui_scripts:
-                                 options = {'gui': True }
-                                 for k, v in gui_scripts.items():
-                                     script = '%s = %s' % (k, v)
-                                     filenames = maker.make(script, options)
-                                     fileop.set_executable_mode(filenames)
-
-                     p = os.path.join(libdir, info_dir)
-                     dist = InstalledDistribution(p)
-
-                     # Write SHARED
-                     paths = dict(paths)  # don't change passed in dict
-                     del paths['purelib']
-                     del paths['platlib']
-                     paths['lib'] = libdir
-                     p = dist.write_shared_locations(paths, dry_run)
-                     if p:
-                         outfiles.append(p)
-
-                     # Write RECORD
-                     dist.write_installed_files(outfiles, paths['prefix'],
-                                                dry_run)
-                 return dist
-             except Exception:  # pragma: no cover
-                 logger.exception('installation failed.')
-                 fileop.rollback()
-                 raise
-             finally:
-                 shutil.rmtree(workdir)
-
-     def _get_dylib_cache(self):
-         global cache
-         if cache is None:
-             # Use native string to avoid issues on 2.x: see Python #20140.
-             base = os.path.join(get_cache_base(), str('dylib-cache'),
-                                 '%s.%s' % sys.version_info[:2])
-             cache = Cache(base)
-         return cache
-
-     def _get_extensions(self):
-         pathname = os.path.join(self.dirname, self.filename)
-         name_ver = '%s-%s' % (self.name, self.version)
-         info_dir = '%s.dist-info' % name_ver
-         arcname = posixpath.join(info_dir, 'EXTENSIONS')
-         wrapper = codecs.getreader('utf-8')
-         result = []
-         with ZipFile(pathname, 'r') as zf:
-             try:
-                 with zf.open(arcname) as bf:
-                     wf = wrapper(bf)
-                     extensions = json.load(wf)
-                     cache = self._get_dylib_cache()
-                     prefix = cache.prefix_to_dir(pathname)
-                     cache_base = os.path.join(cache.base, prefix)
-                     if not os.path.isdir(cache_base):
-                         os.makedirs(cache_base)
-                     for name, relpath in extensions.items():
-                         dest = os.path.join(cache_base, convert_path(relpath))
-                         if not os.path.exists(dest):
-                             extract = True
-                         else:
-                             file_time = os.stat(dest).st_mtime
-                             file_time = datetime.datetime.fromtimestamp(file_time)
-                             info = zf.getinfo(relpath)
-                             wheel_time = datetime.datetime(*info.date_time)
-                             extract = wheel_time > file_time
-                         if extract:
-                             zf.extract(relpath, cache_base)
-                         result.append((name, dest))
-             except KeyError:
-                 pass
-         return result
-
-     def is_compatible(self):
-         """
-         Determine if a wheel is compatible with the running system.
-         """
-         return is_compatible(self)
-
-     def is_mountable(self):
-         """
-         Determine if a wheel is asserted as mountable by its metadata.
-         """
-         return True  # for now - metadata details TBD
-
-     def mount(self, append=False):
-         pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
-         if not self.is_compatible():
-             msg = 'Wheel %s not compatible with this Python.' % pathname
-             raise DistlibException(msg)
-         if not self.is_mountable():
-             msg = 'Wheel %s is marked as not mountable.' % pathname
-             raise DistlibException(msg)
-         if pathname in sys.path:
-             logger.debug('%s already in path', pathname)
-         else:
-             if append:
-                 sys.path.append(pathname)
-             else:
-                 sys.path.insert(0, pathname)
-             extensions = self._get_extensions()
-             if extensions:
-                 if _hook not in sys.meta_path:
-                     sys.meta_path.append(_hook)
-                 _hook.add(pathname, extensions)
-
-     def unmount(self):
-         pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
-         if pathname not in sys.path:
-             logger.debug('%s not in path', pathname)
-         else:
-             sys.path.remove(pathname)
-             if pathname in _hook.impure_wheels:
-                 _hook.remove(pathname)
-             if not _hook.impure_wheels:
-                 if _hook in sys.meta_path:
-                     sys.meta_path.remove(_hook)
-
-     def verify(self):
-         pathname = os.path.join(self.dirname, self.filename)
-         name_ver = '%s-%s' % (self.name, self.version)
-         data_dir = '%s.data' % name_ver
-         info_dir = '%s.dist-info' % name_ver
-
-         metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
-         wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
-         record_name = posixpath.join(info_dir, 'RECORD')
-
-         wrapper = codecs.getreader('utf-8')
-
-         with ZipFile(pathname, 'r') as zf:
-             with zf.open(wheel_metadata_name) as bwf:
-                 wf = wrapper(bwf)
-                 message = message_from_file(wf)
-             wv = message['Wheel-Version'].split('.', 1)
-             file_version = tuple([int(i) for i in wv])
-             # TODO version verification
-
-             records = {}
-             with zf.open(record_name) as bf:
-                 with CSVReader(stream=bf) as reader:
-                     for row in reader:
-                         p = row[0]
-                         records[p] = row
-
-             for zinfo in zf.infolist():
-                 arcname = zinfo.filename
-                 if isinstance(arcname, text_type):
-                     u_arcname = arcname
-                 else:
-                     u_arcname = arcname.decode('utf-8')
-                 # See issue #115: some wheels have .. in their entries, but
-                 # in the filename ... e.g. __main__..py ! So the check is
-                 # updated to look for .. in the directory portions
-                 p = u_arcname.split('/')
-                 if '..' in p:
-                     raise DistlibException('invalid entry in '
-                                            'wheel: %r' % u_arcname)
-
-                 if self.skip_entry(u_arcname):
-                     continue
-                 row = records[u_arcname]
-                 if row[2] and str(zinfo.file_size) != row[2]:
-                     raise DistlibException('size mismatch for '
-                                            '%s' % u_arcname)
-                 if row[1]:
-                     kind, value = row[1].split('=', 1)
-                     with zf.open(arcname) as bf:
-                         data = bf.read()
-                     _, digest = self.get_hash(data, kind)
-                     if digest != value:
-                         raise DistlibException('digest mismatch for '
-                                                '%s' % arcname)
-
-     def update(self, modifier, dest_dir=None, **kwargs):
-         """
-         Update the contents of a wheel in a generic way. The modifier should
-         be a callable which expects a dictionary argument: its keys are
-         archive-entry paths, and its values are absolute filesystem paths
-         where the contents the corresponding archive entries can be found. The
-         modifier is free to change the contents of the files pointed to, add
-         new entries and remove entries, before returning. This method will
-         extract the entire contents of the wheel to a temporary location, call
-         the modifier, and then use the passed (and possibly updated)
-         dictionary to write a new wheel. If ``dest_dir`` is specified, the new
-         wheel is written there -- otherwise, the original wheel is overwritten.
-
-         The modifier should return True if it updated the wheel, else False.
-         This method returns the same value the modifier returns.
-         """
-
-         def get_version(path_map, info_dir):
-             version = path = None
-             key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)
-             if key not in path_map:
-                 key = '%s/PKG-INFO' % info_dir
-             if key in path_map:
-                 path = path_map[key]
-                 version = Metadata(path=path).version
-             return version, path
-
-         def update_version(version, path):
-             updated = None
-             try:
-                 v = NormalizedVersion(version)
-                 i = version.find('-')
-                 if i < 0:
-                     updated = '%s+1' % version
-                 else:
-                     parts = [int(s) for s in version[i + 1:].split('.')]
-                     parts[-1] += 1
-                     updated = '%s+%s' % (version[:i],
-                                          '.'.join(str(i) for i in parts))
-             except UnsupportedVersionError:
-                 logger.debug('Cannot update non-compliant (PEP-440) '
-                              'version %r', version)
-             if updated:
-                 md = Metadata(path=path)
-                 md.version = updated
-                 legacy = path.endswith(LEGACY_METADATA_FILENAME)
-                 md.write(path=path, legacy=legacy)
-                 logger.debug('Version updated from %r to %r', version,
-                              updated)
-
-         pathname = os.path.join(self.dirname, self.filename)
-         name_ver = '%s-%s' % (self.name, self.version)
-         info_dir = '%s.dist-info' % name_ver
-         record_name = posixpath.join(info_dir, 'RECORD')
-         with tempdir() as workdir:
-             with ZipFile(pathname, 'r') as zf:
-                 path_map = {}
-                 for zinfo in zf.infolist():
-                     arcname = zinfo.filename
-                     if isinstance(arcname, text_type):
-                         u_arcname = arcname
-                     else:
-                         u_arcname = arcname.decode('utf-8')
-                     if u_arcname == record_name:
-                         continue
-                     if '..' in u_arcname:
-                         raise DistlibException('invalid entry in '
-                                                'wheel: %r' % u_arcname)
-                     zf.extract(zinfo, workdir)
-                     path = os.path.join(workdir, convert_path(u_arcname))
-                     path_map[u_arcname] = path
-
-             # Remember the version.
-             original_version, _ = get_version(path_map, info_dir)
-             # Files extracted. Call the modifier.
-             modified = modifier(path_map, **kwargs)
-             if modified:
-                 # Something changed - need to build a new wheel.
-                 current_version, path = get_version(path_map, info_dir)
-                 if current_version and (current_version == original_version):
-                     # Add or update local version to signify changes.
-                     update_version(current_version, path)
-                 # Decide where the new wheel goes.
-                 if dest_dir is None:
-                     fd, newpath = tempfile.mkstemp(suffix='.whl',
-                                                    prefix='wheel-update-',
-                                                    dir=workdir)
-                     os.close(fd)
-                 else:
-                     if not os.path.isdir(dest_dir):
-                         raise DistlibException('Not a directory: %r' % dest_dir)
-                     newpath = os.path.join(dest_dir, self.filename)
-                 archive_paths = list(path_map.items())
-                 distinfo = os.path.join(workdir, info_dir)
-                 info = distinfo, info_dir
-                 self.write_records(info, workdir, archive_paths)
-                 self.build_zip(newpath, archive_paths)
-                 if dest_dir is None:
-                     shutil.copyfile(newpath, pathname)
-         return modified
-
- def _get_glibc_version():
-     import platform
-     ver = platform.libc_ver()
-     result = []
-     if ver[0] == 'glibc':
-         for s in ver[1].split('.'):
-             result.append(int(s) if s.isdigit() else 0)
-         result = tuple(result)
-     return result
-
- def compatible_tags():
-     """
-     Return (pyver, abi, arch) tuples compatible with this Python.
-     """
-     versions = [VER_SUFFIX]
-     major = VER_SUFFIX[0]
-     for minor in range(sys.version_info[1] - 1, - 1, -1):
-         versions.append(''.join([major, str(minor)]))
-
-     abis = []
-     for suffix in _get_suffixes():
-         if suffix.startswith('.abi'):
-             abis.append(suffix.split('.', 2)[1])
-     abis.sort()
-     if ABI != 'none':
-         abis.insert(0, ABI)
-     abis.append('none')
-     result = []
-
-     arches = [ARCH]
-     if sys.platform == 'darwin':
-         m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
-         if m:
-             name, major, minor, arch = m.groups()
-             minor = int(minor)
-             matches = [arch]
-             if arch in ('i386', 'ppc'):
-                 matches.append('fat')
-             if arch in ('i386', 'ppc', 'x86_64'):
-                 matches.append('fat3')
-             if arch in ('ppc64', 'x86_64'):
-                 matches.append('fat64')
-             if arch in ('i386', 'x86_64'):
-                 matches.append('intel')
-             if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
-                 matches.append('universal')
-             while minor >= 0:
-                 for match in matches:
-                     s = '%s_%s_%s_%s' % (name, major, minor, match)
-                     if s != ARCH:  # already there
-                         arches.append(s)
-                 minor -= 1
-
-     # Most specific - our Python version, ABI and arch
-     for abi in abis:
-         for arch in arches:
-             result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
-             # manylinux
-             if abi != 'none' and sys.platform.startswith('linux'):
-                 arch = arch.replace('linux_', '')
-                 parts = _get_glibc_version()
-                 if len(parts) == 2:
-                     if parts >= (2, 5):
-                         result.append((''.join((IMP_PREFIX, versions[0])), abi,
-                                        'manylinux1_%s' % arch))
-                     if parts >= (2, 12):
-                         result.append((''.join((IMP_PREFIX, versions[0])), abi,
-                                        'manylinux2010_%s' % arch))
-                     if parts >= (2, 17):
-                         result.append((''.join((IMP_PREFIX, versions[0])), abi,
-                                        'manylinux2014_%s' % arch))
-                     result.append((''.join((IMP_PREFIX, versions[0])), abi,
-                                    'manylinux_%s_%s_%s' % (parts[0], parts[1],
-                                                            arch)))
-
-     # where no ABI / arch dependency, but IMP_PREFIX dependency
-     for i, version in enumerate(versions):
-         result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
-         if i == 0:
-             result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
-
-     # no IMP_PREFIX, ABI or arch dependency
-     for i, version in enumerate(versions):
-         result.append((''.join(('py', version)), 'none', 'any'))
-         if i == 0:
-             result.append((''.join(('py', version[0])), 'none', 'any'))
-
-     return set(result)
-
-
- COMPATIBLE_TAGS = compatible_tags()
-
- del compatible_tags
-
-
- def is_compatible(wheel, tags=None):
-     if not isinstance(wheel, Wheel):
-         wheel = Wheel(wheel)  # assume it's a filename
-     result = False
-     if tags is None:
-         tags = COMPATIBLE_TAGS
-     for ver, abi, arch in tags:
-         if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
-             result = True
-             break
-     return result
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/abc.py DELETED
@@ -1,137 +0,0 @@
- import abc
- from typing import BinaryIO, Iterable, Text
-
- from ._compat import runtime_checkable, Protocol
-
-
- class ResourceReader(metaclass=abc.ABCMeta):
-     """Abstract base class for loaders to provide resource reading support."""
-
-     @abc.abstractmethod
-     def open_resource(self, resource: Text) -> BinaryIO:
-         """Return an opened, file-like object for binary reading.
-
-         The 'resource' argument is expected to represent only a file name.
-         If the resource cannot be found, FileNotFoundError is raised.
-         """
-         # This deliberately raises FileNotFoundError instead of
-         # NotImplementedError so that if this method is accidentally called,
-         # it'll still do the right thing.
-         raise FileNotFoundError
-
-     @abc.abstractmethod
-     def resource_path(self, resource: Text) -> Text:
-         """Return the file system path to the specified resource.
-
-         The 'resource' argument is expected to represent only a file name.
-         If the resource does not exist on the file system, raise
-         FileNotFoundError.
-         """
-         # This deliberately raises FileNotFoundError instead of
-         # NotImplementedError so that if this method is accidentally called,
-         # it'll still do the right thing.
-         raise FileNotFoundError
-
-     @abc.abstractmethod
-     def is_resource(self, path: Text) -> bool:
-         """Return True if the named 'path' is a resource.
-
-         Files are resources, directories are not.
-         """
-         raise FileNotFoundError
-
-     @abc.abstractmethod
-     def contents(self) -> Iterable[str]:
-         """Return an iterable of entries in `package`."""
-         raise FileNotFoundError
-
-
- @runtime_checkable
- class Traversable(Protocol):
-     """
-     An object with a subset of pathlib.Path methods suitable for
-     traversing directories and opening files.
-     """
-
-     @abc.abstractmethod
-     def iterdir(self):
-         """
-         Yield Traversable objects in self
-         """
-
-     def read_bytes(self):
-         """
-         Read contents of self as bytes
-         """
-         with self.open('rb') as strm:
-             return strm.read()
-
-     def read_text(self, encoding=None):
-         """
-         Read contents of self as text
-         """
-         with self.open(encoding=encoding) as strm:
-             return strm.read()
-
-     @abc.abstractmethod
-     def is_dir(self) -> bool:
-         """
-         Return True if self is a directory
-         """
-
-     @abc.abstractmethod
-     def is_file(self) -> bool:
-         """
-         Return True if self is a file
-         """
-
-     @abc.abstractmethod
-     def joinpath(self, child):
-         """
-         Return Traversable child in self
-         """
-
-     def __truediv__(self, child):
-         """
-         Return Traversable child in self
-         """
-         return self.joinpath(child)
-
-     @abc.abstractmethod
-     def open(self, mode='r', *args, **kwargs):
-         """
-         mode may be 'r' or 'rb' to open as text or binary. Return a handle
-         suitable for reading (same as pathlib.Path.open).
-
-         When opening as text, accepts encoding parameters such as those
-         accepted by io.TextIOWrapper.
-         """
-
-     @abc.abstractproperty
-     def name(self) -> str:
-         """
-         The base name of this object without any parent references.
-         """
-
-
- class TraversableResources(ResourceReader):
-     """
-     The required interface for providing traversable
-     resources.
-     """
-
-     @abc.abstractmethod
-     def files(self):
-         """Return a Traversable object for the loaded package."""
-
-     def open_resource(self, resource):
-         return self.files().joinpath(resource).open('rb')
-
-     def resource_path(self, resource):
-         raise FileNotFoundError(resource)
-
-     def is_resource(self, path):
-         return self.files().joinpath(path).is_file()
-
-     def contents(self):
-         return (item.name for item in self.files().iterdir())
 
spaces/Avinash-12035/MyGenAIChatBot/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: MyGenAIChatBot
- emoji: 🏢
- colorFrom: indigo
- colorTo: red
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/config.py DELETED
@@ -1,87 +0,0 @@
- from detectron2.config import CfgNode as CN
-
- def add_centernet_config(cfg):
-     _C = cfg
-
-     _C.MODEL.CENTERNET = CN()
-     _C.MODEL.CENTERNET.NUM_CLASSES = 80
-     _C.MODEL.CENTERNET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
-     _C.MODEL.CENTERNET.FPN_STRIDES = [8, 16, 32, 64, 128]
-     _C.MODEL.CENTERNET.PRIOR_PROB = 0.01
-     _C.MODEL.CENTERNET.INFERENCE_TH = 0.05
-     _C.MODEL.CENTERNET.CENTER_NMS = False
-     _C.MODEL.CENTERNET.NMS_TH_TRAIN = 0.6
-     _C.MODEL.CENTERNET.NMS_TH_TEST = 0.6
-     _C.MODEL.CENTERNET.PRE_NMS_TOPK_TRAIN = 1000
-     _C.MODEL.CENTERNET.POST_NMS_TOPK_TRAIN = 100
-     _C.MODEL.CENTERNET.PRE_NMS_TOPK_TEST = 1000
-     _C.MODEL.CENTERNET.POST_NMS_TOPK_TEST = 100
-     _C.MODEL.CENTERNET.NORM = "GN"
-     _C.MODEL.CENTERNET.USE_DEFORMABLE = False
-     _C.MODEL.CENTERNET.NUM_CLS_CONVS = 4
-     _C.MODEL.CENTERNET.NUM_BOX_CONVS = 4
-     _C.MODEL.CENTERNET.NUM_SHARE_CONVS = 0
-     _C.MODEL.CENTERNET.LOC_LOSS_TYPE = 'giou'
-     _C.MODEL.CENTERNET.SIGMOID_CLAMP = 1e-4
-     _C.MODEL.CENTERNET.HM_MIN_OVERLAP = 0.8
-     _C.MODEL.CENTERNET.MIN_RADIUS = 4
-     _C.MODEL.CENTERNET.SOI = [[0, 80], [64, 160], [128, 320], [256, 640], [512, 10000000]]
-     _C.MODEL.CENTERNET.POS_WEIGHT = 1.
-     _C.MODEL.CENTERNET.NEG_WEIGHT = 1.
-     _C.MODEL.CENTERNET.REG_WEIGHT = 2.
-     _C.MODEL.CENTERNET.HM_FOCAL_BETA = 4
-     _C.MODEL.CENTERNET.HM_FOCAL_ALPHA = 0.25
-     _C.MODEL.CENTERNET.LOSS_GAMMA = 2.0
-     _C.MODEL.CENTERNET.WITH_AGN_HM = False
-     _C.MODEL.CENTERNET.ONLY_PROPOSAL = False
-     _C.MODEL.CENTERNET.AS_PROPOSAL = False
-     _C.MODEL.CENTERNET.IGNORE_HIGH_FP = -1.
-     _C.MODEL.CENTERNET.MORE_POS = False
-     _C.MODEL.CENTERNET.MORE_POS_THRESH = 0.2
-     _C.MODEL.CENTERNET.MORE_POS_TOPK = 9
-     _C.MODEL.CENTERNET.NOT_NORM_REG = True
-     _C.MODEL.CENTERNET.NOT_NMS = False
-     _C.MODEL.CENTERNET.NO_REDUCE = False
-
-     _C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False
-     _C.MODEL.ROI_BOX_HEAD.PRIOR_PROB = 0.01
-     _C.MODEL.ROI_BOX_HEAD.USE_EQL_LOSS = False
-     _C.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH = \
-         'datasets/lvis/lvis_v1_train_cat_info.json'
-     _C.MODEL.ROI_BOX_HEAD.EQL_FREQ_CAT = 200
-     _C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False
-     _C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CAT = 50
-     _C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT = 0.5
-     _C.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE = False
-
-     _C.MODEL.BIFPN = CN()
-     _C.MODEL.BIFPN.NUM_LEVELS = 5
-     _C.MODEL.BIFPN.NUM_BIFPN = 6
-     _C.MODEL.BIFPN.NORM = 'GN'
-     _C.MODEL.BIFPN.OUT_CHANNELS = 160
-     _C.MODEL.BIFPN.SEPARABLE_CONV = False
-
-     _C.MODEL.DLA = CN()
-     _C.MODEL.DLA.OUT_FEATURES = ['dla2']
-     _C.MODEL.DLA.USE_DLA_UP = True
-     _C.MODEL.DLA.NUM_LAYERS = 34
-     _C.MODEL.DLA.MS_OUTPUT = False
-     _C.MODEL.DLA.NORM = 'BN'
-     _C.MODEL.DLA.DLAUP_IN_FEATURES = ['dla3', 'dla4', 'dla5']
-     _C.MODEL.DLA.DLAUP_NODE = 'conv'
-
-     _C.SOLVER.RESET_ITER = False
-     _C.SOLVER.TRAIN_ITER = -1
-
-     _C.INPUT.CUSTOM_AUG = ''
-     _C.INPUT.TRAIN_SIZE = 640
-     _C.INPUT.TEST_SIZE = 640
-     _C.INPUT.SCALE_RANGE = (0.1, 2.)
-     # 'default' for fixed short/ long edge, 'square' for max size=INPUT.SIZE
-     _C.INPUT.TEST_INPUT_TYPE = 'default'
-
-     _C.DEBUG = False
-     _C.SAVE_DEBUG = False
-     _C.SAVE_PTH = False
-     _C.VIS_THRESH = 0.3
-     _C.DEBUG_SHOW_NAME = False
 
spaces/BAAI/vid2vid-zero/app.py DELETED
@@ -1,72 +0,0 @@
-# Most code is from https://huggingface.co/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI
-
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import os
-from subprocess import getoutput
-
-import gradio as gr
-import torch
-
-from gradio_demo.app_running import create_demo
-from gradio_demo.runner import Runner
-
-TITLE = '# [vid2vid-zero](https://github.com/baaivision/vid2vid-zero)'
-
-ORIGINAL_SPACE_ID = 'BAAI/vid2vid-zero'
-SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)
-GPU_DATA = getoutput('nvidia-smi')
-SHARED_UI_WARNING = f'''## Attention - Running doesn't work in this shared UI. You can duplicate and use it with a paid private T4 GPU.
-<center><a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="margin-top:0;margin-bottom:0" src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></center>
-'''
-
-if os.getenv('SYSTEM') == 'spaces' and SPACE_ID != ORIGINAL_SPACE_ID:
-    SETTINGS = f'<a href="https://huggingface.co/spaces/{SPACE_ID}/settings">Settings</a>'
-else:
-    SETTINGS = 'Settings'
-
-CUDA_NOT_AVAILABLE_WARNING = f'''## Attention - Running on CPU.
-<center>
-You can assign a GPU in the {SETTINGS} tab if you are running this on HF Spaces.
-You can use "T4 small/medium" to run this demo.
-</center>
-'''
-
-HF_TOKEN_NOT_SPECIFIED_WARNING = f'''The environment variable `HF_TOKEN` is not specified. Feel free to specify your Hugging Face token with write permission if you don't want to manually provide it for every run.
-<center>
-You can check and create your Hugging Face tokens <a href="https://huggingface.co/settings/tokens" target="_blank">here</a>.
-You can specify environment variables in the "Repository secrets" section of the {SETTINGS} tab.
-</center>
-'''
-
-HF_TOKEN = os.getenv('HF_TOKEN')
-
-
-def show_warning(warning_text: str) -> gr.Blocks:
-    with gr.Blocks() as demo:
-        with gr.Box():
-            gr.Markdown(warning_text)
-    return demo
-
-
-pipe = None
-runner = Runner(HF_TOKEN)
-
-with gr.Blocks(css='gradio_demo/style.css') as demo:
-    gr.HTML('''<center><a href="https://huggingface.co/spaces/BAAI/vid2vid-zero?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your machine''')
-    if not torch.cuda.is_available():
-        show_warning(CUDA_NOT_AVAILABLE_WARNING)
-    # elif SPACE_ID == ORIGINAL_SPACE_ID:
-    #     show_warning(SHARED_UI_WARNING)
-
-    gr.Markdown(TITLE)
-    with gr.Tabs():
-        with gr.TabItem('Zero-shot Testing'):
-            create_demo(runner, pipe)
-
-    if not HF_TOKEN:
-        show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING)
-
-demo.queue(max_size=1).launch(share=False)
 
spaces/BaitMan/abroader-otters/greeting.md DELETED
@@ -1,6 +0,0 @@
-(1) This house is Bitchin'!
-(2) I am vengeance.
-(3) Who said "And there it is! Who's the best hacker in the world people?"
-(4) The name of the move that requires to be at "5.3 Miles" away
-
-Now Combine all that. Every Answer starts with Capital letter, followed by lowercases, no Spaces.
 
spaces/Banbri/zcvzcv/src/components/ui/command.tsx DELETED
@@ -1,155 +0,0 @@
-"use client"
-
-import * as React from "react"
-import { DialogProps } from "@radix-ui/react-dialog"
-import { Command as CommandPrimitive } from "cmdk"
-import { Search } from "lucide-react"
-
-import { cn } from "@/lib/utils"
-import { Dialog, DialogContent } from "@/components/ui/dialog"
-
-const Command = React.forwardRef<
-  React.ElementRef<typeof CommandPrimitive>,
-  React.ComponentPropsWithoutRef<typeof CommandPrimitive>
->(({ className, ...props }, ref) => (
-  <CommandPrimitive
-    ref={ref}
-    className={cn(
-      "flex h-full w-full flex-col overflow-hidden rounded-md bg-white text-stone-950 dark:bg-stone-950 dark:text-stone-50",
-      className
-    )}
-    {...props}
-  />
-))
-Command.displayName = CommandPrimitive.displayName
-
-interface CommandDialogProps extends DialogProps {}
-
-const CommandDialog = ({ children, ...props }: CommandDialogProps) => {
-  return (
-    <Dialog {...props}>
-      <DialogContent className="overflow-hidden p-0 shadow-lg">
-        <Command className="[&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group-heading]]:text-stone-500 [&_[cmdk-group]:not([hidden])_~[cmdk-group]]:pt-0 [&_[cmdk-group]]:px-2 [&_[cmdk-input-wrapper]_svg]:h-5 [&_[cmdk-input-wrapper]_svg]:w-5 [&_[cmdk-input]]:h-12 [&_[cmdk-item]]:px-2 [&_[cmdk-item]]:py-3 [&_[cmdk-item]_svg]:h-5 [&_[cmdk-item]_svg]:w-5 dark:[&_[cmdk-group-heading]]:text-stone-400">
-          {children}
-        </Command>
-      </DialogContent>
-    </Dialog>
-  )
-}
-
-const CommandInput = React.forwardRef<
-  React.ElementRef<typeof CommandPrimitive.Input>,
-  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Input>
->(({ className, ...props }, ref) => (
-  <div className="flex items-center border-b px-3" cmdk-input-wrapper="">
-    <Search className="mr-2 h-4 w-4 shrink-0 opacity-50" />
-    <CommandPrimitive.Input
-      ref={ref}
-      className={cn(
-        "flex h-11 w-full rounded-md bg-transparent py-3 text-sm outline-none placeholder:text-stone-500 disabled:cursor-not-allowed disabled:opacity-50 dark:placeholder:text-stone-400",
-        className
-      )}
-      {...props}
-    />
-  </div>
-))
-
-CommandInput.displayName = CommandPrimitive.Input.displayName
-
-const CommandList = React.forwardRef<
-  React.ElementRef<typeof CommandPrimitive.List>,
-  React.ComponentPropsWithoutRef<typeof CommandPrimitive.List>
->(({ className, ...props }, ref) => (
-  <CommandPrimitive.List
-    ref={ref}
-    className={cn("max-h-[300px] overflow-y-auto overflow-x-hidden", className)}
-    {...props}
-  />
-))
-
-CommandList.displayName = CommandPrimitive.List.displayName
-
-const CommandEmpty = React.forwardRef<
-  React.ElementRef<typeof CommandPrimitive.Empty>,
-  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Empty>
->((props, ref) => (
-  <CommandPrimitive.Empty
-    ref={ref}
-    className="py-6 text-center text-sm"
-    {...props}
-  />
-))
-
-CommandEmpty.displayName = CommandPrimitive.Empty.displayName
-
-const CommandGroup = React.forwardRef<
-  React.ElementRef<typeof CommandPrimitive.Group>,
-  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Group>
->(({ className, ...props }, ref) => (
-  <CommandPrimitive.Group
-    ref={ref}
-    className={cn(
-      "overflow-hidden p-1 text-stone-950 [&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:py-1.5 [&_[cmdk-group-heading]]:text-xs [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group-heading]]:text-stone-500 dark:text-stone-50 dark:[&_[cmdk-group-heading]]:text-stone-400",
-      className
-    )}
-    {...props}
-  />
-))
-
-CommandGroup.displayName = CommandPrimitive.Group.displayName
-
-const CommandSeparator = React.forwardRef<
-  React.ElementRef<typeof CommandPrimitive.Separator>,
-  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Separator>
->(({ className, ...props }, ref) => (
-  <CommandPrimitive.Separator
-    ref={ref}
-    className={cn("-mx-1 h-px bg-stone-200 dark:bg-stone-800", className)}
-    {...props}
-  />
-))
-CommandSeparator.displayName = CommandPrimitive.Separator.displayName
-
-const CommandItem = React.forwardRef<
-  React.ElementRef<typeof CommandPrimitive.Item>,
-  React.ComponentPropsWithoutRef<typeof CommandPrimitive.Item>
->(({ className, ...props }, ref) => (
-  <CommandPrimitive.Item
-    ref={ref}
-    className={cn(
-      "relative flex cursor-default select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none aria-selected:bg-stone-100 aria-selected:text-stone-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:aria-selected:bg-stone-800 dark:aria-selected:text-stone-50",
-      className
-    )}
-    {...props}
-  />
-))
-
-CommandItem.displayName = CommandPrimitive.Item.displayName
-
-const CommandShortcut = ({
-  className,
-  ...props
-}: React.HTMLAttributes<HTMLSpanElement>) => {
-  return (
-    <span
-      className={cn(
-        "ml-auto text-xs tracking-widest text-stone-500 dark:text-stone-400",
-        className
-      )}
-      {...props}
-    />
-  )
-}
-CommandShortcut.displayName = "CommandShortcut"
-
-export {
-  Command,
-  CommandDialog,
-  CommandInput,
-  CommandList,
-  CommandEmpty,
-  CommandGroup,
-  CommandItem,
-  CommandShortcut,
-  CommandSeparator,
-}
 
spaces/Bart92/RVC_HF/diffq/diffq.py DELETED
@@ -1,286 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Differentiable quantizer based on scaled noise injection.
-"""
-from dataclasses import dataclass
-import math
-import typing as tp
-
-import torch
-
-from .base import BaseQuantizer
-from .uniform import uniform_quantize, uniform_unquantize
-from .utils import simple_repr
-
-
-class DiffQuantizer(BaseQuantizer):
-    @dataclass
-    class _QuantizedParam(BaseQuantizer._QuantizedParam):
-        logit: torch.nn.Parameter
-
-    def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False,
-                 group_size: int = 1, min_bits: float = 2, max_bits: float = 15,
-                 param="bits", noise="gaussian",
-                 init_bits: float = 8, extra_bits: float = 0, suffix: str = "_diffq",
-                 exclude: tp.List[str] = [], detect_bound: bool = True):
-        """
-        Differentiable quantizer based on scaled noise injection.
-        For every parameter `p` in the model, this introduces a number of bits parameter
-        `b` with the same dimensions (when group_size = 1).
-        Before each forward, `p` is replaced by `p + U`
-        with U uniform iid noise with range [-d/2, d/2], with `d` the uniform quantization
-        step for `b` bits.
-        This noise approximates the quantization noise in a differentiable manner, both
-        with respect to the unquantized parameter `p` and the number of bits `b`.
-
-        At evaluation (as detected with `model.eval()`), the model is replaced
-        by its true quantized version, and restored when going back to training.
-
-        When doing actual quantization (for serialization, or evaluation),
-        the number of bits is rounded to the nearest integer, and needs to be stored along.
-        This will cost a few bits per dimension. To reduce this cost, one can use `group_size`,
-        which will use a single noise level for multiple weight entries.
-
-        You can use the `DiffQuantizer.model_size` method to get a differentiable estimate of the
-        model size in MB. You can then use this estimate as a penalty in your training loss.
-
-        Args:
-            model (torch.nn.Module): model to quantize
-            min_size (float): minimum size in MB of a parameter to be quantized.
-            float16 (bool): if a layer is smaller than min_size, should we still do float16?
-            group_size (int): weight entries are grouped together to reduce the number
-                of noise scales to store. This should divide the size of all parameters
-                bigger than min_size.
-            min_bits (float): minimal number of bits.
-            max_bits (float): maximal number of bits.
-            init_bits (float): initial number of bits.
-            extra_bits (float): extra bits to add for actual quantization (before roundoff).
-            suffix (str): suffix used for the name of the extra noise scale parameters.
-            exclude (list[str]): list of patterns used to match parameters to exclude.
-                For instance `['bias']` to exclude all bias terms.
-            detect_bound (bool): if True, will detect bound parameters and reuse
-                the same quantized tensor for both, as well as the same number of bits.
-
-        ..Warning::
-            You must call `model.train()` and `model.eval()` for `DiffQuantizer` to work properly.
-
-        """
-        self.group_size = group_size
-        self.min_bits = min_bits
-        self.max_bits = max_bits
-        self.init_bits = init_bits
-        self.extra_bits = extra_bits
-        self.suffix = suffix
-        self.param = param
-        self.noise = noise
-        assert noise in ["gaussian", "uniform"]
-        self._optimizer_setup = False
-
-        self._min_noise = 1 / (2 ** self.max_bits - 1)
-        self._max_noise = 1 / (2 ** self.min_bits - 1)
-
-        assert group_size >= 0
-        assert min_bits < init_bits < max_bits, \
-            "init_bits must be between min_bits and max_bits excluded"
-
-        for name, _ in model.named_parameters():
-            if name.endswith(suffix):
-                raise RuntimeError("The model already has some noise scales parameters, "
-                                   "maybe you used twice a DiffQuantizer on the same model?")
-
-        super().__init__(model, min_size, float16, exclude, detect_bound)
-
-    def _get_bits(self, logit: torch.Tensor):
-        if self.param == "noise":
-            return torch.log2(1 + 1 / self._get_noise_scale(logit))
-        else:
-            t = torch.sigmoid(logit)
-            return self.max_bits * t + (1 - t) * self.min_bits
-
-    def _get_noise_scale(self, logit: torch.Tensor):
-        if self.param == "noise":
-            t = torch.sigmoid(logit)
-            return torch.exp(t * math.log(self._min_noise) + (1 - t) * math.log(self._max_noise))
-        else:
-            return 1 / (2 ** self._get_bits(logit) - 1)
-
-    def _register_param(self, name, param, module, other):
-        if other is not None:
-            return self.__class__._QuantizedParam(
-                name=name, param=param, module=module, logit=other.logit, other=other)
-        assert self.group_size == 0 or param.numel() % self.group_size == 0
-        # we want the initial number of bits to be init_bits.
-        if self.param == "noise":
-            noise_scale = 1 / (2 ** self.init_bits - 1)
-            t = (math.log(noise_scale) - math.log(self._max_noise)) / (
-                math.log(self._min_noise) - math.log(self._max_noise))
-        else:
-            t = (self.init_bits - self.min_bits) / (self.max_bits - self.min_bits)
-        assert 0 < t < 1
-        logit = torch.logit(torch.tensor(float(t)))
-        assert abs(self._get_bits(logit) - self.init_bits) < 1e-5
-        if self.group_size > 0:
-            nparam = param.numel() // self.group_size
-        else:
-            nparam = 1
-        logit = torch.nn.Parameter(
-            torch.full(
-                (nparam,),
-                logit,
-                device=param.device))
-        module.register_parameter(name + self.suffix, logit)
-        return self.__class__._QuantizedParam(
-            name=name, param=param, module=module, logit=logit, other=None)
-
-    def clear_optimizer(self, optimizer: torch.optim.Optimizer):
-        params = [qp.logit for qp in self._qparams]
-
-        for group in optimizer.param_groups:
-            new_params = []
-            for q in list(group["params"]):
-                matched = False
-                for p in params:
-                    if p is q:
-                        matched = True
-                if not matched:
-                    new_params.append(q)
-            group["params"][:] = new_params
-
-    def setup_optimizer(self, optimizer: torch.optim.Optimizer,
-                        lr: float = 1e-3, **kwargs):
-        """
-        Setup the optimizer to tune the number of bits. In particular, this will deactivate
-        weight decay for the bits parameters.
-
-        Args:
-            optimizer (torch.Optimizer): optimizer to use.
-            lr (float): specific learning rate for the bits parameters. 1e-3
-                is perfect for Adam.
-            kwargs (dict): overrides for other optimization parameters for the bits.
-        """
-        assert not self._optimizer_setup
-        self._optimizer_setup = True
-
-        params = [qp.logit for qp in self._qparams]
-
-        for group in optimizer.param_groups:
-            for q in list(group["params"]):
-                for p in params:
-                    if p is q:
-                        raise RuntimeError("You should create the optimizer "
-                                           "before the quantizer!")
-
-        group = {"params": params, "lr": lr, "weight_decay": 0}
-        group.update(kwargs)
-        optimizer.add_param_group(group)
-
-    def no_optimizer(self):
-        """
-        Call this if you do not want to use an optimizer.
-        """
-        self._optimizer_setup = True
-
-    def check_unused(self):
-        for qparam in self._qparams:
-            if qparam.other is not None:
-                continue
-            grad = qparam.param.grad
-            if grad is None or (grad == 0).all():
-                if qparam.logit.grad is not None:
-                    qparam.logit.grad.data.zero_()
-
-    def model_size(self, exact=False):
-        """
-        Differentiable estimate of the model size.
-        The size is returned in MB.
-
-        If `exact` is True, then the output is no longer differentiable but
-        reflects exactly an achievable size, even without compression,
-        i.e. same as returned by `naive_model_size()`.
-        """
-        total = super().model_size()
-        subtotal = 0
-        for qparam in self._qparams:
-            # only count the first appearance of a Parameter
-            if qparam.other is not None:
-                continue
-            bits = self.extra_bits + self._get_bits(qparam.logit)
-            if exact:
-                bits = bits.round().clamp(1, 15)
-            if self.group_size == 0:
-                group_size = qparam.param.numel()
-            else:
-                group_size = self.group_size
-            subtotal += group_size * bits.sum()
-            subtotal += 2 * 32  # param scale
-
-            # Number of bits to represent each number of bits
-            bits_bits = math.ceil(math.log2(1 + (bits.max().round().item() - self.min_bits)))
-            subtotal += 8  # 8 bits for bits_bits
-            subtotal += bits_bits * bits.numel()
-
-        subtotal /= 2 ** 20 * 8  # bits -> MegaBytes
-        return total + subtotal
-
-    def true_model_size(self):
-        """
-        Naive model size without zlib compression.
-        """
-        return self.model_size(exact=True).item()
-
-    def _pre_forward_train(self):
-        if not self._optimizer_setup:
-            raise RuntimeError("You must call `setup_optimizer()` on your optimizer "
-                               "before starting training.")
-        for qparam in self._qparams:
-            if qparam.other is not None:
-                noisy = qparam.other.module._parameters[qparam.other.name]
-            else:
-                bits = self._get_bits(qparam.logit)[:, None]
-                if self.group_size == 0:
-                    p_flat = qparam.param.view(-1)
-                else:
-                    p_flat = qparam.param.view(-1, self.group_size)
-                scale = p_flat.max() - p_flat.min()
-                unit = 1 / (2**bits - 1)
-                if self.noise == "uniform":
-                    noise_source = (torch.rand_like(p_flat) - 0.5)
-                elif self.noise == "gaussian":
-                    noise_source = torch.randn_like(p_flat) / 2
-                noise = scale * unit * noise_source
-                noisy = p_flat + noise
-            # We bypass the checks by PyTorch on parameters being leafs
-            qparam.module._parameters[qparam.name] = noisy.view_as(qparam.param)
-        return True
-
-    def _post_forward_train(self):
-        for qparam in self._qparams:
-            qparam.module._parameters[qparam.name] = qparam.param
-        return True
-
-    def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any:
-        bits = self.extra_bits + self._get_bits(qparam.logit)
-        bits = bits.round().clamp(1, 15)[:, None].byte()
-        if self.group_size == 0:
-            p = qparam.param.data.view(-1)
-        else:
-            p = qparam.param.data.view(-1, self.group_size)
-        levels, scales = uniform_quantize(p, bits)
-        return levels, scales, bits
-
-    def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor:
-        levels, param_scale, bits = quantized
-        return uniform_unquantize(levels, param_scale, bits).view_as(qparam.param.data)
-
-    def detach(self):
-        super().detach()
-        for qparam in self._qparams:
-            delattr(qparam.module, qparam.name + self.suffix)
-
-    def __repr__(self):
-        return simple_repr(self)
 
spaces/Bart92/RVC_HF/infer/lib/rmvpe.py DELETED
@@ -1,717 +0,0 @@
-import pdb, os
-
-import numpy as np
-import torch
-try:
-    # Fix "Torch not compiled with CUDA enabled"
-    import intel_extension_for_pytorch as ipex  # pylint: disable=import-error, unused-import
-    if torch.xpu.is_available():
-        from infer.modules.ipex import ipex_init
-        ipex_init()
-except Exception:
-    pass
-import torch.nn as nn
-import torch.nn.functional as F
-from librosa.util import normalize, pad_center, tiny
-from scipy.signal import get_window
-
-import logging
-
-logger = logging.getLogger(__name__)
-
-
-### stft codes from https://github.com/pseeth/torch-stft/blob/master/torch_stft/util.py
-def window_sumsquare(
-    window,
-    n_frames,
-    hop_length=200,
-    win_length=800,
-    n_fft=800,
-    dtype=np.float32,
-    norm=None,
-):
-    """
-    # from librosa 0.6
-    Compute the sum-square envelope of a window function at a given hop length.
-    This is used to estimate modulation effects induced by windowing
-    observations in short-time fourier transforms.
-    Parameters
-    ----------
-    window : string, tuple, number, callable, or list-like
-        Window specification, as in `get_window`
-    n_frames : int > 0
-        The number of analysis frames
-    hop_length : int > 0
-        The number of samples to advance between frames
-    win_length : [optional]
-        The length of the window function. By default, this matches `n_fft`.
-    n_fft : int > 0
-        The length of each analysis frame.
-    dtype : np.dtype
-        The data type of the output
-    Returns
-    -------
-    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
-        The sum-squared envelope of the window function
-    """
-    if win_length is None:
-        win_length = n_fft
-
-    n = n_fft + hop_length * (n_frames - 1)
-    x = np.zeros(n, dtype=dtype)
-
-    # Compute the squared window at the desired length
-    win_sq = get_window(window, win_length, fftbins=True)
-    win_sq = normalize(win_sq, norm=norm) ** 2
-    win_sq = pad_center(win_sq, n_fft)
-
-    # Fill the envelope
-    for i in range(n_frames):
-        sample = i * hop_length
-        x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
-    return x
-
-
-class STFT(torch.nn.Module):
-    def __init__(
-        self, filter_length=1024, hop_length=512, win_length=None, window="hann"
-    ):
-        """
-        This module implements an STFT using 1D convolution and 1D transpose convolutions.
-        This is a bit tricky so there are some cases that probably won't work as working
-        out the same sizes before and after in all overlap add setups is tough. Right now,
-        this code should work with hop lengths that are half the filter length (50% overlap
-        between frames).
-
-        Keyword Arguments:
-            filter_length {int} -- Length of filters used (default: {1024})
-            hop_length {int} -- Hop length of STFT (restrict to 50% overlap between frames) (default: {512})
-            win_length {[type]} -- Length of the window function applied to each frame (if not specified, it
-                equals the filter length). (default: {None})
-            window {str} -- Type of window to use (options are bartlett, hann, hamming, blackman, blackmanharris)
-                (default: {'hann'})
-        """
-        super(STFT, self).__init__()
-        self.filter_length = filter_length
-        self.hop_length = hop_length
-        self.win_length = win_length if win_length else filter_length
-        self.window = window
-        self.forward_transform = None
-        self.pad_amount = int(self.filter_length / 2)
-        scale = self.filter_length / self.hop_length
-        fourier_basis = np.fft.fft(np.eye(self.filter_length))
-
-        cutoff = int((self.filter_length / 2 + 1))
-        fourier_basis = np.vstack(
-            [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
-        )
-        forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
-        inverse_basis = torch.FloatTensor(
-            np.linalg.pinv(scale * fourier_basis).T[:, None, :]
-        )
-
-        assert filter_length >= self.win_length
-        # get window and zero center pad it to filter_length
-        fft_window = get_window(window, self.win_length, fftbins=True)
-        fft_window = pad_center(fft_window, size=filter_length)
-        fft_window = torch.from_numpy(fft_window).float()
-
-        # window the bases
-        forward_basis *= fft_window
-        inverse_basis *= fft_window
-
-        self.register_buffer("forward_basis", forward_basis.float())
-        self.register_buffer("inverse_basis", inverse_basis.float())
-
-    def transform(self, input_data):
-        """Take input data (audio) to STFT domain.
-
-        Arguments:
-            input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
-
-        Returns:
-            magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
-                num_frequencies, num_frames)
-            phase {tensor} -- Phase of STFT with shape (num_batch,
-                num_frequencies, num_frames)
-        """
-        num_batches = input_data.shape[0]
-        num_samples = input_data.shape[-1]
-
-        self.num_samples = num_samples
-
-        # similar to librosa, reflect-pad the input
-        input_data = input_data.view(num_batches, 1, num_samples)
-        # print(1234, input_data.shape)
-        input_data = F.pad(
-            input_data.unsqueeze(1),
-            (self.pad_amount, self.pad_amount, 0, 0, 0, 0),
-            mode="reflect",
-        ).squeeze(1)
-        # print(2333, input_data.shape, self.forward_basis.shape, self.hop_length)
-        # pdb.set_trace()
-        forward_transform = F.conv1d(
-            input_data, self.forward_basis, stride=self.hop_length, padding=0
-        )
-
-        cutoff = int((self.filter_length / 2) + 1)
-        real_part = forward_transform[:, :cutoff, :]
-        imag_part = forward_transform[:, cutoff:, :]
-
-        magnitude = torch.sqrt(real_part**2 + imag_part**2)
-        # phase = torch.atan2(imag_part.data, real_part.data)
-
-        return magnitude  # , phase
-
-    def inverse(self, magnitude, phase):
-        """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced
-        by the ```transform``` function.
-
-        Arguments:
-            magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
-                num_frequencies, num_frames)
-            phase {tensor} -- Phase of STFT with shape (num_batch,
-                num_frequencies, num_frames)
-
-        Returns:
-            inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of
-                shape (num_batch, num_samples)
-        """
-        recombine_magnitude_phase = torch.cat(
-            [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
-        )
-
-        inverse_transform = F.conv_transpose1d(
-            recombine_magnitude_phase,
-            self.inverse_basis,
-            stride=self.hop_length,
-            padding=0,
-        )
-
-        if self.window is not None:
-            window_sum = window_sumsquare(
-                self.window,
-                magnitude.size(-1),
-                hop_length=self.hop_length,
-                win_length=self.win_length,
-                n_fft=self.filter_length,
-                dtype=np.float32,
-            )
-            # remove modulation effects
-            approx_nonzero_indices = torch.from_numpy(
-                np.where(window_sum > tiny(window_sum))[0]
-            )
-            window_sum = torch.from_numpy(window_sum).to(inverse_transform.device)
-            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
-                approx_nonzero_indices
-            ]
-
-            # scale by hop ratio
-            inverse_transform *= float(self.filter_length) / self.hop_length
-
-        inverse_transform = inverse_transform[..., self.pad_amount :]
-        inverse_transform = inverse_transform[..., : self.num_samples]
-        inverse_transform = inverse_transform.squeeze(1)
-
-        return inverse_transform
-
-    def forward(self, input_data):
-        """Take input data (audio) to STFT domain and then back to audio.
-
-        Arguments:
-            input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
-
-        Returns:
-            reconstruction {tensor} -- Reconstructed audio given magnitude and phase. Of
-                shape (num_batch, num_samples)
-        """
-        self.magnitude, self.phase = self.transform(input_data)
-        reconstruction = self.inverse(self.magnitude, self.phase)
-        return reconstruction
-
-
-from time import time as ttime
-
-
-class BiGRU(nn.Module):
-    def __init__(self, input_features, hidden_features, num_layers):
-        super(BiGRU, self).__init__()
-        self.gru = nn.GRU(
-            input_features,
-            hidden_features,
-            num_layers=num_layers,
-            batch_first=True,
-            bidirectional=True,
-        )
-
-    def forward(self, x):
-        return self.gru(x)[0]
-
-
-class ConvBlockRes(nn.Module):
-    def __init__(self, in_channels, out_channels, momentum=0.01):
-        super(ConvBlockRes, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(
-                in_channels=in_channels,
-                out_channels=out_channels,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=(1, 1),
-                bias=False,
-            ),
-            nn.BatchNorm2d(out_channels, momentum=momentum),
-            nn.ReLU(),
-            nn.Conv2d(
-                in_channels=out_channels,
-                out_channels=out_channels,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=(1, 1),
-                bias=False,
-            ),
-            nn.BatchNorm2d(out_channels, momentum=momentum),
-            nn.ReLU(),
-        )
-        if in_channels != out_channels:
-            self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
-            self.is_shortcut = True
-        else:
-            self.is_shortcut = False
-
-    def forward(self, x):
-        if self.is_shortcut:
-            return self.conv(x) + self.shortcut(x)
-        else:
-            return self.conv(x) + x
-
-
-class Encoder(nn.Module):
-    def __init__(
-        self,
-        in_channels,
-        in_size,
-        n_encoders,
-        kernel_size,
-        n_blocks,
-        out_channels=16,
-        momentum=0.01,
-    ):
-        super(Encoder, self).__init__()
-        self.n_encoders = n_encoders
-        self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
-        self.layers = nn.ModuleList()
-        self.latent_channels = []
-        for i in range(self.n_encoders):
-            self.layers.append(
-                ResEncoderBlock(
-                    in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
-                )
-            )
-            self.latent_channels.append([out_channels, in_size])
-            in_channels = out_channels
-            out_channels *= 2
-            in_size //= 2
-        self.out_size = in_size
-        self.out_channel = out_channels
-
-    def forward(self, x):
-        concat_tensors = []
-        x = self.bn(x)
-        for i in range(self.n_encoders):
-            _, x = self.layers[i](x)
-            concat_tensors.append(_)
-        return x, concat_tensors
-
-
-class ResEncoderBlock(nn.Module):
-    def __init__(
-        self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
-    ):
-        super(ResEncoderBlock, self).__init__()
-        self.n_blocks = n_blocks
-        self.conv = nn.ModuleList()
-        self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
-        for i in range(n_blocks - 1):
-            self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
-        self.kernel_size = kernel_size
-        if self.kernel_size is not None:
-            self.pool = nn.AvgPool2d(kernel_size=kernel_size)
-
-    def forward(self, x):
-        for i in range(self.n_blocks):
-            x = self.conv[i](x)
-        if self.kernel_size is not None:
-            return x, self.pool(x)
-        else:
-            return x
-
-
-class Intermediate(nn.Module):  #
-    def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
-        super(Intermediate, self).__init__()
-        self.n_inters = n_inters
-        self.layers = nn.ModuleList()
-        self.layers.append(
-            ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
-        )
-        for i in range(self.n_inters - 1):
-            self.layers.append(
-                ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
-            )
-
-    def forward(self, x):
-        for i in range(self.n_inters):
-            x = self.layers[i](x)
-        return x
-
-
-class ResDecoderBlock(nn.Module):
-    def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
-        super(ResDecoderBlock, self).__init__()
-        out_padding = (0, 1) if stride == (1, 2) else (1, 1)
-        self.n_blocks = n_blocks
-        self.conv1 = nn.Sequential(
-            nn.ConvTranspose2d(
-                in_channels=in_channels,
-                out_channels=out_channels,
-                kernel_size=(3, 3),
-                stride=stride,
-                padding=(1, 1),
-                output_padding=out_padding,
-                bias=False,
-            ),
-            nn.BatchNorm2d(out_channels, momentum=momentum),
-            nn.ReLU(),
-        )
-        self.conv2 = nn.ModuleList()
-        self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
-        for i in range(n_blocks - 1):
-            self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
-
-    def forward(self, x, concat_tensor):
-        x = self.conv1(x)
-        x = torch.cat((x, concat_tensor), dim=1)
-        for i in range(self.n_blocks):
-            x = self.conv2[i](x)
-        return x
-
-
-class Decoder(nn.Module):
-    def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
-        super(Decoder, self).__init__()
-        self.layers = nn.ModuleList()
-        self.n_decoders = n_decoders
-        for i in range(self.n_decoders):
-            out_channels = in_channels // 2
-            self.layers.append(
-                ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
-            )
-            in_channels = out_channels
-
-    def forward(self, x, concat_tensors):
-        for i in range(self.n_decoders):
-            x = self.layers[i](x, concat_tensors[-1 - i])
-        return x
-
-
-class DeepUnet(nn.Module):
-    def __init__(
-        self,
-        kernel_size,
-        n_blocks,
-        en_de_layers=5,
-        inter_layers=4,
-        in_channels=1,
-        en_out_channels=16,
-    ):
-        super(DeepUnet, self).__init__()
-        self.encoder = Encoder(
-            in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
-        )
-        self.intermediate = Intermediate(
-            self.encoder.out_channel // 2,
-            self.encoder.out_channel,
-            inter_layers,
-            n_blocks,
-        )
-        self.decoder = Decoder(
-            self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
-        )
-
-    def forward(self, x):
-        x, concat_tensors = self.encoder(x)
-        x = self.intermediate(x)
-        x = self.decoder(x, concat_tensors)
-        return x
-
-
-class E2E(nn.Module):
-    def __init__(
-        self,
-        n_blocks,
-        n_gru,
-        kernel_size,
-        en_de_layers=5,
-        inter_layers=4,
-        in_channels=1,
-        en_out_channels=16,
-    ):
-        super(E2E, self).__init__()
-        self.unet = DeepUnet(
-            kernel_size,
-            n_blocks,
-            en_de_layers,
-            inter_layers,
-            in_channels,
-            en_out_channels,
-        )
-        self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
-        if n_gru:
-            self.fc = nn.Sequential(
-                BiGRU(3 * 128, 256, n_gru),
-                nn.Linear(512, 360),
-                nn.Dropout(0.25),
-                nn.Sigmoid(),
-            )
-        else:
-            self.fc = nn.Sequential(
-                nn.Linear(3 * nn.N_MELS, nn.N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
-            )
-
-    def forward(self, mel):
-        # print(mel.shape)
-        mel = mel.transpose(-1, -2).unsqueeze(1)
-        x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
-        x = self.fc(x)
-        # print(x.shape)
-        return x
-
-
-from librosa.filters import mel
-
-
-class MelSpectrogram(torch.nn.Module):
-    def __init__(
-        self,
-        is_half,
-        n_mel_channels,
-        sampling_rate,
-        win_length,
-        hop_length,
-        n_fft=None,
-        mel_fmin=0,
-        mel_fmax=None,
-        clamp=1e-5,
-    ):
-        super().__init__()
-        n_fft = win_length if n_fft is None else n_fft
-        self.hann_window = {}
-        mel_basis = mel(
-            sr=sampling_rate,
-            n_fft=n_fft,
-            n_mels=n_mel_channels,
-            fmin=mel_fmin,
-            fmax=mel_fmax,
-            htk=True,
-        )
-        mel_basis = torch.from_numpy(mel_basis).float()
-        self.register_buffer("mel_basis", mel_basis)
-        self.n_fft = win_length if n_fft is None else n_fft
-        self.hop_length = hop_length
-        self.win_length = win_length
-        self.sampling_rate = sampling_rate
-        self.n_mel_channels = n_mel_channels
-        self.clamp = clamp
-        self.is_half = is_half
-
-    def forward(self, audio, keyshift=0, speed=1, center=True):
-        factor = 2 ** (keyshift / 12)
-        n_fft_new = int(np.round(self.n_fft * factor))
-        win_length_new = int(np.round(self.win_length * factor))
-        hop_length_new = int(np.round(self.hop_length * speed))
-        keyshift_key = str(keyshift) + "_" + str(audio.device)
-        if keyshift_key not in self.hann_window:
-            self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
-                # "cpu" if (audio.device.type == "privateuseone") else audio.device
-                audio.device
-            )
-        # fft = torch.stft(  # doesn't support pytorch_dml
-        #     # audio.cpu() if (audio.device.type == "privateuseone") else audio,
-        #     audio,
-        #     n_fft=n_fft_new,
-        #     hop_length=hop_length_new,
-        #     win_length=win_length_new,
-        #     window=self.hann_window[keyshift_key],
-        #     center=center,
-        #     return_complex=True,
-        # )
-        # magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
-        # print(1111111111)
-        # print(222222222222222, audio.device, self.is_half)
-        if hasattr(self, "stft") == False:
-            # print(n_fft_new, hop_length_new, win_length_new, audio.shape)
-            self.stft = STFT(
-                filter_length=n_fft_new,
-                hop_length=hop_length_new,
-                win_length=win_length_new,
-                window="hann",
-            ).to(audio.device)
-        magnitude = self.stft.transform(audio)  # phase
-        # if (audio.device.type == "privateuseone"):
-        #     magnitude = magnitude.to(audio.device)
-        if keyshift != 0:
-            size = self.n_fft // 2 + 1
-            resize = magnitude.size(1)
-            if resize < size:
-                magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
-            magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
-        mel_output = torch.matmul(self.mel_basis, magnitude)
-        if self.is_half == True:
-            mel_output = mel_output.half()
-        log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
-        # print(log_mel_spec.device.type)
-        return log_mel_spec
-
-
-class RMVPE:
-    def __init__(self, model_path, is_half, device=None):
-        self.resample_kernel = {}
-        self.resample_kernel = {}
-        self.is_half = is_half
-        if device is None:
-            device = "cuda" if torch.cuda.is_available() else "cpu"
-        self.device = device
-        self.mel_extractor = MelSpectrogram(
-            is_half, 128, 16000, 1024, 160, None, 30, 8000
-        ).to(device)
-        if "privateuseone" in str(device):
-            import onnxruntime as ort
-
-            ort_session = ort.InferenceSession(
-                "%s/rmvpe.onnx" % os.environ["rmvpe_root"],
-                providers=["DmlExecutionProvider"],
-            )
-            self.model = ort_session
-        else:
-            model = E2E(4, 1, (2, 2))
-            ckpt = torch.load(model_path, map_location="cpu")
-            model.load_state_dict(ckpt)
-            model.eval()
-            if is_half == True:
-                model = model.half()
-            self.model = model
-            self.model = self.model.to(device)
-        cents_mapping = 20 * np.arange(360) + 1997.3794084376191
-        self.cents_mapping = np.pad(cents_mapping, (4, 4))  # 368
-
-    def mel2hidden(self, mel):
-        with torch.no_grad():
-            n_frames = mel.shape[-1]
-            mel = F.pad(
-                mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="constant"
-            )
-            if "privateuseone" in str(self.device):
-                onnx_input_name = self.model.get_inputs()[0].name
-                onnx_outputs_names = self.model.get_outputs()[0].name
-                hidden = self.model.run(
-                    [onnx_outputs_names],
-                    input_feed={onnx_input_name: mel.cpu().numpy()},
-                )[0]
-            else:
-                hidden = self.model(mel)
-            return hidden[:, :n_frames]
-
-    def decode(self, hidden, thred=0.03):
-        cents_pred = self.to_local_average_cents(hidden, thred=thred)
-        f0 = 10 * (2 ** (cents_pred / 1200))
-        f0[f0 == 10] = 0
-        # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
-        return f0
-
-    def infer_from_audio(self, audio, thred=0.03):
-        # torch.cuda.synchronize()
-        t0 = ttime()
-        mel = self.mel_extractor(
-            torch.from_numpy(audio).float().to(self.device).unsqueeze(0), center=True
-        )
-        # print(123123123, mel.device.type)
-        # torch.cuda.synchronize()
-        t1 = ttime()
-        hidden = self.mel2hidden(mel)
-        # torch.cuda.synchronize()
-        t2 = ttime()
-        # print(234234, hidden.device.type)
-        if "privateuseone" not in str(self.device):
-            hidden = hidden.squeeze(0).cpu().numpy()
-        else:
-            hidden = hidden[0]
-        if self.is_half == True:
-            hidden = hidden.astype("float32")
-
-        f0 = self.decode(hidden, thred=thred)
-        # torch.cuda.synchronize()
-        t3 = ttime()
-        # print("hmvpe:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t3 - t0))
-        return f0
-
-    def infer_from_audio_with_pitch(self, audio, thred=0.03, f0_min=50, f0_max=1100):
-        audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
-        mel = self.mel_extractor(audio, center=True)
-        hidden = self.mel2hidden(mel)
-        hidden = hidden.squeeze(0).cpu().numpy()
-        if self.is_half == True:
-            hidden = hidden.astype("float32")
-        f0 = self.decode(hidden, thred=thred)
-        f0[(f0 < f0_min) | (f0 > f0_max)] = 0
-        return f0
-
-    def to_local_average_cents(self, salience, thred=0.05):
-        # t0 = ttime()
-        center = np.argmax(salience, axis=1)  # frame length; index
-        salience = np.pad(salience, ((0, 0), (4, 4)))  # frame length, 368
-        # t1 = ttime()
-        center += 4
-        todo_salience = []
-        todo_cents_mapping = []
-        starts = center - 4
-        ends = center + 5
-        for idx in range(salience.shape[0]):
-            todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
-            todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
-        # t2 = ttime()
-        todo_salience = np.array(todo_salience)  # frame length, 9
-        todo_cents_mapping = np.array(todo_cents_mapping)  # frame length, 9
-        product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
-        weight_sum = np.sum(todo_salience, 1)  # frame length
-        devided = product_sum / weight_sum  # frame length
-        # t3 = ttime()
-        maxx = np.max(salience, axis=1)  # frame length
-        devided[maxx <= thred] = 0
-        # t4 = ttime()
-        # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
-        return devided
-
-
-if __name__ == "__main__":
-    import librosa
-    import soundfile as sf
-
-    audio, sampling_rate = sf.read(r"C:\Users\liujing04\Desktop\Z\冬之花clip1.wav")
-    if len(audio.shape) > 1:
-        audio = librosa.to_mono(audio.transpose(1, 0))
-    audio_bak = audio.copy()
-    if sampling_rate != 16000:
-        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-    model_path = r"D:\BaiduNetdiskDownload\RVC-beta-v2-0727AMD_realtime\rmvpe.pt"
-    thred = 0.03  # 0.01
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-    rmvpe = RMVPE(model_path, is_half=False, device=device)
-    t0 = ttime()
-    f0 = rmvpe.infer_from_audio(audio, thred=thred)
-    # f0 = rmvpe.infer_from_audio(audio, thred=thred)
-    # f0 = rmvpe.infer_from_audio(audio, thred=thred)
-    # f0 = rmvpe.infer_from_audio(audio, thred=thred)
-    # f0 = rmvpe.infer_from_audio(audio, thred=thred)
-    t1 = ttime()
-    logger.info("%s %.2f", f0.shape, t1 - t0)
 
spaces/Benson/text-generation/Examples/Descargar Fid Q Ielewe Mitaa Mp3.md DELETED
@@ -1,49 +0,0 @@
-
-<h1>Free Download Paint 3D Windows 10: A Beginner's Guide</h1>
-<p>If you are looking for a fun and easy way to unleash your creativity and make amazing 2D and 3D designs, you should try Paint 3D. Paint 3D is a free app that comes with Windows 10 and lets you create professional or playful creative projects by combining different tools and features. You don't need any design experience to use Paint 3D - anyone can do it!</p>
-<p>In this article, we will show you how to download and install Paint 3D on Windows 10, how to use it to create stunning artwork, and how to learn more about it. Let's get started!</p>
-<h2>descargar fid q ielewe mitaa mp3</h2><br /><p><b><b>DOWNLOAD</b> <a href="https://bltlly.com/2v6K2D">https://bltlly.com/2v6K2D</a></b></p><br /><br />
-<h2>How to Download and Install Paint 3D on Windows 10</h2>
-<p>Downloading and installing Paint 3D on Windows 10 is very easy. Just follow these steps:</p>
-<ol>
-<li>Open the Microsoft Store app on your PC. You can find it in the Start menu or by typing "store" into the search box on the taskbar.</li>
-<li>Search for "Paint 3D" in the store app and click on the result. You will see a page with information about the app, screenshots, reviews, and more.</li>
-<li>Click the "Get" button to download and install the app. You may need to sign in with your Microsoft account if you haven't already.</li>
-<li>Once the app is installed, you can launch it from the Start menu or by clicking its icon on the taskbar. You can also pin it to the taskbar or the Start menu for easy access.</li>
-</ol>
-<h2>How to Use Paint 3D on Windows 10</h2>
-<p>Now that you have downloaded and installed Paint 3D, you are ready to use it. Here are some basic steps to help you get started:</p>
-<ol>
-<li>Choose a canvas or a background. When you open Paint 3D, you will see a white canvas where you can draw or paint. You can also choose a different color or a custom image as the background by clicking the "Canvas" button in the top menu.</li>
-
-<li>Create or import 2D and 3D objects. You can create your own 2D and 3D objects using the "3D Shapes" or "3D Doodle" tools on the right-hand side of the screen. The "3D Shapes" tool lets you choose from different basic shapes such as cubes, spheres, cones, cylinders, etc. The "3D Doodle" tool lets you draw freehand shapes that are automatically turned into 3D objects. You can also import your own 2D or 3D images or models by clicking the "Insert" button in the top menu.</li>
-<li>Edit, transform, and customize your objects. You can edit, transform, and customize your objects using the "Select" tool on the right-hand side of the screen. The "Select" tool lets you move, rotate, scale, copy, delete, group, or ungroup your objects. You can also use the "Magic Select" tool to cut out part of an image or object and turn it into a separate object. You can also use the "Paint Bucket" tool to fill your objects with different colors or textures.</li>
-<li>Add stickers, text, and effects. You can add stickers, text, and effects to your objects using the same tools you used for the canvas. You can also use the "Mixed Reality" button in the top menu to place your objects in a real-world scene using your device's camera.</li>
-<li>Save, export, or share your project. When you are done with your project, you can save it as a Paint 3D file by clicking the "Menu" button in the top-left corner and choosing "Save". You can also export it as a 2D or 3D image or video by clicking the "Menu" button and choosing "Export". You can also share your project with others by clicking the "Share" button in the top menu and choosing from different options such as email, social media, or Remix 3D.</li>
-</ol>
-<h2>How to Learn More About Paint 3D on Windows 10</h2>
-<p>If you want to learn more about Paint 3D and how to use it effectively, here are some tips and resources you can use:</p>
-<ul>
-
-<li>Join online communities and forums. There are many online communities and forums where you can ask questions, share tips, get feedback, and see what others are creating with Paint 3D. You can join them on Facebook, Reddit, Twitter, or other platforms.</li>
-<li>Check out the Remix 3D website for inspiration and resources. The Remix 3D website is a place where you can find thousands of 3D models that you can download and use in Paint 3D. You can also upload your own creations and remix other people's work. You can also browse different collections and categories of models based on themes, styles, or trends.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Paint 3D is a free app that comes with Windows 10 and lets you create amazing 2D and 3D designs with ease. You can download and install it from the Microsoft Store app and start creating your own projects by following the steps in this article. You can also learn more about Paint 3D by watching tutorials, joining online communities, and visiting the Remix 3D website. Paint 3D is a great way to express your creativity and have fun with digital art.</p>
-<h2>Frequently Asked Questions</h2>
-<p>Here are some frequently asked questions about Paint 3D:</p>
-<ol>
-<li><b>What are the system requirements for Paint 3D?</b><br>
-Paint 3D works on any device running Windows 10 version 1709 or later. However, some features may require more advanced hardware, such as a touchscreen, a pen device, a webcam, or a graphics card.</li>
-<li><b>How can I use Paint 3D with other Microsoft apps?</b><br>
-You can use Paint 3D with other Microsoft apps such as Word, PowerPoint, Excel, OneNote, Outlook, etc. by inserting your Paint 3D projects as images or videos into these apps. You can also use Paint 3D with the Mixed Reality Viewer app to view your Paint 3D projects in 3D or in augmented reality.</li>
-<li><b>What are some alternatives to Paint 3D?</b><br>
-
-<li><b>How can I get help or support for Paint 3D?</b><br>
-If you need help or support for Paint 3D, you can visit the Microsoft Support website and search for "Paint 3D". You will find articles, videos, and guides that can help you with common problems or questions. You can also contact Microsoft Support by phone, chat, or email if you need further assistance.</li>
-<li><b>How can I give feedback or suggestions for Paint 3D?</b><br>
-If you want to give feedback or suggestions for Paint 3D, you can use the Feedback Hub app on your Windows 10 device. You can also rate and review the app in the Microsoft Store app or leave a comment on the Paint 3D Facebook page.</li>
-</ol>
-<p>I hope you enjoyed this article and learned something new about Paint 3D. If you have any questions or comments, feel free to share them below. Thanks for reading!</p> 64aa2da5cf<br />
-<br />
-<br />
 
spaces/Benson/text-generation/Examples/Descargar Gratis Bitcoin Bot.md DELETED
@@ -1,77 +0,0 @@
1
-
2
- <h1>Descargar gratis Bitcoin Bot: Lo que necesitas saber</h1>
3
- <p>Si usted está interesado en el comercio bitcoin u otras criptomonedas, es posible que haya oído hablar de los robots bitcoin. Estos son programas de software que pueden comprar y vender automáticamente criptomonedas en su nombre, basado en reglas y estrategias predefinidas. Pero, ¿qué son exactamente los robots bitcoin, cómo funcionan, y dónde se puede obtener de forma gratuita? En este artículo, vamos a responder a estas preguntas y más. También revisaremos los tres mejores bots de bitcoin que puedes descargar gratis y comenzar a usar hoy. </p>
4
- <h2>descargar gratis bitcoin bot</h2><br /><p><b><b>DOWNLOAD</b> &#9733;&#9733;&#9733; <a href="https://bltlly.com/2v6Km5">https://bltlly.com/2v6Km5</a></b></p><br /><br />
5
- <h2>Cómo funcionan los bots de Bitcoin</h2>
6
- <p>Los bots Bitcoin son esencialmente programas de computadora que pueden conectarse a varios intercambios de criptomonedas a través de API (interfaces de programación de aplicaciones) y ejecutar operaciones en su nombre. También pueden recopilar y analizar datos de mercado, como movimientos de precios, volumen, tendencias, indicadores y señales. Sobre la base de estos datos, pueden aplicar diferentes estrategias de negociación, tales como especulación, arbitraje, comercio de red, el costo del dólar de promedio, y más. Algunos robots bitcoin también pueden utilizar el aprendizaje automático o la inteligencia artificial para adaptarse a las condiciones cambiantes del mercado y optimizar su rendimiento. </p>
<h2>Benefits of Using Bitcoin Bots</h2>
<p>There are many reasons why you might want to use a bitcoin bot for your crypto trading. Here are some of the main benefits:</p>
<ul>
<li><strong>Save time:</strong> Trading cryptocurrencies can be time-consuming and tedious. You have to constantly monitor the market, do research, make decisions, and execute trades. With a bitcoin bot, you can automate these tasks and let the bot do the work for you.</li>
<li><strong>Reduce stress:</strong> Trading cryptocurrencies can also be stressful and emotional. You have to deal with market volatility, uncertainty, fear, greed, and other psychological factors. With a bitcoin bot, you can reduce your emotional involvement and trade more rationally.</li>
<li><strong>Avoid human error:</strong> Trading cryptocurrencies can be prone to human error, such as typos, miscalculations, or technical glitches. These mistakes can cost you money or even ruin your entire trading account. With a bitcoin bot, you can minimize these risks by relying on the accuracy and reliability of the software.</li>
</ul>
<h2>Risks of Using Bitcoin Bots</h2>
<p>Although bitcoin bots can offer many advantages, they also come with some drawbacks and dangers. Here are some of the main risks:</p>
<ul>
<li><strong>Hacking:</strong> Bitcoin bots are software programs that run on your computer or on online platforms. This means they can be hacked by malicious actors who may steal your funds or manipulate your trades.</li>
<li><strong>Malfunction:</strong> Bitcoin bots are software programs that can have bugs, errors, or compatibility issues. This means they can fail or stop working at any time, resulting in losses or missed opportunities.</li>
<li><strong>Losing money:</strong> Bitcoin bots can execute trades on your behalf, but they cannot guarantee profits or prevent losses. This means you can still lose money due to market volatility, poor strategies, or unexpected events.</li>
</ul>
<p>Therefore, you should always be careful and cautious when using bitcoin bots. Never trust them blindly or invest more than you can afford to lose. You should also do your own research and due diligence before choosing a bitcoin bot and using it for your trading.</p>
<h2>How to Choose a Bitcoin Bot</h2>
<p>There are many bitcoin bots available on the internet, but not all of them are reliable, trustworthy, or effective. Some are scams, some are outdated, and some are simply useless. How can you tell which ones are worth your time and money? Here are some factors to consider when choosing a bitcoin bot:</p>
<ul>
<li><strong>Features:</strong> What does the bitcoin bot offer? Does it support the exchanges, trading strategies, and tools you need?</li>
<li><strong>Performance:</strong> How well does the bitcoin bot perform? Does it have a proven track record of generating consistent, profitable results? Does it have testimonials, reviews, or ratings from real users or experts? Does it have statistics, charts, or reports showing its performance history and current status?</li>
<li><strong>Security:</strong> How secure is the bitcoin bot? Does it have security measures, such as encryption, authentication, or verification, to protect your data and funds? Does it have backup systems, recovery options, or customer support in case of emergencies? Does it have a reputation or credibility in the crypto community?</li>
<li><strong>Price:</strong> How much does the bitcoin bot cost? Is it free, freemium, subscription-based, or a one-time purchase? Does it offer discounts, bonuses, or refunds? Does it have hidden fees, commissions, or charges? Is it good value for money?</li>
<li><strong>Reviews:</strong> What do other people say about the bitcoin bot? Does it have positive or negative feedback on online forums, blogs, social media, or review sites? Are there complaints, issues, or problems reported by its users or reviewers? Does it have awards, recognitions, or endorsements from reputable sources?</li>
</ul>
<p>By comparing and evaluating these factors, you can narrow down your options and find the best bitcoin bot for your needs and preferences.</p>
<h2>Top 3 Bitcoin Bots to Download for Free</h2>
<p>To save you time and effort, we have done some research and testing and selected the three best bitcoin bots that you can download for free and start using today. Here is a brief overview of each bot, with its pros and cons, and a download link:</p>
<h3>Freebitcoin Bot</h3>
<ul>
<li><strong>Pros:</strong> Easy to use, no registration required, no fees involved</li>
<li><strong>Cons:</strong> Limited features, low earning potential</li>
<li><strong>Download link:</strong> [Freebitcoin Bot]</li>
</ul>
<h3>HaasOnline</h3>
<p>This is a powerful, professional platform that supports multiple crypto trading bots and strategies. You can choose from more than 15 predefined bots or build your own custom bot with a drag-and-drop editor. You also get access to more than 50 technical indicators and more than 20 exchanges. With this platform, you can automate your trades with precision and efficiency.</p>
<ul>
<li><strong>Pros:</strong> Advanced features, high performance potential</li>
<li><strong>Cons:</strong> Complex interface, expensive subscription plans</li>
<li><strong>Download link:</strong> [HaasOnline]</li>
</ul>
<h3>WunderTrading</h3>
<p>This is a reliable, user-friendly platform that offers a wide selection of crypto trading tools and templates.</p>
<h2>How to Use a Bitcoin Bot</h2>
<ol>
<li><strong>Install and configure the bitcoin bot:</strong> Depending on the type of bot you have, you may need to install it on your computer or access it online. You may also need to create an account, verify your identity, and set up your preferences. You will also need to connect the bot to your preferred exchange(s) by providing your API keys and other credentials.</li>
<li><strong>Select and test your trading strategy:</strong> Depending on the type of bot you have, you can choose from a variety of trading strategies or create your own. You should always test your strategy before using it with real money. You can use a demo account, a backtesting tool, or a paper trading mode to see how your strategy performs in different scenarios (a minimal backtest sketch follows this list).</li>
<li><strong>Start and monitor your bitcoin bot:</strong> Once you have everything ready, you can start your bitcoin bot and let it trade for you. However, you should not leave it unattended or forget about it. Always monitor its performance, check its logs, and review its results. You should also be ready to stop or adjust it if necessary.</li>
</ol>
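<p>As promised above, here is a minimal backtest sketch for the strategy-testing step. It assumes nothing beyond plain Python and a list of historical closing prices; a real backtest should also model fees and slippage:</p>

```python
# Toy backtest of the same SMA-band idea used earlier in this article.
def backtest_sma(prices, window=20, band=0.01, start_cash=1000.0):
    """Buy when price dips below the SMA band, sell when it rises above."""
    cash, coins = start_cash, 0.0
    for i in range(window, len(prices)):
        sma = sum(prices[i - window:i]) / window   # trailing moving average
        price = prices[i]
        if price < sma * (1 - band) and cash > 0:      # buy signal
            coins, cash = cash / price, 0.0
        elif price > sma * (1 + band) and coins > 0:   # sell signal
            cash, coins = coins * price, 0.0
    return cash + coins * prices[-1]                   # final portfolio value

# Example with made-up prices; expect different results on real data.
history = [100, 99, 97, 96, 98, 101, 104, 103, 105, 107] * 5
print(backtest_sma(history, window=5))
```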
<h2>Tips and Tricks for Bitcoin Bot Trading</h2>
<p>To get the most out of your bitcoin bot trading experience and results, here are some tips and tricks you can follow:</p>
<ul>
<li><strong>Do your research:</strong> Before choosing a bitcoin bot, do your research and compare different options. You should also research the market and the trading strategies you want to use. Never rely on hype, promises, or guarantees.</li>
<li><strong>Start small:</strong> When using a bitcoin bot for the first time, start with a small amount of money that you can afford to lose. You should also use a low-risk strategy and conservative settings. You can gradually increase your investment and risk level as you gain more experience and confidence.</li>
<li><strong>Diversify your portfolio:</strong> When using a bitcoin bot, do not put all your eggs in one basket. Diversify your portfolio by trading different cryptocurrencies, using different strategies, or using different bots. This way, you can reduce your exposure to market fluctuations and maximize your opportunities.</li>
<li><strong>Keep learning:</strong> When using a bitcoin bot, do not stop learning and improving. Always track your performance, analyze your results, and learn from your mistakes. Keep up with the latest news, trends, and developments in the crypto space, and stay open to trying new things and experimenting with new ideas.</li>
</ul>
<h2>Conclusion</h2>
<p>If you are looking for a free bitcoin bot download that suits your needs and preferences, we recommend checking out the three top options reviewed in this article: Freebitcoin Bot, HaasOnline, and WunderTrading. These are some of the best bitcoin bots you can download for free and start using today.</p>
<p>We hope this article has been informative and helpful. If you have any questions or comments about bitcoin bot trading, feel free to leave them below. We would love to hear from you!</p>
<h3>Frequently Asked Questions</h3>
<p>Here are some frequently asked questions and answers about bitcoin bot trading:</p>
<ol>
<li><strong>What is the best bitcoin bot?</strong> There is no definitive answer to this question, since different bitcoin bots differ in features, performance, security, price, and reviews, and the best bitcoin bot for you depends on your personal needs, preferences, and goals. Those five factors are the ones to weigh when choosing. You can also check out our three top recommendations in this article: Freebitcoin Bot, HaasOnline, and WunderTrading.</li>
<li><strong>Is bitcoin bot trading legal?</strong> Bitcoin bot trading is generally legal in most countries, as long as you follow the rules and regulations of your jurisdiction. However, some countries have stricter laws or outright bans on cryptocurrency trading or gambling. You should therefore always check the legal status of bitcoin bot trading in your country before using one.</li>
<li><strong>How much money can I make with bitcoin bot trading?</strong> There is no definitive answer, since the amount of money you can make depends on several factors, such as market conditions, your trading strategy, your trading parameters, and the bitcoin bot itself. That said, using a reliable and effective bitcoin bot, choosing a profitable and consistent trading strategy, setting realistic and reasonable trading parameters, and diversifying your portfolio can all increase your earning potential.</li>
<li><strong>How can I learn more about bitcoin bot trading?</strong> If you want to learn more about bitcoin bot trading, you can research online and read articles, blogs, books, or guides on the subject. You can also watch videos, podcasts, webinars, or courses on the topic, or join online forums, communities, or groups where you can interact with other bitcoin bot traders and learn from their experiences and insights.</li>
</ol>
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/args.py DELETED
@@ -1,648 +0,0 @@
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Internal module to help with normalizing botocore client args.

This module (and all function/classes within this module) should be
considered internal, and *not* a public API.

"""
import copy
import logging
import socket

import botocore.exceptions
import botocore.parsers
import botocore.serialize
from botocore.config import Config
from botocore.endpoint import EndpointCreator
from botocore.regions import EndpointResolverBuiltins as EPRBuiltins
from botocore.regions import EndpointRulesetResolver
from botocore.signers import RequestSigner
from botocore.utils import ensure_boolean, is_s3_accelerate_url

logger = logging.getLogger(__name__)


VALID_REGIONAL_ENDPOINTS_CONFIG = [
    'legacy',
    'regional',
]
LEGACY_GLOBAL_STS_REGIONS = [
    'ap-northeast-1',
    'ap-south-1',
    'ap-southeast-1',
    'ap-southeast-2',
    'aws-global',
    'ca-central-1',
    'eu-central-1',
    'eu-north-1',
    'eu-west-1',
    'eu-west-2',
    'eu-west-3',
    'sa-east-1',
    'us-east-1',
    'us-east-2',
    'us-west-1',
    'us-west-2',
]


class ClientArgsCreator:
    def __init__(
        self,
        event_emitter,
        user_agent,
        response_parser_factory,
        loader,
        exceptions_factory,
        config_store,
    ):
        self._event_emitter = event_emitter
        self._user_agent = user_agent
        self._response_parser_factory = response_parser_factory
        self._loader = loader
        self._exceptions_factory = exceptions_factory
        self._config_store = config_store

    def get_client_args(
        self,
        service_model,
        region_name,
        is_secure,
        endpoint_url,
        verify,
        credentials,
        scoped_config,
        client_config,
        endpoint_bridge,
        auth_token=None,
        endpoints_ruleset_data=None,
        partition_data=None,
    ):
        final_args = self.compute_client_args(
            service_model,
            client_config,
            endpoint_bridge,
            region_name,
            endpoint_url,
            is_secure,
            scoped_config,
        )

        service_name = final_args['service_name']  # noqa
        parameter_validation = final_args['parameter_validation']
        endpoint_config = final_args['endpoint_config']
        protocol = final_args['protocol']
        config_kwargs = final_args['config_kwargs']
        s3_config = final_args['s3_config']
        partition = endpoint_config['metadata'].get('partition', None)
        socket_options = final_args['socket_options']

        signing_region = endpoint_config['signing_region']
        endpoint_region_name = endpoint_config['region_name']

        event_emitter = copy.copy(self._event_emitter)
        signer = RequestSigner(
            service_model.service_id,
            signing_region,
            endpoint_config['signing_name'],
            endpoint_config['signature_version'],
            credentials,
            event_emitter,
            auth_token,
        )

        config_kwargs['s3'] = s3_config
        new_config = Config(**config_kwargs)
        endpoint_creator = EndpointCreator(event_emitter)

        endpoint = endpoint_creator.create_endpoint(
            service_model,
            region_name=endpoint_region_name,
            endpoint_url=endpoint_config['endpoint_url'],
            verify=verify,
            response_parser_factory=self._response_parser_factory,
            max_pool_connections=new_config.max_pool_connections,
            proxies=new_config.proxies,
            timeout=(new_config.connect_timeout, new_config.read_timeout),
            socket_options=socket_options,
            client_cert=new_config.client_cert,
            proxies_config=new_config.proxies_config,
        )

        serializer = botocore.serialize.create_serializer(
            protocol, parameter_validation
        )
        response_parser = botocore.parsers.create_parser(protocol)

        ruleset_resolver = self._build_endpoint_resolver(
            endpoints_ruleset_data,
            partition_data,
            client_config,
            service_model,
            endpoint_region_name,
            region_name,
            endpoint_url,
            endpoint,
            is_secure,
            endpoint_bridge,
            event_emitter,
        )

        return {
            'serializer': serializer,
            'endpoint': endpoint,
            'response_parser': response_parser,
            'event_emitter': event_emitter,
            'request_signer': signer,
            'service_model': service_model,
            'loader': self._loader,
            'client_config': new_config,
            'partition': partition,
            'exceptions_factory': self._exceptions_factory,
            'endpoint_ruleset_resolver': ruleset_resolver,
        }

    def compute_client_args(
        self,
        service_model,
        client_config,
        endpoint_bridge,
        region_name,
        endpoint_url,
        is_secure,
        scoped_config,
    ):
        service_name = service_model.endpoint_prefix
        protocol = service_model.metadata['protocol']
        parameter_validation = True
        if client_config and not client_config.parameter_validation:
            parameter_validation = False
        elif scoped_config:
            raw_value = scoped_config.get('parameter_validation')
            if raw_value is not None:
                parameter_validation = ensure_boolean(raw_value)

        # Override the user agent if specified in the client config.
        user_agent = self._user_agent
        if client_config is not None:
            if client_config.user_agent is not None:
                user_agent = client_config.user_agent
            if client_config.user_agent_extra is not None:
                user_agent += ' %s' % client_config.user_agent_extra

        s3_config = self.compute_s3_config(client_config)
        endpoint_config = self._compute_endpoint_config(
            service_name=service_name,
            region_name=region_name,
            endpoint_url=endpoint_url,
            is_secure=is_secure,
            endpoint_bridge=endpoint_bridge,
            s3_config=s3_config,
        )
        endpoint_variant_tags = endpoint_config['metadata'].get('tags', [])
        # Create a new client config to be passed to the client based
        # on the final values. We do not want the user to be able
        # to try to modify an existing client with a client config.
        config_kwargs = dict(
            region_name=endpoint_config['region_name'],
            signature_version=endpoint_config['signature_version'],
            user_agent=user_agent,
        )
        if 'dualstack' in endpoint_variant_tags:
            config_kwargs.update(use_dualstack_endpoint=True)
        if 'fips' in endpoint_variant_tags:
            config_kwargs.update(use_fips_endpoint=True)
        if client_config is not None:
            config_kwargs.update(
                connect_timeout=client_config.connect_timeout,
                read_timeout=client_config.read_timeout,
                max_pool_connections=client_config.max_pool_connections,
                proxies=client_config.proxies,
                proxies_config=client_config.proxies_config,
                retries=client_config.retries,
                client_cert=client_config.client_cert,
                inject_host_prefix=client_config.inject_host_prefix,
                tcp_keepalive=client_config.tcp_keepalive,
            )
        self._compute_retry_config(config_kwargs)
        self._compute_connect_timeout(config_kwargs)
        s3_config = self.compute_s3_config(client_config)

        is_s3_service = self._is_s3_service(service_name)

        if is_s3_service and 'dualstack' in endpoint_variant_tags:
            if s3_config is None:
                s3_config = {}
            s3_config['use_dualstack_endpoint'] = True

        return {
            'service_name': service_name,
            'parameter_validation': parameter_validation,
            'user_agent': user_agent,
            'endpoint_config': endpoint_config,
            'protocol': protocol,
            'config_kwargs': config_kwargs,
            's3_config': s3_config,
            'socket_options': self._compute_socket_options(
                scoped_config, client_config
            ),
        }

    def compute_s3_config(self, client_config):
        s3_configuration = self._config_store.get_config_variable('s3')

        # Next, specific client config values take precedence over
        # specific values in the scoped config.
        if client_config is not None:
            if client_config.s3 is not None:
                if s3_configuration is None:
                    s3_configuration = client_config.s3
                else:
                    # The current s3_configuration dictionary may be
                    # from a source that only should be read from so
                    # we want to be safe and just make a copy of it to modify
                    # before it actually gets updated.
                    s3_configuration = s3_configuration.copy()
                    s3_configuration.update(client_config.s3)

        return s3_configuration

    def _is_s3_service(self, service_name):
        """Whether the service is S3 or S3 Control.

        Note that throughout this class, service_name refers to the endpoint
        prefix, not the folder name of the service in botocore/data. For
        S3 Control, the folder name is 's3control' but the endpoint prefix is
        's3-control'.
        """
        return service_name in ['s3', 's3-control']

    def _compute_endpoint_config(
        self,
        service_name,
        region_name,
        endpoint_url,
        is_secure,
        endpoint_bridge,
        s3_config,
    ):
        resolve_endpoint_kwargs = {
            'service_name': service_name,
            'region_name': region_name,
            'endpoint_url': endpoint_url,
            'is_secure': is_secure,
            'endpoint_bridge': endpoint_bridge,
        }
        if service_name == 's3':
            return self._compute_s3_endpoint_config(
                s3_config=s3_config, **resolve_endpoint_kwargs
            )
        if service_name == 'sts':
            return self._compute_sts_endpoint_config(**resolve_endpoint_kwargs)
        return self._resolve_endpoint(**resolve_endpoint_kwargs)

    def _compute_s3_endpoint_config(
        self, s3_config, **resolve_endpoint_kwargs
    ):
        force_s3_global = self._should_force_s3_global(
            resolve_endpoint_kwargs['region_name'], s3_config
        )
        if force_s3_global:
            resolve_endpoint_kwargs['region_name'] = None
        endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
        self._set_region_if_custom_s3_endpoint(
            endpoint_config, resolve_endpoint_kwargs['endpoint_bridge']
        )
        # For backwards compatibility reasons, we want to make sure the
        # client.meta.region_name will remain us-east-1 if we forced the
        # endpoint to be the global region. Specifically, if this value
        # changes to aws-global, it breaks logic where a user is checking
        # for us-east-1 as the global endpoint such as in creating buckets.
        if force_s3_global and endpoint_config['region_name'] == 'aws-global':
            endpoint_config['region_name'] = 'us-east-1'
        return endpoint_config

    def _should_force_s3_global(self, region_name, s3_config):
        s3_regional_config = 'legacy'
        if s3_config and 'us_east_1_regional_endpoint' in s3_config:
            s3_regional_config = s3_config['us_east_1_regional_endpoint']
            self._validate_s3_regional_config(s3_regional_config)

        is_global_region = region_name in ('us-east-1', None)
        return s3_regional_config == 'legacy' and is_global_region

    def _validate_s3_regional_config(self, config_val):
        if config_val not in VALID_REGIONAL_ENDPOINTS_CONFIG:
            raise botocore.exceptions.InvalidS3UsEast1RegionalEndpointConfigError(
                s3_us_east_1_regional_endpoint_config=config_val
            )

    def _set_region_if_custom_s3_endpoint(
        self, endpoint_config, endpoint_bridge
    ):
        # If a user is providing a custom URL, the endpoint resolver will
        # refuse to infer a signing region. If we want to default to s3v4,
        # we have to account for this.
        if (
            endpoint_config['signing_region'] is None
            and endpoint_config['region_name'] is None
        ):
            endpoint = endpoint_bridge.resolve('s3')
            endpoint_config['signing_region'] = endpoint['signing_region']
            endpoint_config['region_name'] = endpoint['region_name']

    def _compute_sts_endpoint_config(self, **resolve_endpoint_kwargs):
        endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
        if self._should_set_global_sts_endpoint(
            resolve_endpoint_kwargs['region_name'],
            resolve_endpoint_kwargs['endpoint_url'],
            endpoint_config,
        ):
            self._set_global_sts_endpoint(
                endpoint_config, resolve_endpoint_kwargs['is_secure']
            )
        return endpoint_config

    def _should_set_global_sts_endpoint(
        self, region_name, endpoint_url, endpoint_config
    ):
        has_variant_tags = endpoint_config and endpoint_config.get(
            'metadata', {}
        ).get('tags')
        if endpoint_url or has_variant_tags:
            return False
        return (
            self._get_sts_regional_endpoints_config() == 'legacy'
            and region_name in LEGACY_GLOBAL_STS_REGIONS
        )

    def _get_sts_regional_endpoints_config(self):
        sts_regional_endpoints_config = self._config_store.get_config_variable(
            'sts_regional_endpoints'
        )
        if not sts_regional_endpoints_config:
            sts_regional_endpoints_config = 'legacy'
        if (
            sts_regional_endpoints_config
            not in VALID_REGIONAL_ENDPOINTS_CONFIG
        ):
            raise botocore.exceptions.InvalidSTSRegionalEndpointsConfigError(
                sts_regional_endpoints_config=sts_regional_endpoints_config
            )
        return sts_regional_endpoints_config

    def _set_global_sts_endpoint(self, endpoint_config, is_secure):
        scheme = 'https' if is_secure else 'http'
        endpoint_config['endpoint_url'] = '%s://sts.amazonaws.com' % scheme
        endpoint_config['signing_region'] = 'us-east-1'

    def _resolve_endpoint(
        self,
        service_name,
        region_name,
        endpoint_url,
        is_secure,
        endpoint_bridge,
    ):
        return endpoint_bridge.resolve(
            service_name, region_name, endpoint_url, is_secure
        )

    def _compute_socket_options(self, scoped_config, client_config=None):
        # This disables Nagle's algorithm and is the default socket options
        # in urllib3.
        socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
        client_keepalive = client_config and client_config.tcp_keepalive
        scoped_keepalive = scoped_config and self._ensure_boolean(
            scoped_config.get("tcp_keepalive", False)
        )
        # Enables TCP Keepalive if specified in the client config object
        # or the shared config file.
        if client_keepalive or scoped_keepalive:
            socket_options.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
        return socket_options

    def _compute_retry_config(self, config_kwargs):
        self._compute_retry_max_attempts(config_kwargs)
        self._compute_retry_mode(config_kwargs)

    def _compute_retry_max_attempts(self, config_kwargs):
        # There's a pre-existing max_attempts client config value that actually
        # means max *retry* attempts. There's also a `max_attempts` we pull
        # from the config store that means *total attempts*, which includes the
        # initial request. We can't change what `max_attempts` means in
        # client config so we try to normalize everything to a new
        # "total_max_attempts" variable. We ensure that after this, the only
        # configuration for "max attempts" is the 'total_max_attempts' key.
        # An explicitly provided max_attempts in the client config
        # overrides everything.
        retries = config_kwargs.get('retries')
        if retries is not None:
            if 'total_max_attempts' in retries:
                retries.pop('max_attempts', None)
                return
            if 'max_attempts' in retries:
                value = retries.pop('max_attempts')
                # client config max_attempts means total retries so we
                # have to add one for 'total_max_attempts' to account
                # for the initial request.
                retries['total_max_attempts'] = value + 1
                return
        # Otherwise we'll check the config store which checks env vars,
        # config files, etc. There is no default value for max_attempts,
        # so if this returns None we don't set a default value here.
        max_attempts = self._config_store.get_config_variable('max_attempts')
        if max_attempts is not None:
            if retries is None:
                retries = {}
                config_kwargs['retries'] = retries
            retries['total_max_attempts'] = max_attempts

    def _compute_retry_mode(self, config_kwargs):
        retries = config_kwargs.get('retries')
        if retries is None:
            retries = {}
            config_kwargs['retries'] = retries
        elif 'mode' in retries:
            # If there's a retry mode explicitly set in the client config
            # that overrides everything.
            return
        retry_mode = self._config_store.get_config_variable('retry_mode')
        if retry_mode is None:
            retry_mode = 'legacy'
        retries['mode'] = retry_mode

    def _compute_connect_timeout(self, config_kwargs):
        # Checking if connect_timeout is set on the client config.
        # If it is not, we check the config_store in case a
        # non legacy default mode has been configured.
        connect_timeout = config_kwargs.get('connect_timeout')
        if connect_timeout is not None:
            return
        connect_timeout = self._config_store.get_config_variable(
            'connect_timeout'
        )
        if connect_timeout:
            config_kwargs['connect_timeout'] = connect_timeout

    def _ensure_boolean(self, val):
        if isinstance(val, bool):
            return val
        else:
            return val.lower() == 'true'

    def _build_endpoint_resolver(
        self,
        endpoints_ruleset_data,
        partition_data,
        client_config,
        service_model,
        endpoint_region_name,
        region_name,
        endpoint_url,
        endpoint,
        is_secure,
        endpoint_bridge,
        event_emitter,
    ):
        if endpoints_ruleset_data is None:
            return None

        # The legacy EndpointResolver is global to the session, but
        # EndpointRulesetResolver is service-specific. Builtins for
        # EndpointRulesetResolver must not be derived from the legacy
        # endpoint resolver's output, including final_args, s3_config,
        # etc.
        s3_config_raw = self.compute_s3_config(client_config) or {}
        service_name_raw = service_model.endpoint_prefix
        # Maintain complex logic for s3 and sts endpoints for backwards
        # compatibility.
        if service_name_raw in ['s3', 'sts'] or region_name is None:
            eprv2_region_name = endpoint_region_name
        else:
            eprv2_region_name = region_name
        resolver_builtins = self.compute_endpoint_resolver_builtin_defaults(
            region_name=eprv2_region_name,
            service_name=service_name_raw,
            s3_config=s3_config_raw,
            endpoint_bridge=endpoint_bridge,
            client_endpoint_url=endpoint_url,
            legacy_endpoint_url=endpoint.host,
        )
        # botocore does not support client context parameters generically
        # for every service. Instead, the s3 config section entries are
        # available as client context parameters. In the future, endpoint
        # rulesets of services other than s3/s3control may require client
        # context parameters.
        client_context = (
            s3_config_raw if self._is_s3_service(service_name_raw) else {}
        )
        sig_version = (
            client_config.signature_version
            if client_config is not None
            else None
        )
        return EndpointRulesetResolver(
            endpoint_ruleset_data=endpoints_ruleset_data,
            partition_data=partition_data,
            service_model=service_model,
            builtins=resolver_builtins,
            client_context=client_context,
            event_emitter=event_emitter,
            use_ssl=is_secure,
            requested_auth_scheme=sig_version,
        )

    def compute_endpoint_resolver_builtin_defaults(
        self,
        region_name,
        service_name,
        s3_config,
        endpoint_bridge,
        client_endpoint_url,
        legacy_endpoint_url,
    ):
        # EndpointRulesetResolver rulesets may accept an "SDK::Endpoint" as
        # input. If the endpoint_url argument of create_client() is set, it
        # always takes priority.
        if client_endpoint_url:
            given_endpoint = client_endpoint_url
        # If an endpoints.json data file other than the one bundled within
        # the botocore/data directory is used, the output of legacy
        # endpoint resolution is provided to EndpointRulesetResolver.
        elif not endpoint_bridge.resolver_uses_builtin_data():
            given_endpoint = legacy_endpoint_url
        else:
            given_endpoint = None

        # The endpoint rulesets differ from legacy botocore behavior in whether
        # forcing path style addressing in incompatible situations raises an
        # exception or silently ignores the config setting. The
        # AWS_S3_FORCE_PATH_STYLE parameter is adjusted both here and for each
        # operation so that the ruleset behavior is backwards compatible.
        if s3_config.get('use_accelerate_endpoint', False):
            force_path_style = False
        elif client_endpoint_url is not None and not is_s3_accelerate_url(
            client_endpoint_url
        ):
            force_path_style = s3_config.get('addressing_style') != 'virtual'
        else:
            force_path_style = s3_config.get('addressing_style') == 'path'

        return {
            EPRBuiltins.AWS_REGION: region_name,
            EPRBuiltins.AWS_USE_FIPS: (
                # SDK_ENDPOINT cannot be combined with AWS_USE_FIPS
                given_endpoint is None
                # use legacy resolver's _resolve_endpoint_variant_config_var()
                # or default to False if it returns None
                and endpoint_bridge._resolve_endpoint_variant_config_var(
                    'use_fips_endpoint'
                )
                or False
            ),
            EPRBuiltins.AWS_USE_DUALSTACK: (
                # SDK_ENDPOINT cannot be combined with AWS_USE_DUALSTACK
                given_endpoint is None
                # use legacy resolver's _resolve_use_dualstack_endpoint()
                # or default to False if it returns None
                and endpoint_bridge._resolve_use_dualstack_endpoint(
                    service_name
                )
                or False
            ),
            EPRBuiltins.AWS_STS_USE_GLOBAL_ENDPOINT: (
                self._should_set_global_sts_endpoint(
                    region_name=region_name,
                    endpoint_url=None,
                    endpoint_config=None,
                )
            ),
            EPRBuiltins.AWS_S3_USE_GLOBAL_ENDPOINT: (
                self._should_force_s3_global(region_name, s3_config)
            ),
            EPRBuiltins.AWS_S3_ACCELERATE: s3_config.get(
                'use_accelerate_endpoint', False
            ),
            EPRBuiltins.AWS_S3_FORCE_PATH_STYLE: force_path_style,
            EPRBuiltins.AWS_S3_USE_ARN_REGION: s3_config.get(
                'use_arn_region', True
            ),
            EPRBuiltins.AWS_S3CONTROL_USE_ARN_REGION: s3_config.get(
                'use_arn_region', False
            ),
            EPRBuiltins.AWS_S3_DISABLE_MRAP: s3_config.get(
                's3_disable_multiregion_access_points', False
            ),
            EPRBuiltins.SDK_ENDPOINT: given_endpoint,
        }
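The retry normalization in `_compute_retry_config()` above is easiest to see from the public side. A minimal sketch, assuming only that botocore is installed (the service and region are arbitrary choices): a `max_attempts` of 4 in a client `Config` counts retries, so it is normalized to `total_max_attempts = 5` to include the initial request.

```python
# Illustrative sketch, not part of args.py: how the retry settings that
# _compute_retry_max_attempts()/_compute_retry_mode() normalize are supplied
# by callers through the public Config object.
import botocore.session
from botocore.config import Config

session = botocore.session.get_session()
client = session.create_client(
    "s3",
    region_name="us-east-1",
    config=Config(
        # 'max_attempts' here means *retries*, so args.py normalizes it to
        # {'total_max_attempts': 5, 'mode': 'standard'} (4 retries + 1 try).
        retries={"max_attempts": 4, "mode": "standard"},
        connect_timeout=5,
        read_timeout=30,
    ),
)
print(client.meta.config.retries)  # the normalized retry configuration
```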
 
spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/tasks.py DELETED
@@ -1,387 +0,0 @@
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import logging

from s3transfer.utils import get_callbacks

logger = logging.getLogger(__name__)


class Task:
    """A task associated with a TransferFuture request

    This is a base class for other classes to subclass from. All subclassed
    classes must implement the main() method.
    """

    def __init__(
        self,
        transfer_coordinator,
        main_kwargs=None,
        pending_main_kwargs=None,
        done_callbacks=None,
        is_final=False,
    ):
        """
        :type transfer_coordinator: s3transfer.futures.TransferCoordinator
        :param transfer_coordinator: The context associated with the
            TransferFuture that this Task is associated with.

        :type main_kwargs: dict
        :param main_kwargs: The keyword args that can be immediately supplied
            to the _main() method of the task

        :type pending_main_kwargs: dict
        :param pending_main_kwargs: The keyword args that are depended upon
            by the result from a dependent future(s). The result returned by
            the future(s) will be used as the value for the keyword argument
            when _main() is called. The values for each key can be:
                * a single future - Once completed, its value will be the
                  result of that single future
                * a list of futures - Once all of the futures complete, the
                  value used will be a list of each completed future result
                  value in order of when they were originally supplied.

        :type done_callbacks: list of callbacks
        :param done_callbacks: A list of callbacks to call once the task is
            done completing. Each callback will be called with no arguments
            and will be called no matter if the task succeeds or an exception
            is raised.

        :type is_final: boolean
        :param is_final: True, to indicate that this task is the final task
            for the TransferFuture request. By setting this value to True, it
            will set the result of the entire TransferFuture to the result
            returned by this task's main() method.
        """
        self._transfer_coordinator = transfer_coordinator

        self._main_kwargs = main_kwargs
        if self._main_kwargs is None:
            self._main_kwargs = {}

        self._pending_main_kwargs = pending_main_kwargs
        if pending_main_kwargs is None:
            self._pending_main_kwargs = {}

        self._done_callbacks = done_callbacks
        if self._done_callbacks is None:
            self._done_callbacks = []

        self._is_final = is_final

    def __repr__(self):
        # These are the general main_kwarg parameters that we want to
        # display in the repr.
        params_to_display = [
            'bucket',
            'key',
            'part_number',
            'final_filename',
            'transfer_future',
            'offset',
            'extra_args',
        ]
        main_kwargs_to_display = self._get_kwargs_with_params_to_include(
            self._main_kwargs, params_to_display
        )
        return '{}(transfer_id={}, {})'.format(
            self.__class__.__name__,
            self._transfer_coordinator.transfer_id,
            main_kwargs_to_display,
        )

    @property
    def transfer_id(self):
        """The id for the transfer request that the task belongs to"""
        return self._transfer_coordinator.transfer_id

    def _get_kwargs_with_params_to_include(self, kwargs, include):
        filtered_kwargs = {}
        for param in include:
            if param in kwargs:
                filtered_kwargs[param] = kwargs[param]
        return filtered_kwargs

    def _get_kwargs_with_params_to_exclude(self, kwargs, exclude):
        filtered_kwargs = {}
        for param, value in kwargs.items():
            if param in exclude:
                continue
            filtered_kwargs[param] = value
        return filtered_kwargs

    def __call__(self):
        """The callable to use when submitting a Task to an executor"""
        try:
            # Wait for all of the futures this task depends on.
            self._wait_on_dependent_futures()
            # Gather up all of the main keyword arguments for main().
            # This includes the immediately provided main_kwargs and
            # the values for pending_main_kwargs that source from the return
            # values from the task's dependent futures.
            kwargs = self._get_all_main_kwargs()
            # If the task is not done (really only if some other related
            # task to the TransferFuture had failed) then execute the task's
            # main() method.
            if not self._transfer_coordinator.done():
                return self._execute_main(kwargs)
        except Exception as e:
            self._log_and_set_exception(e)
        finally:
            # Run any done callbacks associated with the task no matter what.
            for done_callback in self._done_callbacks:
                done_callback()

            if self._is_final:
                # If this is the final task announce that it is done if results
                # are waiting on its completion.
                self._transfer_coordinator.announce_done()

    def _execute_main(self, kwargs):
        # Do not display keyword args that should not be printed, especially
        # if they are going to make the logs hard to follow.
        params_to_exclude = ['data']
        kwargs_to_display = self._get_kwargs_with_params_to_exclude(
            kwargs, params_to_exclude
        )
        # Log what is about to be executed.
        logger.debug(f"Executing task {self} with kwargs {kwargs_to_display}")

        return_value = self._main(**kwargs)
        # If the task is the final task, then set the TransferFuture's
        # value to the return value from main().
        if self._is_final:
            self._transfer_coordinator.set_result(return_value)
        return return_value

    def _log_and_set_exception(self, exception):
        # If an exception is ever thrown, set the exception for the
        # entire TransferFuture.
        logger.debug("Exception raised.", exc_info=True)
        self._transfer_coordinator.set_exception(exception)

    def _main(self, **kwargs):
        """The method that will be run in the executor

        This method must be implemented by subclasses of Task. main() can
        be implemented with any arguments decided upon by the subclass.
        """
        raise NotImplementedError('_main() must be implemented')

    def _wait_on_dependent_futures(self):
        # Gather all of the futures that main() depends on.
        futures_to_wait_on = []
        for _, future in self._pending_main_kwargs.items():
            # If the pending main keyword arg is a list then extend the list.
            if isinstance(future, list):
                futures_to_wait_on.extend(future)
            # If the pending main keyword arg is a future append it to the list.
            else:
                futures_to_wait_on.append(future)
        # Now wait for all of the futures to complete.
        self._wait_until_all_complete(futures_to_wait_on)

    def _wait_until_all_complete(self, futures):
        # This is a basic implementation of the concurrent.futures.wait()
        #
        # concurrent.futures.wait() is not used instead because of this
        # reported issue: https://bugs.python.org/issue20319.
        # The issue would occasionally cause multipart uploads to hang
        # when wait() was called. With this approach, it avoids the
        # concurrency bug by removing any association with concurrent.futures
        # implementation of waiters.
        logger.debug(
            '%s about to wait for the following futures %s', self, futures
        )
        for future in futures:
            try:
                logger.debug('%s about to wait for %s', self, future)
                future.result()
            except Exception:
                # result() can also produce exceptions. We want to ignore
                # these to be deferred to error handling down the road.
                pass
        logger.debug('%s done waiting for dependent futures', self)

    def _get_all_main_kwargs(self):
        # Copy over all of the kwargs that we know are available.
        kwargs = copy.copy(self._main_kwargs)

        # Iterate through the kwargs whose values are pending on the result
        # of a future.
        for key, pending_value in self._pending_main_kwargs.items():
            # If the value is a list of futures, iterate though the list
            # appending on the result from each future.
            if isinstance(pending_value, list):
                result = []
                for future in pending_value:
                    result.append(future.result())
            # Otherwise if the pending_value is a future, just wait for it.
            else:
                result = pending_value.result()
            # Add the retrieved value to the kwargs to be sent to the
            # main() call.
            kwargs[key] = result
        return kwargs


class SubmissionTask(Task):
    """A base class for any submission task

    Submission tasks are the top-level tasks used to submit a series of tasks
    to execute a particular transfer.
    """

    def _main(self, transfer_future, **kwargs):
        """
        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The transfer future associated with the
            transfer request that tasks are being submitted for

        :param kwargs: Any additional kwargs that you may want to pass
            to the _submit() method
        """
        try:
            self._transfer_coordinator.set_status_to_queued()

            # Before submitting any tasks, run all of the on_queued callbacks
            on_queued_callbacks = get_callbacks(transfer_future, 'queued')
            for on_queued_callback in on_queued_callbacks:
                on_queued_callback()

            # Once the callbacks have been run, set the status to running.
            self._transfer_coordinator.set_status_to_running()

            # Call the submit method to start submitting tasks to execute the
            # transfer.
            self._submit(transfer_future=transfer_future, **kwargs)
        except BaseException as e:
            # If there was an exception raised during the submission of tasks,
            # there is a chance that the final task that signals that a
            # transfer is done and runs the cleanup may never have been
            # submitted in the first place, so we need to account accordingly.
            #
            # Note that BaseException is caught, instead of Exception, because
            # for some implementations of executors, specifically the serial
            # implementation, the SubmissionTask is directly exposed to
            # KeyboardInterrupts and so needs to clean up and signal done
            # for those as well.

            # Set the exception that caused the process to fail.
            self._log_and_set_exception(e)

            # Wait for all futures that may have spawned from this
            # submission task to finish before we announce the transfer done.
            self._wait_for_all_submitted_futures_to_complete()

            # Announce the transfer as done, which will run any cleanups
            # and done callbacks as well.
            self._transfer_coordinator.announce_done()

    def _submit(self, transfer_future, **kwargs):
        """The submission method to be implemented

        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The transfer future associated with the
            transfer request that tasks are being submitted for

        :param kwargs: Any additional keyword arguments you want to be passed
            in
        """
        raise NotImplementedError('_submit() must be implemented')

    def _wait_for_all_submitted_futures_to_complete(self):
        # We want to wait for all futures that were submitted to
        # complete, as we do not want the cleanup callbacks or done callbacks
        # to be called too early. The main problem is that any task that was
        # submitted may have submitted even more tasks while running, so
        # we need to account accordingly.

        # First get all of the futures that were submitted up to this point.
        submitted_futures = self._transfer_coordinator.associated_futures
        while submitted_futures:
            # Wait for those futures to complete.
            self._wait_until_all_complete(submitted_futures)
            # However, more futures may have been submitted as we waited so
            # we need to check again for any more associated futures.
            possibly_more_submitted_futures = (
                self._transfer_coordinator.associated_futures
            )
            # If the current list of submitted futures is equal to the
            # list of associated futures after the wait completes, we can
            # be sure that no more futures were submitted while waiting on
            # the current list of futures to complete, ultimately meaning all
            # futures that may have spawned from the original submission task
            # have completed.
            if submitted_futures == possibly_more_submitted_futures:
                break
            submitted_futures = possibly_more_submitted_futures


class CreateMultipartUploadTask(Task):
    """Task to initiate a multipart upload"""

    def _main(self, client, bucket, key, extra_args):
        """
        :param client: The client to use when calling CreateMultipartUpload
        :param bucket: The name of the bucket to upload to
        :param key: The name of the key to upload to
        :param extra_args: A dictionary of any extra arguments that may be
            used in the initialization.

        :returns: The upload id of the multipart upload
        """
        # Create the multipart upload.
        response = client.create_multipart_upload(
            Bucket=bucket, Key=key, **extra_args
        )
        upload_id = response['UploadId']

        # Add a cleanup if the multipart upload fails at any point.
        self._transfer_coordinator.add_failure_cleanup(
            client.abort_multipart_upload,
            Bucket=bucket,
            Key=key,
            UploadId=upload_id,
        )
        return upload_id


class CompleteMultipartUploadTask(Task):
    """Task to complete a multipart upload"""

    def _main(self, client, bucket, key, upload_id, parts, extra_args):
        """
        :param client: The client to use when calling CompleteMultipartUpload
        :param bucket: The name of the bucket to upload to
        :param key: The name of the key to upload to
        :param upload_id: The id of the upload
        :param parts: A list of parts to use to complete the multipart upload::

            [{'Etag': etag_value, 'PartNumber': part_number}, ...]

            Each element in the list consists of a return value from
            ``UploadPartTask.main()``.
        :param extra_args: A dictionary of any extra arguments that may be
            used in completing the multipart transfer.
        """
        client.complete_multipart_upload(
            Bucket=bucket,
            Key=key,
            UploadId=upload_id,
            MultipartUpload={'Parts': parts},
            **extra_args,
        )
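The contract documented above is small: subclass `Task`, implement `_main()`, and pass arguments through `main_kwargs` (or futures through `pending_main_kwargs`). A minimal sketch of a custom task; the `ChecksumTask` name is hypothetical, and driving a `TransferCoordinator` directly like this is for illustration only, since these are internal classes:

```python
# Illustrative sketch of the Task contract: subclass Task, implement _main(),
# and feed arguments through main_kwargs. Not part of s3transfer's public API.
from s3transfer.futures import TransferCoordinator
from s3transfer.tasks import Task


class ChecksumTask(Task):
    """A toy task that sums the bytes of a payload."""

    def _main(self, payload):
        # Runs in the executor; the return value becomes the task result
        # (and the TransferFuture result when is_final=True).
        return sum(payload)


coordinator = TransferCoordinator(transfer_id=1)
task = ChecksumTask(
    transfer_coordinator=coordinator,
    main_kwargs={"payload": b"hello"},
    is_final=True,
)
task()  # __call__ waits on dependencies, runs _main(), and sets the result
print(coordinator.result())  # sum(b"hello") == 532
```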
 
spaces/Billius/runwayml-stable-diffusion-v1-5-04-07-2023/README.md DELETED
@@ -1,13 +0,0 @@
---
title: Runwayml Stable Diffusion V1 5 04 07 2023
emoji: 👁
colorFrom: yellow
colorTo: yellow
sdk: gradio
sdk_version: 3.24.1
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/cityscapes.py DELETED
@@ -1,333 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import glob
import json
import logging
import multiprocessing as mp
import numpy as np
import os
from itertools import chain
import pycocotools.mask as mask_util
from fvcore.common.file_io import PathManager
from PIL import Image

from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.logger import setup_logger

try:
    import cv2  # noqa
except ImportError:
    # OpenCV is an optional dependency at the moment
    pass


logger = logging.getLogger(__name__)


def get_cityscapes_files(image_dir, gt_dir):
    files = []
    # scan through the directory
    cities = PathManager.ls(image_dir)
    logger.info(f"{len(cities)} cities found in '{image_dir}'.")
    for city in cities:
        city_img_dir = os.path.join(image_dir, city)
        city_gt_dir = os.path.join(gt_dir, city)
        for basename in PathManager.ls(city_img_dir):
            image_file = os.path.join(city_img_dir, basename)

            suffix = "leftImg8bit.png"
            assert basename.endswith(suffix)
            basename = basename[: -len(suffix)]

            instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
            label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
            json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")

            files.append((image_file, instance_file, label_file, json_file))
    assert len(files), "No images found in {}".format(image_dir)
    for f in files[0]:
        assert PathManager.isfile(f), f
    return files


def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
    """
    Args:
        image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
        gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
        from_json (bool): whether to read annotations from the raw json file or the png files.
        to_polygons (bool): whether to represent the segmentation as polygons
            (COCO's format) instead of masks (cityscapes's format).

    Returns:
        list[dict]: a list of dicts in Detectron2 standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    """
    if from_json:
        assert to_polygons, (
            "Cityscapes's json annotations are in polygon format. "
            "Converting to mask format is not supported now."
        )
    files = get_cityscapes_files(image_dir, gt_dir)

    logger.info("Preprocessing cityscapes annotations ...")
    # This is still not fast: all workers will execute duplicate work and will
    # take up to 10m on an 8-GPU server.
    pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))

    ret = pool.map(
        functools.partial(cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
        files,
    )
    logger.info("Loaded {} images from {}".format(len(ret), image_dir))

    # Map cityscape ids to contiguous ids
    from cityscapesscripts.helpers.labels import labels

    labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
    dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
    for dict_per_image in ret:
        for anno in dict_per_image["annotations"]:
            anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
    return ret
94
-
95
-
96
- def load_cityscapes_semantic(image_dir, gt_dir):
97
- """
98
- Args:
99
- image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
100
- gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
101
-
102
- Returns:
103
- list[dict]: a list of dict, each has "file_name" and
104
- "sem_seg_file_name".
105
- """
106
- ret = []
107
- for image_file in glob.glob(os.path.join(image_dir, "**/*.png")):
108
- suffix = "leftImg8bit.png"
109
- assert image_file.endswith(suffix)
110
- prefix = image_dir
111
-
112
- label_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_labelTrainIds.png"
113
- assert os.path.isfile(
114
- label_file
115
- ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
116
-
117
- json_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_polygons.json"
118
-
119
- with PathManager.open(json_file, "r") as f:
120
- jsonobj = json.load(f)
121
- ret.append(
122
- {
123
- "file_name": image_file,
124
- "sem_seg_file_name": label_file,
125
- "height": jsonobj["imgHeight"],
126
- "width": jsonobj["imgWidth"],
127
- }
128
- )
129
- return ret
130
-
131
-
132
- def cityscapes_files_to_dict(files, from_json, to_polygons):
133
- """
134
- Parse cityscapes annotation files to a dict.
135
-
136
- Args:
137
- files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
138
- from_json (bool): whether to read annotations from the raw json file or the png files.
139
- to_polygons (bool): whether to represent the segmentation as polygons
140
- (COCO's format) instead of masks (cityscapes's format).
141
-
142
- Returns:
143
- A dict in Detectron2 Dataset format.
144
- """
145
- from cityscapesscripts.helpers.labels import id2label, name2label
146
-
147
- image_file, instance_id_file, _, json_file = files
148
-
149
- annos = []
150
-
151
- if from_json:
152
- from shapely.geometry import MultiPolygon, Polygon
153
-
154
- with PathManager.open(json_file, "r") as f:
155
- jsonobj = json.load(f)
156
- ret = {
157
- "file_name": image_file,
158
- "image_id": os.path.basename(image_file),
159
- "height": jsonobj["imgHeight"],
160
- "width": jsonobj["imgWidth"],
161
- }
162
-
163
- # `polygons_union` contains the union of all valid polygons.
164
- polygons_union = Polygon()
165
-
166
- # CityscapesScripts draw the polygons in sequential order
167
- # and each polygon *overwrites* existing ones. See
168
- # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
169
- # We use reverse order, and each polygon *avoids* early ones.
170
- # This will resolve the ploygon overlaps in the same way as CityscapesScripts.
171
- for obj in jsonobj["objects"][::-1]:
172
- if "deleted" in obj: # cityscapes data format specific
173
- continue
174
- label_name = obj["label"]
175
-
176
- try:
177
- label = name2label[label_name]
178
- except KeyError:
179
- if label_name.endswith("group"): # crowd area
180
- label = name2label[label_name[: -len("group")]]
181
- else:
182
- raise
183
- if label.id < 0: # cityscapes data format
184
- continue
185
-
186
- # Cityscapes's raw annotations uses integer coordinates
187
- # Therefore +0.5 here
188
- poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
189
- # CityscapesScript uses PIL.ImageDraw.polygon to rasterize
190
- # polygons for evaluation. This function operates in integer space
191
- # and draws each pixel whose center falls into the polygon.
192
- # Therefore it draws a polygon which is 0.5 "fatter" in expectation.
193
- # We therefore dilate the input polygon by 0.5 as our input.
194
- poly = Polygon(poly_coord).buffer(0.5, resolution=4)
195
-
196
- if not label.hasInstances or label.ignoreInEval:
197
- # even if we won't store the polygon it still contributes to overlaps resolution
198
- polygons_union = polygons_union.union(poly)
199
- continue
200
-
201
- # Take non-overlapping part of the polygon
202
- poly_wo_overlaps = poly.difference(polygons_union)
203
- if poly_wo_overlaps.is_empty:
204
- continue
205
- polygons_union = polygons_union.union(poly)
206
-
207
- anno = {}
208
- anno["iscrowd"] = label_name.endswith("group")
209
- anno["category_id"] = label.id
210
-
211
- if isinstance(poly_wo_overlaps, Polygon):
212
- poly_list = [poly_wo_overlaps]
213
- elif isinstance(poly_wo_overlaps, MultiPolygon):
214
- poly_list = poly_wo_overlaps.geoms
215
- else:
216
- raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
217
-
218
- poly_coord = []
219
- for poly_el in poly_list:
220
- # COCO API can work only with exterior boundaries now, hence we store only them.
221
- # TODO: store both exterior and interior boundaries once other parts of the
222
- # codebase support holes in polygons.
223
- poly_coord.append(list(chain(*poly_el.exterior.coords)))
224
- anno["segmentation"] = poly_coord
225
- (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
226
-
227
- anno["bbox"] = (xmin, ymin, xmax, ymax)
228
- anno["bbox_mode"] = BoxMode.XYXY_ABS
229
-
230
- annos.append(anno)
231
- else:
232
- # See also the official annotation parsing scripts at
233
- # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
234
- with PathManager.open(instance_id_file, "rb") as f:
235
- inst_image = np.asarray(Image.open(f), order="F")
236
- # ids < 24 are stuff labels (filtering them first is about 5% faster)
237
- flattened_ids = np.unique(inst_image[inst_image >= 24])
238
-
239
- ret = {
240
- "file_name": image_file,
241
- "image_id": os.path.basename(image_file),
242
- "height": inst_image.shape[0],
243
- "width": inst_image.shape[1],
244
- }
245
-
246
- for instance_id in flattened_ids:
247
- # For non-crowd annotations, instance_id // 1000 is the label_id
248
- # Crowd annotations have <1000 instance ids
249
- label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
250
- label = id2label[label_id]
251
- if not label.hasInstances or label.ignoreInEval:
252
- continue
253
-
254
- anno = {}
255
- anno["iscrowd"] = instance_id < 1000
256
- anno["category_id"] = label.id
257
-
258
- mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
259
-
260
- inds = np.nonzero(mask)
261
- ymin, ymax = inds[0].min(), inds[0].max()
262
- xmin, xmax = inds[1].min(), inds[1].max()
263
- anno["bbox"] = (xmin, ymin, xmax, ymax)
264
- if xmax <= xmin or ymax <= ymin:
265
- continue
266
- anno["bbox_mode"] = BoxMode.XYXY_ABS
267
- if to_polygons:
268
- # This conversion comes from D4809743 and D5171122,
269
- # when Mask-RCNN was first developed.
270
- contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
271
- -2
272
- ]
273
- polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
274
- # opencv's can produce invalid polygons
275
- if len(polygons) == 0:
276
- continue
277
- anno["segmentation"] = polygons
278
- else:
279
- anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
280
- annos.append(anno)
281
- ret["annotations"] = annos
282
- return ret
283
-
284
-
285
- if __name__ == "__main__":
286
- """
287
- Test the cityscapes dataset loader.
288
-
289
- Usage:
290
- python -m detectron2.data.datasets.cityscapes \
291
- cityscapes/leftImg8bit/train cityscapes/gtFine/train
292
- """
293
- import argparse
294
-
295
- parser = argparse.ArgumentParser()
296
- parser.add_argument("image_dir")
297
- parser.add_argument("gt_dir")
298
- parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
299
- args = parser.parse_args()
300
- from detectron2.data.catalog import Metadata
301
- from detectron2.utils.visualizer import Visualizer
302
- from cityscapesscripts.helpers.labels import labels
303
-
304
- logger = setup_logger(name=__name__)
305
-
306
- dirname = "cityscapes-data-vis"
307
- os.makedirs(dirname, exist_ok=True)
308
-
309
- if args.type == "instance":
310
- dicts = load_cityscapes_instances(
311
- args.image_dir, args.gt_dir, from_json=True, to_polygons=True
312
- )
313
- logger.info("Done loading {} samples.".format(len(dicts)))
314
-
315
- thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval]
316
- meta = Metadata().set(thing_classes=thing_classes)
317
-
318
- else:
319
- dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir)
320
- logger.info("Done loading {} samples.".format(len(dicts)))
321
-
322
- stuff_names = [k.name for k in labels if k.trainId != 255]
323
- stuff_colors = [k.color for k in labels if k.trainId != 255]
324
- meta = Metadata().set(stuff_names=stuff_names, stuff_colors=stuff_colors)
325
-
326
- for d in dicts:
327
- img = np.array(Image.open(d["file_name"]))
328
- visualizer = Visualizer(img, metadata=meta)
329
- vis = visualizer.draw_dataset_dict(d)
330
- # cv2.imshow("a", vis.get_image()[:, :, ::-1])
331
- # cv2.waitKey()
332
- fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
333
- vis.save(fpath)
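
For context, a loader in this format is consumed through detectron2's dataset catalog. A minimal registration sketch (the dataset name and directory paths here are illustrative, not taken from this repository):

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.cityscapes import load_cityscapes_instances

# Illustrative paths: point these at an extracted Cityscapes download.
image_dir = "datasets/cityscapes/leftImg8bit/train"
gt_dir = "datasets/cityscapes/gtFine/train"

# Register a zero-argument callable that returns the list[dict] records.
DatasetCatalog.register(
    "cityscapes_fine_instance_train",
    lambda: load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True),
)
MetadataCatalog.get("cityscapes_fine_instance_train").set(image_dir=image_dir, gt_dir=gt_dir)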
 
spaces/CVPR/LIVE/thrust/thrust/count.h DELETED
@@ -1,235 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
-
- /*! \file count.h
-  *  \brief Counting elements in a range
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/execution_policy.h>
- #include <thrust/iterator/iterator_traits.h>
-
- namespace thrust
- {
-
-
- /*! \addtogroup algorithms
-  */
-
- /*! \addtogroup reductions
-  *  \ingroup algorithms
-  *  \{
-  */
-
- /*! \addtogroup counting
-  *  \ingroup reductions
-  *  \{
-  */
-
-
- /*! \p count finds the number of elements in <tt>[first,last)</tt> that are equal
-  *  to \p value. More precisely, \p count returns the number of iterators \c i in
-  *  <tt>[first, last)</tt> such that <tt>*i == value</tt>.
-  *
-  *  The algorithm's execution is parallelized as determined by \p exec.
-  *
-  *  \param exec The execution policy to use for parallelization.
-  *  \param first The beginning of the sequence.
-  *  \param last The end of the sequence.
-  *  \param value The value to be counted.
-  *  \return The number of elements equal to \p value.
-  *
-  *  \tparam DerivedPolicy The name of the derived execution policy.
-  *  \tparam InputIterator must be a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a> and \c InputIterator's \c value_type must be a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
-  *  \tparam EqualityComparable must be a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a> and can be compared for equality with \c InputIterator's \c value_type.
-  *
-  *  The following code snippet demonstrates how to use \p count to
-  *  count the number of instances of a value of interest in a range, using the \p thrust::device execution policy:
-  *
-  *  \code
-  *  #include <thrust/count.h>
-  *  #include <thrust/device_vector.h>
-  *  #include <thrust/execution_policy.h>
-  *  ...
-  *  // put 3 1s in a device_vector
-  *  thrust::device_vector<int> vec(5,0);
-  *  vec[1] = 1;
-  *  vec[3] = 1;
-  *  vec[4] = 1;
-  *
-  *  // count the 1s
-  *  int result = thrust::count(thrust::device, vec.begin(), vec.end(), 1);
-  *  // result == 3
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/count.html
-  */
- template<typename DerivedPolicy, typename InputIterator, typename EqualityComparable>
- __host__ __device__
-   typename thrust::iterator_traits<InputIterator>::difference_type
-     count(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator first, InputIterator last, const EqualityComparable& value);
-
-
- /*! \p count finds the number of elements in <tt>[first,last)</tt> that are equal
-  *  to \p value. More precisely, \p count returns the number of iterators \c i in
-  *  <tt>[first, last)</tt> such that <tt>*i == value</tt>.
-  *
-  *  \param first The beginning of the sequence.
-  *  \param last The end of the sequence.
-  *  \param value The value to be counted.
-  *  \return The number of elements equal to \p value.
-  *
-  *  \tparam InputIterator must be a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a> and \c InputIterator's \c value_type must be a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
-  *  \tparam EqualityComparable must be a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a> and can be compared for equality with \c InputIterator's \c value_type.
-  *
-  *  The following code snippet demonstrates how to use \p count to
-  *  count the number of instances of a value of interest in a range.
-  *  \code
-  *  #include <thrust/count.h>
-  *  #include <thrust/device_vector.h>
-  *  ...
-  *  // put 3 1s in a device_vector
-  *  thrust::device_vector<int> vec(5,0);
-  *  vec[1] = 1;
-  *  vec[3] = 1;
-  *  vec[4] = 1;
-  *
-  *  // count the 1s
-  *  int result = thrust::count(vec.begin(), vec.end(), 1);
-  *  // result == 3
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/count.html
-  */
- template <typename InputIterator, typename EqualityComparable>
-   typename thrust::iterator_traits<InputIterator>::difference_type
-     count(InputIterator first, InputIterator last, const EqualityComparable& value);
-
-
- /*! \p count_if finds the number of elements in <tt>[first,last)</tt> for which
-  *  a predicate is \c true. More precisely, \p count_if returns the number of iterators
-  *  \c i in <tt>[first, last)</tt> such that <tt>pred(*i) == true</tt>.
-  *
-  *  The algorithm's execution is parallelized as determined by \p exec.
-  *
-  *  \param exec The execution policy to use for parallelization.
-  *  \param first The beginning of the sequence.
-  *  \param last The end of the sequence.
-  *  \param pred The predicate.
-  *  \return The number of elements where \p pred is \c true.
-  *
-  *  \tparam DerivedPolicy The name of the derived execution policy.
-  *  \tparam InputIterator must be a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a> and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type.
-  *  \tparam Predicate must be a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
-  *
-  *  The following code snippet demonstrates how to use \p count_if to
-  *  count the number of odd numbers in a range using the \p thrust::device execution policy:
-  *
-  *  \code
-  *  #include <thrust/count.h>
-  *  #include <thrust/device_vector.h>
-  *  #include <thrust/execution_policy.h>
-  *  ...
-  *  struct is_odd
-  *  {
-  *    __host__ __device__
-  *    bool operator()(int &x)
-  *    {
-  *      return x & 1;
-  *    }
-  *  };
-  *  ...
-  *  // fill a device_vector with even & odd numbers
-  *  thrust::device_vector<int> vec(5);
-  *  vec[0] = 0;
-  *  vec[1] = 1;
-  *  vec[2] = 2;
-  *  vec[3] = 3;
-  *  vec[4] = 4;
-  *
-  *  // count the odd elements in vec
-  *  int result = thrust::count_if(thrust::device, vec.begin(), vec.end(), is_odd());
-  *  // result == 2
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/count.html
-  */
- template<typename DerivedPolicy, typename InputIterator, typename Predicate>
- __host__ __device__
-   typename thrust::iterator_traits<InputIterator>::difference_type
-     count_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator first, InputIterator last, Predicate pred);
-
-
- /*! \p count_if finds the number of elements in <tt>[first,last)</tt> for which
-  *  a predicate is \c true. More precisely, \p count_if returns the number of iterators
-  *  \c i in <tt>[first, last)</tt> such that <tt>pred(*i) == true</tt>.
-  *
-  *  \param first The beginning of the sequence.
-  *  \param last The end of the sequence.
-  *  \param pred The predicate.
-  *  \return The number of elements where \p pred is \c true.
-  *
-  *  \tparam InputIterator must be a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a> and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type.
-  *  \tparam Predicate must be a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
-  *
-  *  The following code snippet demonstrates how to use \p count_if to
-  *  count the number of odd numbers in a range.
-  *  \code
-  *  #include <thrust/count.h>
-  *  #include <thrust/device_vector.h>
-  *  ...
-  *  struct is_odd
-  *  {
-  *    __host__ __device__
-  *    bool operator()(int &x)
-  *    {
-  *      return x & 1;
-  *    }
-  *  };
-  *  ...
-  *  // fill a device_vector with even & odd numbers
-  *  thrust::device_vector<int> vec(5);
-  *  vec[0] = 0;
-  *  vec[1] = 1;
-  *  vec[2] = 2;
-  *  vec[3] = 3;
-  *  vec[4] = 4;
-  *
-  *  // count the odd elements in vec
-  *  int result = thrust::count_if(vec.begin(), vec.end(), is_odd());
-  *  // result == 2
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/count.html
-  */
- template <typename InputIterator, typename Predicate>
-   typename thrust::iterator_traits<InputIterator>::difference_type
-     count_if(InputIterator first, InputIterator last, Predicate pred);
-
-
- /*! \} // end counting
-  *  \} // end reductions
-  */
-
-
- } // end thrust
-
- #include <thrust/detail/count.inl>
-
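
Both families of overloads compute the same reduction: count(first, last, value) behaves like count_if with an equality predicate bound to value. A small sketch of that equivalence, compiled with nvcc (the functor name is illustrative):

#include <thrust/count.h>
#include <thrust/device_vector.h>

// Illustrative predicate: true for elements equal to 3.
struct equals_three
{
  __host__ __device__
  bool operator()(int x) const { return x == 3; }
};

int main(void)
{
  thrust::device_vector<int> vec(4, 3);
  int by_value = thrust::count(vec.begin(), vec.end(), 3);                  // 4
  int by_pred  = thrust::count_if(vec.begin(), vec.end(), equals_three());  // 4
  return (by_value == by_pred) ? 0 : 1;
}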
 
spaces/CVPR/LIVE/thrust/thrust/random/uniform_real_distribution.h DELETED
@@ -1,274 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
-
- /*! \file uniform_real_distribution.h
-  *  \brief A uniform distribution of real-valued numbers
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/pair.h>
- #include <thrust/random/detail/random_core_access.h>
- #include <iostream>
-
- namespace thrust
- {
-
- namespace random
- {
-
-
- /*! \addtogroup random_number_distributions
-  *  \{
-  */
-
- /*! \class uniform_real_distribution
-  *  \brief A \p uniform_real_distribution random number distribution produces floating point
-  *         uniform random numbers from a half-open interval.
-  *
-  *  \tparam RealType The type of floating point number to produce.
-  *
-  *  The following code snippet demonstrates examples of using a \p uniform_real_distribution with a
-  *  random number engine to produce random numbers drawn from a given range:
-  *
-  *  \code
-  *  #include <thrust/random/linear_congruential_engine.h>
-  *  #include <thrust/random/uniform_real_distribution.h>
-  *
-  *  int main(void)
-  *  {
-  *    // create a minstd_rand object to act as our source of randomness
-  *    thrust::minstd_rand rng;
-  *
-  *    // create a uniform_real_distribution to produce floats from [-7,13)
-  *    thrust::uniform_real_distribution<float> dist(-7,13);
-  *
-  *    // write a random number from the range [-7,13) to standard output
-  *    std::cout << dist(rng) << std::endl;
-  *
-  *    // write the range of the distribution, just in case we forgot
-  *    std::cout << dist.min() << std::endl;
-  *
-  *    // -7.0 is printed
-  *
-  *    std::cout << dist.max() << std::endl;
-  *
-  *    // 13.0 is printed
-  *
-  *    // write the parameters of the distribution (which happen to be the bounds) to standard output
-  *    std::cout << dist.a() << std::endl;
-  *
-  *    // -7.0 is printed
-  *
-  *    std::cout << dist.b() << std::endl;
-  *
-  *    // 13.0 is printed
-  *
-  *    return 0;
-  *  }
-  *  \endcode
-  */
- template<typename RealType = double>
-   class uniform_real_distribution
- {
-   public:
-     // types
-
-     /*! \typedef result_type
-      *  \brief The type of the floating point number produced by this \p uniform_real_distribution.
-      */
-     typedef RealType result_type;
-
-     /*! \typedef param_type
-      *  \brief The type of the object encapsulating this \p uniform_real_distribution's parameters.
-      */
-     typedef thrust::pair<RealType,RealType> param_type;
-
-     // constructors and reset functions
-
-     /*! This constructor creates a new \p uniform_real_distribution from two values defining the
-      *  half-open interval of the distribution.
-      *
-      *  \param a The smallest floating point number to potentially produce. Defaults to \c 0.0.
-      *  \param b The smallest number larger than the largest floating point number to potentially produce. Defaults to \c 1.0.
-      */
-     __host__ __device__
-     explicit uniform_real_distribution(RealType a = 0.0, RealType b = 1.0);
-
-     /*! This constructor creates a new \p uniform_real_distribution from a \p param_type object
-      *  encapsulating the range of the distribution.
-      *
-      *  \param parm A \p param_type object encapsulating the parameters (i.e., the range) of the distribution.
-      */
-     __host__ __device__
-     explicit uniform_real_distribution(const param_type &parm);
-
-     /*! This does nothing. It is included to conform to the requirements of the RandomDistribution concept.
-      */
-     __host__ __device__
-     void reset(void);
-
-     // generating functions
-
-     /*! This method produces a new uniform random number drawn from this \p uniform_real_distribution's
-      *  range using a \p UniformRandomNumberGenerator as a source of randomness.
-      *
-      *  \param urng The \p UniformRandomNumberGenerator to use as a source of randomness.
-      */
-     template<typename UniformRandomNumberGenerator>
-     __host__ __device__
-     result_type operator()(UniformRandomNumberGenerator &urng);
-
-     /*! This method produces a new uniform random number as if by creating a new \p uniform_real_distribution
-      *  from the given \p param_type object, and calling its <tt>operator()</tt> method with the given
-      *  \p UniformRandomNumberGenerator as a source of randomness.
-      *
-      *  \param urng The \p UniformRandomNumberGenerator to use as a source of randomness.
-      *  \param parm A \p param_type object encapsulating the parameters of the \p uniform_real_distribution
-      *         to draw from.
-      */
-     template<typename UniformRandomNumberGenerator>
-     __host__ __device__
-     result_type operator()(UniformRandomNumberGenerator &urng, const param_type &parm);
-
-     // property functions
-
-     /*! This method returns the value of the parameter with which this \p uniform_real_distribution
-      *  was constructed.
-      *
-      *  \return The lower bound of this \p uniform_real_distribution's half-open interval.
-      */
-     __host__ __device__
-     result_type a(void) const;
-
-     /*! This method returns the value of the parameter with which this \p uniform_real_distribution
-      *  was constructed.
-      *
-      *  \return The upper bound of this \p uniform_real_distribution's half-open interval.
-      */
-     __host__ __device__
-     result_type b(void) const;
-
-     /*! This method returns a \p param_type object encapsulating the parameters with which this
-      *  \p uniform_real_distribution was constructed.
-      *
-      *  \return A \p param_type object encapsulating the half-open interval of this \p uniform_real_distribution.
-      */
-     __host__ __device__
-     param_type param(void) const;
-
-     /*! This method changes the parameters of this \p uniform_real_distribution using the values encapsulated
-      *  in a given \p param_type object.
-      *
-      *  \param parm A \p param_type object encapsulating the new half-open interval of this \p uniform_real_distribution.
-      */
-     __host__ __device__
-     void param(const param_type &parm);
-
-     /*! This method returns the smallest floating point number this \p uniform_real_distribution can potentially produce.
-      *
-      *  \return The lower bound of this \p uniform_real_distribution's half-open interval.
-      */
-     __host__ __device__
-     result_type min THRUST_PREVENT_MACRO_SUBSTITUTION (void) const;
-
-     /*! This method returns the smallest number larger than the largest floating point number this \p uniform_real_distribution can potentially produce.
-      *
-      *  \return The upper bound of this \p uniform_real_distribution's half-open interval.
-      */
-     __host__ __device__
-     result_type max THRUST_PREVENT_MACRO_SUBSTITUTION (void) const;
-
-     /*! \cond
-      */
-   private:
-     param_type m_param;
-
-     friend struct thrust::random::detail::random_core_access;
-
-     __host__ __device__
-     bool equal(const uniform_real_distribution &rhs) const;
-
-     template<typename CharT, typename Traits>
-     std::basic_ostream<CharT,Traits>& stream_out(std::basic_ostream<CharT,Traits> &os) const;
-
-     template<typename CharT, typename Traits>
-     std::basic_istream<CharT,Traits>& stream_in(std::basic_istream<CharT,Traits> &is);
-     /*! \endcond
-      */
- }; // end uniform_real_distribution
-
-
- /*! This function checks two \p uniform_real_distributions for equality.
-  *  \param lhs The first \p uniform_real_distribution to test.
-  *  \param rhs The second \p uniform_real_distribution to test.
-  *  \return \c true if \p lhs is equal to \p rhs; \c false, otherwise.
-  */
- template<typename RealType>
- __host__ __device__
- bool operator==(const uniform_real_distribution<RealType> &lhs,
-                 const uniform_real_distribution<RealType> &rhs);
-
-
- /*! This function checks two \p uniform_real_distributions for inequality.
-  *  \param lhs The first \p uniform_real_distribution to test.
-  *  \param rhs The second \p uniform_real_distribution to test.
-  *  \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise.
-  */
- template<typename RealType>
- __host__ __device__
- bool operator!=(const uniform_real_distribution<RealType> &lhs,
-                 const uniform_real_distribution<RealType> &rhs);
-
-
- /*! This function streams a uniform_real_distribution to a \p std::basic_ostream.
-  *  \param os The \p basic_ostream to stream out to.
-  *  \param d The \p uniform_real_distribution to stream out.
-  *  \return \p os
-  */
- template<typename RealType,
-          typename CharT, typename Traits>
- std::basic_ostream<CharT,Traits>&
- operator<<(std::basic_ostream<CharT,Traits> &os,
-            const uniform_real_distribution<RealType> &d);
-
-
- /*! This function streams a uniform_real_distribution in from a std::basic_istream.
-  *  \param is The \p basic_istream to stream from.
-  *  \param d The \p uniform_real_distribution to stream in.
-  *  \return \p is
-  */
- template<typename RealType,
-          typename CharT, typename Traits>
- std::basic_istream<CharT,Traits>&
- operator>>(std::basic_istream<CharT,Traits> &is,
-            uniform_real_distribution<RealType> &d);
-
-
- /*! \} // end random_number_distributions
-  */
-
-
- } // end random
-
- using random::uniform_real_distribution;
-
- } // end thrust
-
- #include <thrust/random/detail/uniform_real_distribution.inl>
-
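
Because the engines and this distribution are all __host__ __device__, a common device-side pattern is to construct both inside a functor and discard() ahead by the element index so each element draws an independent value. A sketch of that pattern under an illustrative seed and bounds:

#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>

struct sample_uniform
{
  __host__ __device__
  float operator()(unsigned int i) const
  {
    thrust::default_random_engine rng(1234);                   // illustrative seed
    rng.discard(i);                                            // decorrelate per-element streams
    thrust::uniform_real_distribution<float> dist(-7.0f, 13.0f);
    return dist(rng);                                          // one draw from [-7, 13)
  }
};

int main(void)
{
  thrust::device_vector<float> out(8);
  thrust::transform(thrust::counting_iterator<unsigned int>(0),
                    thrust::counting_iterator<unsigned int>(8),
                    out.begin(),
                    sample_uniform());
  return 0;
}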
 
spaces/CVPR/drawings-to-human/static/_app/immutable/pages/index.svelte-b5d75a5f.js DELETED
@@ -1,19 +0,0 @@
- import{S as ve,i as me,s as be,e as k,k as z,c as x,a as T,m as R,d as m,b as s,K as kt,g as J,J as l,t as A,h as N,L as q,E as X,M as Qe,N as F,O as Q,P as De,Q as Be,j as Xe,R as Fe,T as Ye,U as Mt,V as _a,W as xt,w as Me,x as Pe,y as Ae,q as Ne,o as ze,B as Re,v as ya}from"../chunks/index-bcf2726a.js";import{w as se,b as fa}from"../chunks/paths-d3bcbd10.js";const Je=[{color:[0,0,0],label:"background"},{color:[255,140,0],label:"bag"},{color:[255,255,0],label:"belt"},{color:[255,250,205],label:"dress"},{color:[130,165,180],label:"earrings"},{color:[0,100,0],label:"eyeglass"},{color:[16,78,139],label:"face"},{color:[245,222,179],label:"footwear"},{color:[213,140,88],label:"gloves"},{color:[255,0,0],label:"hair"},{color:[127,255,212],label:"headwear"},{color:[70,130,180],label:"leggings"},{color:[90,140,90],label:"necklace"},{color:[50,205,50],label:"neckwear"},{color:[220,220,220],label:"outer"},{color:[211,211,211],label:"pants"},{color:[50,205,174],label:"ring"},{color:[185,210,205],label:"rompers"},{color:[144,238,144],label:"skin"},{color:[250,235,215],label:"skirt"},{color:[160,140,88],label:"socks"},{color:[225,141,151],label:"tie"},{color:[255,250,250],label:"top"},{color:[50,155,250],label:"wrist wearing"}],Zt="https://radames-text2human-api.hf.space",St=["/samples/WOMEN-Skirts-id_00004406-02_7_additional_segm.png","/samples/MEN-Pants-id_00002565-02_1_front_segm.png","/samples/MEN-Pants-id_00005213-02_4_full_segm.png","/samples/WOMEN-Blouses_Shirts-id_00002356-02_4_full_segm.png","/samples/WOMEN-Blouses_Shirts-id_00004090-03_7_additional_segm.png","/samples/WOMEN-Cardigans-id_00000853-01_2_side_segm.png","/samples/WOMEN-Cardigans-id_00000899-02_1_front_segm.png","/samples/WOMEN-Cardigans-id_00006462-02_7_additional_segm.png","/samples/WOMEN-Dresses-id_00000021-05_1_front_segm.png","/samples/WOMEN-Dresses-id_00002430-04_1_front_segm.png","/samples/WOMEN-Dresses-id_00002966-01_7_additional_segm.png","/samples/WOMEN-Dresses-id_00007332-01_3_back_segm.png","/samples/WOMEN-Graphic_Tees-id_00007242-01_4_full_segm.png","/samples/WOMEN-Jackets_Coats-id_00005263-06_1_front_segm.png","/samples/WOMEN-Jackets_Coats-id_00006296-05_7_additional_segm.png","/samples/WOMEN-Rompers_Jumpsuits-id_00004575-02_1_front_segm.png","/samples/WOMEN-Sweaters-id_00004667-01_4_full_segm.png","/samples/WOMEN-Tees_Tanks-id_00001620-02_4_full_segm.png","/samples/WOMEN-Tees_Tanks-id_00005288-01_2_side_segm.png","/samples/WOMEN-Tees_Tanks-id_00006566-04_4_full_segm.png"],Kt=["upper clothing texture","lower clothing texture","outer clothing texture"],ea=["pure color","stripe/spline","plaid/lattice","floral","denim"];function ga(){return BigInt(0xb7dd73e137d20800&((1<<63)-1)*Math.random())}const Ue=se(new Map),Tt=se(),It=se(),Ot=se(),Ve=se(),Ct=se({texture:"",seed:ga(),steps:10}),je=se(!1),$e=se(!1);function ta(r,e,t){const a=r.slice();return a[3]=e[t],a[5]=t,a}function aa(r){let e,t,a,n,o,i,p,d,_,h,w,E;return{c(){e=k("div"),t=k("input"),i=z(),p=k("label"),d=k("img"),E=z(),this.h()},l(f){e=x(f,"DIV",{class:!0});var I=T(e);t=x(I,"INPUT",{type:!0,name:!0,id:!0,class:!0}),i=R(I),p=x(I,"LABEL",{for:!0,class:!0});var M=T(p);d=x(M,"IMG",{src:!0,alt:!0,class:!0}),M.forEach(m),E=R(I),I.forEach(m),this.h()},h(){s(t,"type","radio"),s(t,"name","samples"),s(t,"id",a="sample-"+r[5]),t.value=n=r[5],t.disabled=o=r[0]===!0,s(t,"class","svelte-1gwcbp"),kt(d.src,_=fa+r[3])||s(d,"src",_),s(d,"alt",h=r[3]),s(d,"class","svelte-1gwcbp"),s(p,"for",w="sample-"+r[5]),s(p,"class","svelte-1gwcbp"),s(e,"class","snap-always 
snap-start")},m(f,I){J(f,e,I),l(e,t),l(e,i),l(e,p),l(p,d),l(e,E)},p(f,I){I&1&&o!==(o=f[0]===!0)&&(t.disabled=o)},d(f){f&&m(e)}}}function wa(r){let e,t,a,n,o,i,p,d,_=St,h=[];for(let w=0;w<_.length;w+=1)h[w]=aa(ta(r,_,w));return{c(){e=k("div"),t=k("h4"),a=A("Select a Template"),n=z(),o=k("form"),i=k("div");for(let w=0;w<h.length;w+=1)h[w].c();this.h()},l(w){e=x(w,"DIV",{});var E=T(e);t=x(E,"H4",{class:!0});var f=T(t);a=N(f,"Select a Template"),f.forEach(m),n=R(E),o=x(E,"FORM",{class:!0});var I=T(o);i=x(I,"DIV",{class:!0});var M=T(i);for(let c=0;c<h.length;c+=1)h[c].l(M);M.forEach(m),I.forEach(m),E.forEach(m),this.h()},h(){s(t,"class","font-bold mt-6 mb-2 my-6 leading-6"),s(i,"class","samples svelte-1gwcbp"),s(o,"class","svelte-1gwcbp")},m(w,E){J(w,e,E),l(e,t),l(t,a),l(e,n),l(e,o),l(o,i);for(let f=0;f<h.length;f+=1)h[f].m(i,null);p||(d=q(o,"input",r[1]),p=!0)},p(w,[E]){if(E&1){_=St;let f;for(f=0;f<_.length;f+=1){const I=ta(w,_,f);h[f]?h[f].p(I,E):(h[f]=aa(I),h[f].c(),h[f].m(i,null))}for(;f<h.length;f+=1)h[f].d(1);h.length=_.length}},i:X,o:X,d(w){w&&m(e),Qe(h,w),p=!1,d()}}}async function Ea(r){return new Promise((e,t)=>{const a=new Image;a.onload=()=>{URL.revokeObjectURL(a.src),e(a)},a.onerror=n=>{t(n)},a.src=URL.createObjectURL(r)})}function ka(r,e,t){let a,n;return F(r,Ot,i=>t(2,a=i)),F(r,je,i=>t(0,n=i)),[n,async i=>{i.preventDefault();const p=St[parseInt(i.target.value)];if(p){const d=await fetch(fa+p).then(h=>h.blob()),_=await Ea(d);Q(Ot,a=_,a)}}]}class xa extends ve{constructor(e){super(),me(this,e,ka,wa,be,{})}}function ra(r,e,t){const a=r.slice();return a[2]=e[t],a[7]=t,a}function la(r){let e,t,a,n,o,i,p,d,_,h,w,E,f=r[2].label+"",I,M,c;return{c(){e=k("div"),t=k("input"),i=z(),p=k("label"),d=De("svg"),_=De("rect"),w=z(),E=k("span"),I=A(f),c=z(),this.h()},l(v){e=x(v,"DIV",{class:!0});var b=T(e);t=x(b,"INPUT",{name:!0,type:!0,id:!0,class:!0}),i=R(b),p=x(b,"LABEL",{for:!0,class:!0});var g=T(p);d=Be(g,"svg",{width:!0,height:!0,viewBox:!0,class:!0});var u=T(d);_=Be(u,"rect",{x:!0,y:!0,width:!0,height:!0,fill:!0}),T(_).forEach(m),u.forEach(m),w=R(g),E=x(g,"SPAN",{class:!0});var y=T(E);I=N(y,f),y.forEach(m),g.forEach(m),c=R(b),b.forEach(m),this.h()},h(){s(t,"name","color"),t.checked=a=r[7]==va,s(t,"type","radio"),s(t,"id",n="color-"+r[7]),t.value=o=r[7],s(t,"class","svelte-1oy4poo"),s(_,"x","0"),s(_,"y","0"),s(_,"width","20"),s(_,"height","20"),s(_,"fill",h="rgb("+r[2].color.join(",")+")"),s(d,"width","20"),s(d,"height","20"),s(d,"viewBox","0 0 20 20"),s(d,"class","svelte-1oy4poo"),s(E,"class","svelte-1oy4poo"),s(p,"for",M="color-"+r[7]),s(p,"class","svelte-1oy4poo"),s(e,"class","snap-always snap-start")},m(v,b){J(v,e,b),l(e,t),l(e,i),l(e,p),l(p,d),l(d,_),l(p,w),l(p,E),l(E,I),l(e,c)},p:X,d(v){v&&m(e)}}}function Sa(r){let e,t,a,n,o,i,p,d,_,h,w,E,f,I=r[0].size+"",M,c,v,b=Je,g=[];for(let u=0;u<b.length;u+=1)g[u]=la(ra(r,b,u));return{c(){e=k("form"),t=k("h4"),a=A("Set the Brush Type"),n=z(),o=k("div");for(let u=0;u<g.length;u+=1)g[u].c();i=z(),p=k("h4"),d=A("Set the Brush Size"),_=z(),h=k("div"),w=k("input"),E=z(),f=k("label"),M=A(I),this.h()},l(u){e=x(u,"FORM",{});var y=T(e);t=x(y,"H4",{class:!0});var O=T(t);a=N(O,"Set the Brush Type"),O.forEach(m),n=R(y),o=x(y,"DIV",{class:!0,name:!0});var P=T(o);for(let L=0;L<g.length;L+=1)g[L].l(P);P.forEach(m),i=R(y),p=x(y,"H4",{class:!0});var S=T(p);d=N(S,"Set the Brush Size"),S.forEach(m),_=R(y),h=x(y,"DIV",{class:!0});var C=T(h);w=x(C,"INPUT",{min:!0,max:!0,step:!0,name:!0,type:!0}),E=R(C),f=x(C,"LABEL",{class:!0,for:!0});var 
B=T(f);M=N(B,I),B.forEach(m),C.forEach(m),y.forEach(m),this.h()},h(){s(t,"class","font-bold mt-6 mb-2 leading-6 my-3"),s(o,"class","colors svelte-1oy4poo"),s(o,"name","colors"),s(p,"class","font-bold mt-6 mb-2 my-6 leading-6"),w.value="10",s(w,"min","1"),s(w,"max","50"),s(w,"step","1"),s(w,"name","brush"),s(w,"type","range"),s(f,"class","pl-2 svelte-1oy4poo"),s(f,"for","brush"),s(h,"class","brush svelte-1oy4poo")},m(u,y){J(u,e,y),l(e,t),l(t,a),l(e,n),l(e,o);for(let O=0;O<g.length;O+=1)g[O].m(o,null);l(e,i),l(e,p),l(p,d),l(e,_),l(e,h),l(h,w),l(h,E),l(h,f),l(f,M),c||(v=q(e,"input",r[1]),c=!0)},p(u,[y]){if(y&0){b=Je;let O;for(O=0;O<b.length;O+=1){const P=ra(u,b,O);g[O]?g[O].p(P,y):(g[O]=la(P),g[O].c(),g[O].m(o,null))}for(;O<g.length;O+=1)g[O].d(1);g.length=b.length}y&1&&I!==(I=u[0].size+"")&&Xe(M,I)},i:X,o:X,d(u){u&&m(e),Qe(g,u),c=!1,v()}}}const va=6;function Ta(r,e,t){let a;F(r,Ve,_=>t(0,a=_));const{color:n,label:o}=Je[va];let i=`rgb(${n.join(",")})`,p=40;return Q(Ve,a={color:i,size:p,label:o},a),[a,async _=>{const h=_.target;if(h.name==="color"){const w=parseInt(h.value),{color:E,label:f}=Je[w];i=`rgb(${E.join(",")})`,Q(Ve,a={color:i,size:p,label:f},a)}else h.name==="brush"&&(p=parseInt(h.value),Q(Ve,a={color:i,size:p,label:o},a))},n]}class Ia extends ve{constructor(e){super(),me(this,e,Ta,Sa,be,{})}}function oa(r,e,t){const a=r.slice();return a[10]=e[t],a[12]=t,a}function sa(r,e,t){const a=r.slice();return a[13]=e[t],a}function na(r){let e,t=r[13]+"",a,n,o;return{c(){e=k("option"),a=A(t),o=A("`"),this.h()},l(i){e=x(i,"OPTION",{});var p=T(e);a=N(p,t),p.forEach(m),o=N(i,"`"),this.h()},h(){e.__value=n=r[13],e.value=e.__value},m(i,p){J(i,e,p),l(e,a),J(i,o,p)},p:X,d(i){i&&m(e),i&&m(o)}}}function ia(r){let e,t,a=r[10]+"",n,o,i,p,d=ea,_=[];for(let h=0;h<d.length;h+=1)_[h]=na(sa(r,d,h));return{c(){e=k("select"),t=k("option"),n=A(a);for(let h=0;h<_.length;h+=1)_[h].c();this.h()},l(h){e=x(h,"SELECT",{name:!0,class:!0});var w=T(e);t=x(w,"OPTION",{});var E=T(t);n=N(E,a),E.forEach(m);for(let f=0;f<_.length;f+=1)_[f].l(w);w.forEach(m),this.h()},h(){t.disabled=!0,t.selected=!0,t.__value=o=r[10],t.value=t.__value,s(e,"name",i="texture"+r[12]),e.disabled=p=r[3]===!0,s(e,"class","svelte-uoay71")},m(h,w){J(h,e,w),l(e,t),l(t,n);for(let E=0;E<_.length;E+=1)_[E].m(e,null)},p(h,w){if(w&0){d=ea;let E;for(E=0;E<d.length;E+=1){const f=sa(h,d,E);_[E]?_[E].p(f,w):(_[E]=na(f),_[E].c(),_[E].m(e,null))}for(;E<_.length;E+=1)_[E].d(1);_.length=d.length}w&8&&p!==(p=h[3]===!0)&&(e.disabled=p)},d(h){h&&m(e),Qe(_,h)}}}function Oa(r){let e,t,a,n,o,i,p,d,_,h,w,E,f,I,M,c,v,b,g,u,y,O,P,S,C,B,L,U=Kt,V=[];for(let j=0;j<U.length;j+=1)V[j]=ia(oa(r,U,j));return{c(){e=k("form"),t=k("h4"),a=A("Texture Description"),n=z(),o=k("div");for(let j=0;j<V.length;j+=1)V[j].c();i=z(),p=k("h4"),d=A("Random Seed"),_=z(),h=k("input"),E=z(),f=k("button"),I=A("Random"),c=z(),v=k("h4"),b=A("Sample Steps"),g=z(),u=k("div"),y=k("input"),P=z(),S=k("label"),C=A(r[2]),this.h()},l(j){e=x(j,"FORM",{});var D=T(e);t=x(D,"H4",{class:!0});var H=T(t);a=N(H,"Texture Description"),H.forEach(m),n=R(D),o=x(D,"DIV",{class:!0});var K=T(o);for(let ne=0;ne<V.length;ne+=1)V[ne].l(K);K.forEach(m),i=R(D),p=x(D,"H4",{class:!0});var Z=T(p);d=N(Z,"Random Seed"),Z.forEach(m),_=R(D),h=x(D,"INPUT",{type:!0,name:!0,placeholder:!0,class:!0}),E=R(D),f=x(D,"BUTTON",{class:!0});var _e=T(f);I=N(_e,"Random"),_e.forEach(m),c=R(D),v=x(D,"H4",{class:!0});var ye=T(v);b=N(ye,"Sample Steps"),ye.forEach(m),g=R(D),u=x(D,"DIV",{class:!0});var 
$=T(u);y=x($,"INPUT",{type:!0,name:!0,min:!0,max:!0,step:!0,class:!0}),P=R($),S=x($,"LABEL",{class:!0,for:!0});var we=T(S);C=N(we,r[2]),we.forEach(m),$.forEach(m),D.forEach(m),this.h()},h(){s(t,"class","font-bold mt-6 mb-2 my-6 leading-6"),s(o,"class","sections svelte-uoay71"),s(p,"class","font-bold mt-6 mb-2 my-6 leading-6"),s(h,"type","Number"),s(h,"name","seed"),s(h,"placeholder","Integer Seed"),h.disabled=w=r[3]===!0,s(h,"class","svelte-uoay71"),f.disabled=M=r[3]===!0,s(f,"class","svelte-uoay71"),s(v,"class","font-bold mt-6 mb-2 my-6 leading-6"),s(y,"type","range"),s(y,"name","steps"),s(y,"min","10"),s(y,"max","300"),s(y,"step","1"),y.disabled=O=r[3]===!0,s(y,"class","svelte-uoay71"),s(S,"class","pl-2 svelte-uoay71"),s(S,"for","steps"),s(u,"class","flex")},m(j,D){J(j,e,D),l(e,t),l(t,a),l(e,n),l(e,o);for(let H=0;H<V.length;H+=1)V[H].m(o,null);l(e,i),l(e,p),l(p,d),l(e,_),l(e,h),Fe(h,r[1]),l(e,E),l(e,f),l(f,I),l(e,c),l(e,v),l(v,b),l(e,g),l(e,u),l(u,y),Fe(y,r[2]),l(u,P),l(u,S),l(S,C),r[8](e),B||(L=[q(h,"input",r[5]),q(f,"click",Ye(r[6])),q(y,"change",r[7]),q(y,"input",r[7]),q(e,"input",r[4])],B=!0)},p(j,[D]){if(D&8){U=Kt;let H;for(H=0;H<U.length;H+=1){const K=oa(j,U,H);V[H]?V[H].p(K,D):(V[H]=ia(K),V[H].c(),V[H].m(o,null))}for(;H<V.length;H+=1)V[H].d(1);V.length=U.length}D&8&&w!==(w=j[3]===!0)&&(h.disabled=w),D&2&&Fe(h,j[1]),D&8&&M!==(M=j[3]===!0)&&(f.disabled=M),D&8&&O!==(O=j[3]===!0)&&(y.disabled=O),D&4&&Fe(y,j[2]),D&4&&Xe(C,j[2])},i:X,o:X,d(j){j&&m(e),Qe(V,j),r[8](null),B=!1,Mt(L)}}}function Ca(r,e,t){let a,n;F(r,Ct,f=>t(9,a=f)),F(r,je,f=>t(3,n=f));function o(){const f=i.elements;Q(Ct,a={texture:`${f.texture0.value},${f.texture1.value},${f.texture2.value}`,seed:BigInt(f.seed.value),steps:parseInt(f.steps.value)},a)}let i,p=a.seed,d=a.steps;function _(){p=this.value,t(1,p)}const h=()=>{t(1,p=ga()),o()};function w(){d=_a(this.value),t(2,d)}function E(f){xt[f?"unshift":"push"](()=>{i=f,t(0,i)})}return[i,p,d,n,o,_,h,w,E]}class Ma extends ve{constructor(e){super(),me(this,e,Ca,Oa,be,{})}}let ma=(r=21)=>crypto.getRandomValues(new Uint8Array(r)).reduce((e,t)=>(t&=63,t<36?e+=t.toString(36):t<62?e+=(t-26).toString(36).toUpperCase():t>62?e+="-":e+="_",e),"");var Pa=typeof globalThis!="undefined"?globalThis:typeof window!="undefined"?window:typeof global!="undefined"?global:typeof self!="undefined"?self:{};function Aa(r){return r&&r.__esModule&&Object.prototype.hasOwnProperty.call(r,"default")?r.default:r}var ba={exports:{}};(function(r,e){(function(t,a){r.exports=a()})(typeof self!="undefined"?self:Pa,function(){return function(t){var a={};function n(o){if(a[o])return a[o].exports;var i=a[o]={i:o,l:!1,exports:{}};return t[o].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=t,n.c=a,n.d=function(o,i,p){n.o(o,i)||Object.defineProperty(o,i,{enumerable:!0,get:p})},n.r=function(o){typeof Symbol!="undefined"&&Symbol.toStringTag&&Object.defineProperty(o,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(o,"__esModule",{value:!0})},n.t=function(o,i){if(1&i&&(o=n(o)),8&i||4&i&&typeof o=="object"&&o&&o.__esModule)return o;var p=Object.create(null);if(n.r(p),Object.defineProperty(p,"default",{enumerable:!0,value:o}),2&i&&typeof o!="string")for(var d in o)n.d(p,d,function(_){return o[_]}.bind(null,d));return p},n.n=function(o){var i=o&&o.__esModule?function(){return o.default}:function(){return o};return n.d(i,"a",i),i},n.o=function(o,i){return Object.prototype.hasOwnProperty.call(o,i)},n.p="",n(n.s=0)}([function(t,a,n){function o(f,I){return function(M){if(Array.isArray(M))return 
M}(f)||function(M,c){if(Symbol.iterator in Object(M)||Object.prototype.toString.call(M)==="[object Arguments]"){var v=[],b=!0,g=!1,u=void 0;try{for(var y,O=M[Symbol.iterator]();!(b=(y=O.next()).done)&&(v.push(y.value),!c||v.length!==c);b=!0);}catch(P){g=!0,u=P}finally{try{b||O.return==null||O.return()}finally{if(g)throw u}}return v}}(f,I)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance")}()}function i(f){return function(I){if(Array.isArray(I)){for(var M=0,c=new Array(I.length);M<I.length;M++)c[M]=I[M];return c}}(f)||function(I){if(Symbol.iterator in Object(I)||Object.prototype.toString.call(I)==="[object Arguments]")return Array.from(I)}(f)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance")}()}function p(f,I){for(var M=0;M<I.length;M++){var c=I[M];c.enumerable=c.enumerable||!1,c.configurable=!0,"value"in c&&(c.writable=!0),Object.defineProperty(f,c.key,c)}}n.r(a);var d=/^#?[0-9A-Fa-f]{1,2}[0-9A-Fa-f]{1,2}[0-9A-Fa-f]{1,2}$/,_=/^rgb\((\s+)?[0-9]{1,3},(\s+)?[0-9]{1,3},(\s+)?[0-9]{1,3}(\s+)?\)$/,h=function(){function f(){(function(c,v){if(!(c instanceof v))throw new TypeError("Cannot call a class as a function")})(this,f),this.canvases={}}var I,M;return I=f,(M=[{key:"parseColor",value:function(c){var v=d.test(c),b=_.test(c);if(!v&&!b)throw new Error("Color is not correct format. #123123 or rgb(123, 123, 123) format required.");if(v){var g=c[0]==="#"?c.slice(1):c;return g=g.length===3?g.split("").reduce(function(S,C){return[].concat(i(S),[C,C])},[]).join(""):g,{r:parseInt(g.slice(0,2),16),g:parseInt(g.slice(2,4),16),b:parseInt(g.slice(4,6),16)}}if(b){var u=o(c.replace(/rgb|\s+|\(|\)/g,"").split(",").map(function(S){return parseInt(S)}),3),y=u[0],O=u[1],P=u[2];return{r:y=y>255?255:y,g:O=O>255?255:O,b:P=P>255?255:P}}}},{key:"make",value:function(c){var v=c.size,b=c.color;try{v*=window.devicePixelRatio;var g=this.parseColor(b),u=JSON.stringify(g);if(this.canvases[u]=this.canvases[u]||{},this.canvases[u][v]!=null)return this.canvases[u][v];var y=document.createElement("canvas");v+=v%2,y.width=v,y.height=v;for(var O=y.getContext("2d"),P=O.createImageData(v,v),S=0;S<P.data.length;S+=4)P.data[S]=255,P.data[S+1]=255,P.data[S+2]=255,P.data[S+3]=0;return this.plotCircle(2*v,4*v*(v/2),v/2,P,v,g),this.fillCircle(P,g),O.putImageData(P,0,0),this.canvases[u][v]=y,y}catch(C){console.error(C)}}},{key:"plotCircle",value:function(c,v,b,g,u,y){var O=-b,P=0,S=2-2*b;do{var C=c-4*(O+1)+(v+4*u*(P-1));g.data[C+0]=y.r,g.data[C+1]=y.g,g.data[C+2]=y.b,g.data[C+3]=255;var B=c-P*(4*u)+(v-4*(O+1));g.data[B+0]=y.r,g.data[B+1]=y.g,g.data[B+2]=y.b,g.data[B+3]=255;var L=c+4*O+(v-P*(4*u));g.data[L+0]=y.r,g.data[L+1]=y.g,g.data[L+2]=y.b,g.data[L+3]=255;var U=c+4*u*(P-1)+(v+4*O);g.data[U+0]=y.r,g.data[U+1]=y.g,g.data[U+2]=y.b,g.data[U+3]=255,(b=S)<=P&&(S+=2*++P+1),(b>O||S>P)&&(S+=2*++O+1)}while(O<0)}},{key:"fillCircle",value:function(c,v){for(var b=4*c.width,g=1;g<c.height-1;g+=1)for(var u=!1,y=!1,O=!1,P=0;P<b;P+=4){var S=b*g+P,C=c.data[S+3],B=C===255;B&&!u?u=!0:C===0&&u?y=!0:B&&u&&y&&(O=!0),u&&y&&!O&&(c.data[S]=v.r,c.data[S+1]=v.g,c.data[S+2]=v.b,c.data[S+3]=255)}}}])&&p(I.prototype,M),f}();function w(f,I){for(var M=0;M<I.length;M++){var c=I[M];c.enumerable=c.enumerable||!1,c.configurable=!0,"value"in c&&(c.writable=!0),Object.defineProperty(f,c.key,c)}}var E=function(){function f(c){(function(v,b){if(!(v instanceof b))throw new TypeError("Cannot call a class as a function")})(this,f),this.canvas=c,this.context=c.getContext("2d"),this.stampMaker=new 
h,this.configPixelRatio()}var I,M;return I=f,(M=[{key:"configPixelRatio",value:function(){var c=this.canvas,v=c.width,b=c.height;this.canvas.width=v*this.dpr,this.canvas.height=b*this.dpr,this.canvas.style.width="".concat(v,"px"),this.canvas.style.height="".concat(b,"px"),this.context.scale(this.dpr,this.dpr),this.context.imageSmoothingEnabled=!1}},{key:"exportAsPNG",value:function(c){var v=this;return new Promise(function(b){var g=document.createElement("canvas"),u=g.getContext("2d"),y=v.canvas,O=y.width,P=y.height,S=O/v.dpr,C=P/v.dpr;g.width=S,g.height=C,u.imageSmoothingEnabled=!1,u.drawImage(v.canvas,0,0,O,P,0,0,S,C),g.toBlob(function(B){B.lastModifedDate=new Date,B.name=c,b(B)})})}},{key:"distanceBetween",value:function(c,v){return Math.sqrt(Math.pow(v.x-c.x,2)+Math.pow(v.y-c.y,2))}},{key:"angleBetween",value:function(c,v){return Math.atan2(v.x-c.x,v.y-c.y)}},{key:"draw",value:function(c){var v=c.from,b=c.to,g=c.size,u=c.color;this.context.globalCompositeOperation="source-over",this.brush({from:v,to:b,size:g,color:u})}},{key:"erase",value:function(c){var v=c.from,b=c.to,g=c.size;this.context.globalCompositeOperation="destination-out",this.brush({from:v,to:b,size:g,color:"#000000"})}},{key:"brush",value:function(c){var v=this,b=c.from,g=c.to,u=c.size,y=c.color,O=(u-u%2)/2,P=this.stampMaker.make({size:u,color:y});if(b.x!==g.x||b.y!==g.y)for(var S=this.distanceBetween(b,g),C=this.angleBetween(b,g),B=function(j){var D=b.x+Math.sin(C)*j-O,H=b.y+Math.cos(C)*j-O;window.requestAnimationFrame(function(){v.context.drawImage(P,Math.round(D),Math.round(H),u,u)})},L=0;L<S;L+=1)B(L);else{var U=b.x-O,V=b.y-O;this.context.drawImage(P,Math.round(U),Math.round(V),u,u)}}},{key:"dpr",get:function(){return window.devicePixelRatio||1}}])&&w(I.prototype,M),f}();a.default=E}])})})(ba);var ca=Aa(ba.exports);function Na(r){let e,t;return{c(){e=De("svg"),t=De("path"),this.h()},l(a){e=Be(a,"svg",{xmlns:!0,width:!0,viewBox:!0,class:!0});var n=T(e);t=Be(n,"path",{fill:!0,stroke:!0,"stroke-width":!0,d:!0}),T(t).forEach(m),n.forEach(m),this.h()},h(){s(t,"fill","white"),s(t,"stroke","black"),s(t,"stroke-width","30"),s(t,"d","M480 256c0 123.4-100.5 223.9-223.9 223.9c-48.84 0-95.17-15.58-134.2-44.86c-14.12-10.59-16.97-30.66-6.375-44.81c10.59-14.12 30.62-16.94 44.81-6.375c27.84 20.91 61 31.94 95.88 31.94C344.3 415.8 416 344.1 416 256s-71.69-159.8-159.8-159.8c-37.46 0-73.09 13.49-101.3 36.64l45.12 45.14c17.01 17.02 4.955 46.1-19.1 46.1H35.17C24.58 224.1 16 215.5 16 204.9V59.04c0-24.04 29.07-36.08 46.07-19.07l47.6 47.63C149.9 52.71 201.5 32.11 256.1 32.11C379.5 32.11 480 132.6 480 256z"),s(e,"xmlns","http://www.w3.org/2000/svg"),s(e,"width","20"),s(e,"viewBox","0 0 512 512"),s(e,"class",r[0])},m(a,n){J(a,e,n),l(e,t)},p(a,[n]){n&1&&s(e,"class",a[0])},i:X,o:X,d(a){a&&m(e)}}}function za(r,e,t){let{classNames:a=""}=e;return r.$$set=n=>{"classNames"in n&&t(0,a=n.classNames)},[a]}class Ra extends ve{constructor(e){super(),me(this,e,za,Na,be,{classNames:0})}}function Da(r){var v;let e,t,a,n,o,i,p,d=((v=r[0])==null?void 0:v.label)+"",_,h,w,E,f,I,M,c;return E=new Ra({}),{c(){e=k("div"),t=k("div"),a=k("canvas"),n=z(),o=k("canvas"),i=z(),p=k("span"),_=A(d),h=z(),w=k("button"),Me(E.$$.fragment),this.h()},l(b){e=x(b,"DIV",{});var g=T(e);t=x(g,"DIV",{class:!0});var u=T(t);a=x(u,"CANVAS",{class:!0,width:!0,height:!0}),T(a).forEach(m),n=R(u),o=x(u,"CANVAS",{class:!0,width:!0,height:!0}),T(o).forEach(m),i=R(u),p=x(u,"SPAN",{class:!0});var y=T(p);_=N(y,d),y.forEach(m),h=R(u),w=x(u,"BUTTON",{class:!0});var 
O=T(w);Pe(E.$$.fragment,O),O.forEach(m),u.forEach(m),g.forEach(m),this.h()},h(){s(a,"class","canvas svelte-1k5plc8"),s(a,"width","256"),s(a,"height","512"),s(o,"class","brush svelte-1k5plc8"),s(o,"width","10"),s(o,"height","10"),s(p,"class","label svelte-1k5plc8"),s(w,"class","absolute bottom-0 left-0 p-3"),w.disabled=f=r[3].size<=0,s(t,"class","relative overflow-clip")},m(b,g){J(b,e,g),l(e,t),l(t,a),r[11](a),l(t,n),l(t,o),r[12](o),l(t,i),l(t,p),l(p,_),l(t,h),l(t,w),Ae(E,w,null),I=!0,M||(c=[q(a,"touchmove",ja),q(a,"pointerenter",Ba),q(a,"pointerup",r[4]),q(a,"pointerleave",r[4]),q(a,"pointercancel",r[4]),q(a,"pointerout",r[4]),q(a,"pointermove",r[6]),q(a,"pointerdown",r[5]),q(w,"click",Ye(r[13]))],M=!0)},p(b,[g]){var u;(!I||g&1)&&d!==(d=((u=b[0])==null?void 0:u.label)+"")&&Xe(_,d),(!I||g&8&&f!==(f=b[3].size<=0))&&(w.disabled=f)},i(b){I||(Ne(E.$$.fragment,b),I=!0)},o(b){ze(E.$$.fragment,b),I=!1},d(b){b&&m(e),r[11](null),r[12](null),Re(E),M=!1,Mt(c)}}}function Ba(){}function ua(r,e){const t=r.getBoundingClientRect();return{x:(e.clientX-t.left)*(r.width/t.width),y:(e.clientY-t.top)*(r.height/t.height)}}function pa(r){r.fillStyle="#000000",r.fillRect(0,0,r.canvas.width,r.canvas.height)}function Et(r,e){r.drawImage(e,0,0,r.canvas.width,r.canvas.height)}const ja=r=>r.preventDefault();function La(r,e,t){let a,n,o,i;F(r,Ue,S=>t(3,a=S)),F(r,Ot,S=>t(10,n=S)),F(r,Ve,S=>t(0,o=S)),F(r,It,S=>t(18,i=S));let p,d,_,h,w={x:0,y:0},E;ya(()=>{t(9,h=p.getContext("2d")),t(8,_=d.getContext("2d")),window.devicePixelRatio=1,E=new ca(p),t(1,p.style.height="unset",p),t(1,p.style.width="unset",p),Q(It,i=p,i),pa(h)});let f=!1,I;function M(){t(2,d.style.top=`${10+o.size/2}px`,d),t(2,d.style.left=`${10+o.size/2}px`,d),f=!1}function c(S){f=!0,w=ua(p,S),E.draw({from:w,to:w,size:o.size,color:o.color}),I=ma(),Ue.update(C=>(C.set(I,{brush:o,points:[{from:w,to:w}]}),C))}function v(S){const C=ua(p,S);t(2,d.style.top=`${S.offsetY}px`,d),t(2,d.style.left=`${S.offsetX}px`,d),f&&(E.draw({from:w,to:C,size:o.size,color:o.color}),Ue.update(B=>{const L=B.get(I);return L==null||L.points.push({from:w,to:C}),B}),w=C)}function b(S){const{size:C,color:B}=S;t(2,d.width=C,d),t(2,d.height=C,d),t(8,_.fillStyle=B,_),_.arc(C/2,C/2,C/2,0,2*Math.PI),_.fill()}function g(){if(a.size<=0)return;const S=Array.from(a.keys());Ue.update(C=>(C.delete(S[S.length-1]),C)),u(h)}function u(S){const C=document.createElement("canvas");C.width=256,C.height=512,window.devicePixelRatio=1;const B=new ca(C);pa(S),n&&Et(S,n),Array.from(a.values()).forEach(L=>{L.points.forEach((U,V)=>{B.draw({from:U.from,to:U.to,size:L.brush.size,color:L.brush.color})})}),requestAnimationFrame(()=>{Et(S,C)})}function y(S){xt[S?"unshift":"push"](()=>{p=S,t(1,p)})}function O(S){xt[S?"unshift":"push"](()=>{d=S,t(2,d),t(8,_),t(0,o)})}const P=()=>g();return r.$$.update=()=>{r.$$.dirty&257&&_&&o&&(b(o),t(2,d.style.top=`${10+o.size/2}px`,d),t(2,d.style.left=`${10+o.size/2}px`,d)),r.$$.dirty&1536&&n&&(Et(h,n),Q(Ue,a=new Map,a))},[o,p,d,a,M,c,v,g,_,h,n,y,O,P]}class Ha extends ve{constructor(e){super(),me(this,e,La,Da,be,{})}}function da(r){let e,t,a;return{c(){e=k("img"),this.h()},l(n){e=x(n,"IMG",{class:!0,alt:!0,src:!0,width:!0,height:!0}),this.h()},h(){s(e,"class",t="image "+(r[1]?"opacity-30":"")+" svelte-1iibjwx"),s(e,"alt","Generative Human Result"),kt(e.src,a=r[0])||s(e,"src",a),s(e,"width","256"),s(e,"height","512")},m(n,o){J(n,e,o)},p(n,o){o&2&&t!==(t="image "+(n[1]?"opacity-30":"")+" svelte-1iibjwx")&&s(e,"class",t),o&1&&!kt(e.src,a=n[0])&&s(e,"src",a)},d(n){n&&m(e)}}}function ha(r){let 
e,t,a,n,o,i;return{c(){e=k("div"),t=De("svg"),a=De("path"),n=z(),o=k("span"),i=A(r[2]),this.h()},l(p){e=x(p,"DIV",{class:!0});var d=T(e);t=Be(d,"svg",{xmlns:!0,fill:!0,viewBox:!0,class:!0});var _=T(t);a=Be(_,"path",{fill:!0,d:!0}),T(a).forEach(m),_.forEach(m),n=R(d),o=x(d,"SPAN",{class:!0});var h=T(o);i=N(h,r[2]),h.forEach(m),d.forEach(m),this.h()},h(){s(a,"fill","currentColor"),s(a,"d","M20 12a8 8 0 0 1-8 8v4a12 12 0 0 0 12-12h-4Zm-2-5.3a8 8 0 0 1 2 5.3h4c0-3-1.1-5.8-3-8l-3 2.7Z"),s(t,"xmlns","http://www.w3.org/2000/svg"),s(t,"fill","none"),s(t,"viewBox","0 0 24 24"),s(t,"class","animate-spin max-w-[3rem]"),s(o,"class","text-xs"),s(e,"class","loading svelte-1iibjwx")},m(p,d){J(p,e,d),l(e,t),l(t,a),l(e,n),l(e,o),l(o,i)},p(p,d){d&4&&Xe(i,p[2])},d(p){p&&m(e)}}}function Ua(r){let e,t,a=r[0]&&da(r),n=r[1]&&ha(r);return{c(){e=k("div"),a&&a.c(),t=z(),n&&n.c(),this.h()},l(o){e=x(o,"DIV",{class:!0});var i=T(e);a&&a.l(i),t=R(i),n&&n.l(i),i.forEach(m),this.h()},h(){s(e,"class","relative overflow-clip flex flex-col justify-center items-center w-full h-full")},m(o,i){J(o,e,i),a&&a.m(e,null),l(e,t),n&&n.m(e,null)},p(o,[i]){o[0]?a?a.p(o,i):(a=da(o),a.c(),a.m(e,t)):a&&(a.d(1),a=null),o[1]?n?n.p(o,i):(n=ha(o),n.c(),n.m(e,null)):n&&(n.d(1),n=null)},i:X,o:X,d(o){o&&m(e),a&&a.d(),n&&n.d()}}}async function Va(r){return new Promise((e,t)=>{try{const a=document.createElement("a");a.download=`sucess-${Date.now()}.png`,a.target="_self",a.onclick=async n=>{a.href&&URL.revokeObjectURL(a.href),a.href=r},requestAnimationFrame(()=>{console.log("Downloading image."),a.click(),e(null)})}catch{t()}})}function Wa(r,e,t){let a,n,o,i,p;F(r,$e,h=>t(3,a=h)),F(r,Tt,h=>t(0,n=h)),F(r,je,h=>t(1,o=h)),F(r,Ct,h=>t(4,i=h)),F(r,It,h=>t(5,p=h));let d="";async function _(h,{texture:w,steps:E,seed:f}){const I=ma(11);let M,c;t(2,d="Generating");const v=new AbortController;await fetch(Zt+"/api/queue/push/",{signal:v.signal,headers:{"Content-Type":"application/json"},method:"POST",body:JSON.stringify({fn_index:2,data:[h,w,E,Number(f)],action:"predict",session_hash:I})}).then(async u=>{({hash:M,queue_position:c}=await u.json())}).catch(u=>{console.log(u)});let b,g;for(;b!=="QUEUED"||b!=="PENDING";)try{const u=await fetch(Zt+"/api/queue/status/",{signal:v.signal,headers:{"Content-Type":"application/json"},method:"POST",body:JSON.stringify({hash:M})});if(u.status!=200)break;if({status:b,data:g}=await u.json(),b==="QUEUED")t(2,d=`Queue ${g}/${c}`);else if(b==="PENDING")t(2,d="Pending");else if(b==="FAILED"){t(2,d="Failed");break}else if(b==="COMPLETE"){t(2,d="Complete");break}await new Promise(y=>setTimeout(y,1e3))}catch(u){console.log(u);break}return g}return r.$$.update=()=>{r.$$.dirty&50&&(async()=>{if(o){const h=await _(p.toDataURL(),i);h&&Q(Tt,n=h.data[0],n),Q(je,o=!1,o)}})(),r.$$.dirty&9&&(async()=>a&&(await Va(n),Q($e,a=!1,a)))()},[n,o,d,a,i,p]}class Ga extends ve{constructor(e){super(),me(this,e,Wa,Ua,be,{})}}function qa(r){let e,t,a,n,o,i,p,d,_,h,w,E,f,I,M,c,v,b,g,u,y,O,P,S,C,B,L,U,V,j,D,H,K,Z,_e,ye,$,we,ne,Ze,Ee,ie,Ke,et,tt,ee,ke,Le,at,rt,te,lt,ce,ot,st,ue,nt,it,ct,He,xe,ut,pt,pe,dt,de,ht,le,he,ft,fe,gt,ae,vt,We,mt,re,bt,Ge,_t,ge,Se,yt,Pt;return pe=new Ia({}),de=new xa({}),he=new Ha({}),fe=new Ga({}),ge=new Ma({}),{c(){e=k("div"),t=k("article"),a=k("h1"),n=A("Drawings to Human"),o=z(),i=k("p"),p=A("This is an unofficial drawing tool to explore the generative human generator "),d=k("a"),_=k("span"),h=A("Text2Human"),w=A(`. Please check all the model features on this
2
- `),E=k("a"),f=A("Space"),I=A("."),M=z(),c=k("small"),v=k("h4"),b=A("Thanks to"),g=z(),u=k("p"),y=A("Authors: "),O=k("a"),P=A("Yuming Jiang"),S=A(`,
3
- `),C=k("a"),B=A("Shuai Yang"),L=A(`,
4
- `),U=k("a"),V=A("Haonan Qiu"),j=A(`,
5
- `),D=k("a"),H=A("Wayne Wu"),K=A(`,
6
- `),Z=k("a"),_e=A("Chen Change Loy"),ye=A(`
7
- and `),$=k("a"),we=A("Ziwei Liu"),ne=k("br"),Ze=z(),Ee=k("p"),ie=k("a"),Ke=A("@hysts"),et=A(" for the original Space implementation"),tt=z(),ee=k("details"),ke=k("summary"),Le=k("small"),at=A("More"),rt=z(),te=k("p"),lt=A("The backend is powered by a "),ce=k("a"),ot=A("Gradio"),st=A(`
8
- application running on
9
- `),ue=k("a"),nt=A("Spaces"),it=A(`. You can
10
- also check the source code and clone it locally if you want:`),ct=z(),He=k("p"),xe=k("code"),ut=A("git clone https://huggingface.co/spaces/CVPR/Text2Human"),pt=z(),Me(pe.$$.fragment),dt=z(),Me(de.$$.fragment),ht=z(),le=k("div"),Me(he.$$.fragment),ft=z(),Me(fe.$$.fragment),gt=z(),ae=k("button"),vt=A("Generate Human"),mt=z(),re=k("button"),bt=A("Save Result"),_t=z(),Me(ge.$$.fragment),this.h()},l(W){e=x(W,"DIV",{class:!0});var G=T(e);t=x(G,"ARTICLE",{class:!0});var oe=T(t);a=x(oe,"H1",{});var At=T(a);n=N(At,"Drawings to Human"),At.forEach(m),o=R(oe),i=x(oe,"P",{});var Te=T(i);p=N(Te,"This is an unofficial drawing tool to explore the generative human generator "),d=x(Te,"A",{href:!0,target:!0});var Nt=T(d);_=x(Nt,"SPAN",{});var zt=T(_);h=N(zt,"Text2Human"),zt.forEach(m),Nt.forEach(m),w=N(Te,`. Please check all the model features on this
11
- `),E=x(Te,"A",{href:!0,target:!0});var Rt=T(E);f=N(Rt,"Space"),Rt.forEach(m),I=N(Te,"."),Te.forEach(m),M=R(oe),c=x(oe,"SMALL",{});var Ie=T(c);v=x(Ie,"H4",{id:!0});var Dt=T(v);b=N(Dt,"Thanks to"),Dt.forEach(m),g=R(Ie),u=x(Ie,"P",{});var Y=T(u);y=N(Y,"Authors: "),O=x(Y,"A",{href:!0,target:!0});var Bt=T(O);P=N(Bt,"Yuming Jiang"),Bt.forEach(m),S=N(Y,`,
12
- `),C=x(Y,"A",{href:!0,target:!0});var jt=T(C);B=N(jt,"Shuai Yang"),jt.forEach(m),L=N(Y,`,
13
- `),U=x(Y,"A",{href:!0,target:!0});var Lt=T(U);V=N(Lt,"Haonan Qiu"),Lt.forEach(m),j=N(Y,`,
14
- `),D=x(Y,"A",{href:!0,target:!0});var Ht=T(D);H=N(Ht,"Wayne Wu"),Ht.forEach(m),K=N(Y,`,
15
- `),Z=x(Y,"A",{href:!0,target:!0});var Ut=T(Z);_e=N(Ut,"Chen Change Loy"),Ut.forEach(m),ye=N(Y,`
16
- and `),$=x(Y,"A",{href:!0,target:!0});var Vt=T($);we=N(Vt,"Ziwei Liu"),Vt.forEach(m),ne=x(Y,"BR",{}),Y.forEach(m),Ze=R(Ie),Ee=x(Ie,"P",{});var wt=T(Ee);ie=x(wt,"A",{href:!0,target:!0});var Wt=T(ie);Ke=N(Wt,"@hysts"),Wt.forEach(m),et=N(wt," for the original Space implementation"),wt.forEach(m),Ie.forEach(m),tt=R(oe),ee=x(oe,"DETAILS",{});var Oe=T(ee);ke=x(Oe,"SUMMARY",{class:!0});var Gt=T(ke);Le=x(Gt,"SMALL",{});var qt=T(Le);at=N(qt,"More"),qt.forEach(m),Gt.forEach(m),rt=R(Oe),te=x(Oe,"P",{});var Ce=T(te);lt=N(Ce,"The backend is powered by a "),ce=x(Ce,"A",{href:!0,target:!0});var Ft=T(ce);ot=N(Ft,"Gradio"),Ft.forEach(m),st=N(Ce,`
17
- application running on
18
- `),ue=x(Ce,"A",{href:!0,target:!0});var Yt=T(ue);nt=N(Yt,"Spaces"),Yt.forEach(m),it=N(Ce,`. You can
19
- also check the source code and clone it locally if you want:`),Ce.forEach(m),ct=R(Oe),He=x(Oe,"P",{});var Jt=T(He);xe=x(Jt,"CODE",{class:!0});var $t=T(xe);ut=N($t,"git clone https://huggingface.co/spaces/CVPR/Text2Human"),$t.forEach(m),Jt.forEach(m),Oe.forEach(m),oe.forEach(m),pt=R(G),Pe(pe.$$.fragment,G),dt=R(G),Pe(de.$$.fragment,G),ht=R(G),le=x(G,"DIV",{class:!0});var qe=T(le);Pe(he.$$.fragment,qe),ft=R(qe),Pe(fe.$$.fragment,qe),qe.forEach(m),gt=R(G),ae=x(G,"BUTTON",{class:!0});var Qt=T(ae);vt=N(Qt,"Generate Human"),Qt.forEach(m),mt=R(G),re=x(G,"BUTTON",{class:!0});var Xt=T(re);bt=N(Xt,"Save Result"),Xt.forEach(m),_t=R(G),Pe(ge.$$.fragment,G),G.forEach(m),this.h()},h(){s(d,"href","https://github.com/yumingj/Text2Human"),s(d,"target","_blank"),s(E,"href","https://huggingface.co/spaces/CVPR/Text2Human"),s(E,"target","_blank"),s(v,"id","thanks-to"),s(O,"href","https://yumingj.github.io/"),s(O,"target","_blank"),s(C,"href","https://williamyang1991.github.io/"),s(C,"target","_blank"),s(U,"href","http://haonanqiu.com/"),s(U,"target","_blank"),s(D,"href","https://wywu.github.io/"),s(D,"target","_blank"),s(Z,"href","https://www.mmlab-ntu.com/person/ccloy/"),s(Z,"target","_blank"),s($,"href","https://liuziwei7.github.io/"),s($,"target","_blank"),s(ie,"href","https://huggingface.co/hysts"),s(ie,"target","_blank"),s(ke,"class","cursor-pointer"),s(ce,"href","https://gradio.app/"),s(ce,"target","_blank"),s(ue,"href","https://huggingface.co/spaces/CVPR/Text2Human"),s(ue,"target","_blank"),s(xe,"class","block whitespace-pre overflow-x-scroll"),s(t,"class","prose dark:prose-invert"),s(le,"class","drawings py-3 -mx-3 svelte-237ry5"),ae.disabled=We=r[0]===!0,s(ae,"class","svelte-237ry5"),re.disabled=Ge=r[1]===!0||!r[2],s(re,"class","svelte-237ry5"),s(e,"class","max-w-screen-md mx-auto px-3 py-5 relative z-0")},m(W,G){J(W,e,G),l(e,t),l(t,a),l(a,n),l(t,o),l(t,i),l(i,p),l(i,d),l(d,_),l(_,h),l(i,w),l(i,E),l(E,f),l(i,I),l(t,M),l(t,c),l(c,v),l(v,b),l(c,g),l(c,u),l(u,y),l(u,O),l(O,P),l(u,S),l(u,C),l(C,B),l(u,L),l(u,U),l(U,V),l(u,j),l(u,D),l(D,H),l(u,K),l(u,Z),l(Z,_e),l(u,ye),l(u,$),l($,we),l(u,ne),l(c,Ze),l(c,Ee),l(Ee,ie),l(ie,Ke),l(Ee,et),l(t,tt),l(t,ee),l(ee,ke),l(ke,Le),l(Le,at),l(ee,rt),l(ee,te),l(te,lt),l(te,ce),l(ce,ot),l(te,st),l(te,ue),l(ue,nt),l(te,it),l(ee,ct),l(ee,He),l(He,xe),l(xe,ut),l(e,pt),Ae(pe,e,null),l(e,dt),Ae(de,e,null),l(e,ht),l(e,le),Ae(he,le,null),l(le,ft),Ae(fe,le,null),l(e,gt),l(e,ae),l(ae,vt),l(e,mt),l(e,re),l(re,bt),l(e,_t),Ae(ge,e,null),Se=!0,yt||(Pt=[q(ae,"click",Ye(r[3])),q(re,"click",Ye(r[4]))],yt=!0)},p(W,[G]){(!Se||G&1&&We!==(We=W[0]===!0))&&(ae.disabled=We),(!Se||G&6&&Ge!==(Ge=W[1]===!0||!W[2]))&&(re.disabled=Ge)},i(W){Se||(Ne(pe.$$.fragment,W),Ne(de.$$.fragment,W),Ne(he.$$.fragment,W),Ne(fe.$$.fragment,W),Ne(ge.$$.fragment,W),Se=!0)},o(W){ze(pe.$$.fragment,W),ze(de.$$.fragment,W),ze(he.$$.fragment,W),ze(fe.$$.fragment,W),ze(ge.$$.fragment,W),Se=!1},d(W){W&&m(e),Re(pe),Re(de),Re(he),Re(fe),Re(ge),yt=!1,Mt(Pt)}}}function Fa(r,e,t){let a,n,o;return F(r,je,d=>t(0,a=d)),F(r,$e,d=>t(1,n=d)),F(r,Tt,d=>t(2,o=d)),[a,n,o,()=>Q(je,a=!0,a),()=>Q($e,n=!0,n)]}class $a extends ve{constructor(e){super(),me(this,e,Fa,qa,be,{})}}export{$a as default};
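A small detail in the drawing code above: the canvas has a fixed internal resolution (256×512) that usually differs from its CSS display size, so the minified `ua` helper rescales pointer coordinates by `canvas.width / rect.width` before drawing. A language-neutral sketch of that mapping (all names here are mine, not from the bundle):

```python
from dataclasses import dataclass

@dataclass
class Rect:
    """Bounding box of the canvas element in CSS pixels."""
    left: float
    top: float
    width: float
    height: float

def to_canvas_coords(rect: Rect, canvas_w: int, canvas_h: int,
                     client_x: float, client_y: float) -> tuple[float, float]:
    """Map a pointer position (CSS pixels) to canvas pixels."""
    x = (client_x - rect.left) * (canvas_w / rect.width)
    y = (client_y - rect.top) * (canvas_h / rect.height)
    return x, y

# e.g. a 256x512 canvas displayed at 128x256 CSS pixels:
print(to_canvas_coords(Rect(0, 0, 128, 256), 256, 512, 64, 128))  # (128.0, 256.0)
```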
 
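The `Wa` component near the end of the bundle drives a legacy Gradio queue API: it POSTs the drawing to `/api/queue/push/` with `fn_index: 2` and a random `session_hash`, then polls `/api/queue/status/` once per second until the job leaves `QUEUED`/`PENDING`. A Python sketch of the same flow; the endpoint paths, payload shape, and status strings are taken from the minified client, while the base URL is a placeholder:

```python
import time
import uuid
import requests

API_BASE = "https://example-space.hf.space"  # placeholder; the bundle reads this from config

def predict(image_data_url: str, texture: str, steps: int, seed: int):
    """Push a job to the (legacy) Gradio queue and poll until it finishes."""
    session_hash = uuid.uuid4().hex[:11]
    push = requests.post(
        f"{API_BASE}/api/queue/push/",
        json={
            "fn_index": 2,
            "data": [image_data_url, texture, steps, seed],
            "action": "predict",
            "session_hash": session_hash,
        },
    ).json()
    job_hash = push["hash"]

    while True:
        status = requests.post(
            f"{API_BASE}/api/queue/status/", json={"hash": job_hash}
        ).json()
        if status["status"] == "COMPLETE":
            return status["data"]  # data[0] holds the generated image
        if status["status"] == "FAILED":
            raise RuntimeError("generation failed")
        time.sleep(1)  # QUEUED / PENDING: poll again, as the client does
```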
spaces/CVPR/regionclip-demo/detectron2/layers/batch_norm.py DELETED
@@ -1,243 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import logging
3
- import torch
4
- import torch.distributed as dist
5
- from fvcore.nn.distributed import differentiable_all_reduce
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from detectron2.utils import comm, env
10
-
11
- from .wrappers import BatchNorm2d
12
-
13
-
14
- class FrozenBatchNorm2d(nn.Module):
15
- """
16
- BatchNorm2d where the batch statistics and the affine parameters are fixed.
17
-
18
- It contains non-trainable buffers called
19
- "weight" and "bias", "running_mean", "running_var",
20
- initialized to perform identity transformation.
21
-
22
- The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
23
- which are computed from the original four parameters of BN.
24
- The affine transform `x * weight + bias` will perform the equivalent
25
- computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
26
- When loading a backbone model from Caffe2, "running_mean" and "running_var"
27
- will be left unchanged as identity transformation.
28
-
29
- Other pre-trained backbone models may contain all 4 parameters.
30
-
31
- The forward is implemented by `F.batch_norm(..., training=False)`.
32
- """
33
-
34
- _version = 3
35
-
36
- def __init__(self, num_features, eps=1e-5):
37
- super().__init__()
38
- self.num_features = num_features
39
- self.eps = eps
40
- self.register_buffer("weight", torch.ones(num_features))
41
- self.register_buffer("bias", torch.zeros(num_features))
42
- self.register_buffer("running_mean", torch.zeros(num_features))
43
- self.register_buffer("running_var", torch.ones(num_features) - eps)
44
-
45
- def forward(self, x):
46
- if x.requires_grad:
47
- # When gradients are needed, F.batch_norm will use extra memory
48
- # because its backward op computes gradients for weight/bias as well.
49
- scale = self.weight * (self.running_var + self.eps).rsqrt()
50
- bias = self.bias - self.running_mean * scale
51
- scale = scale.reshape(1, -1, 1, 1)
52
- bias = bias.reshape(1, -1, 1, 1)
53
- out_dtype = x.dtype # may be half
54
- return x * scale.to(out_dtype) + bias.to(out_dtype)
55
- else:
56
- # When gradients are not needed, F.batch_norm is a single fused op
57
- # and provides more optimization opportunities.
58
- return F.batch_norm(
59
- x,
60
- self.running_mean,
61
- self.running_var,
62
- self.weight,
63
- self.bias,
64
- training=False,
65
- eps=self.eps,
66
- )
67
-
68
- def _load_from_state_dict(
69
- self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
70
- ):
71
- version = local_metadata.get("version", None)
72
-
73
- if version is None or version < 2:
74
- # when using offline modules, avoid overwriting running mean and var for loaded weights
75
- skip_reset = False
76
- for k_n in state_dict: # checkpoint weights
77
- if 'ignore_others' in k_n: #if 'offline' in k_n:
78
- skip_reset = True
79
- if not skip_reset:
80
- # No running_mean/var in early versions
81
- # This will silence the warnings
82
- if prefix + "running_mean" not in state_dict:
83
- state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
84
- if prefix + "running_var" not in state_dict:
85
- state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
86
-
87
- # NOTE: if a checkpoint is trained with BatchNorm and loaded (together with
88
- # version number) to FrozenBatchNorm, running_var will be wrong. One solution
89
- # is to remove the version number from the checkpoint.
90
- if version is not None and version < 3:
91
- logger = logging.getLogger(__name__)
92
- logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
93
- # In version < 3, running_var are used without +eps.
94
- state_dict[prefix + "running_var"] -= self.eps
95
-
96
- super()._load_from_state_dict(
97
- state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
98
- )
99
-
100
- def __repr__(self):
101
- return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
102
-
103
- @classmethod
104
- def convert_frozen_batchnorm(cls, module):
105
- """
106
- Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
107
-
108
- Args:
109
- module (torch.nn.Module):
110
-
111
- Returns:
112
- If module is BatchNorm/SyncBatchNorm, returns a new module.
113
- Otherwise, in-place convert module and return it.
114
-
115
- Similar to convert_sync_batchnorm in
116
- https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
117
- """
118
- bn_module = nn.modules.batchnorm
119
- bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
120
- res = module
121
- if isinstance(module, bn_module):
122
- res = cls(module.num_features)
123
- if module.affine:
124
- res.weight.data = module.weight.data.clone().detach()
125
- res.bias.data = module.bias.data.clone().detach()
126
- res.running_mean.data = module.running_mean.data
127
- res.running_var.data = module.running_var.data
128
- res.eps = module.eps
129
- else:
130
- for name, child in module.named_children():
131
- new_child = cls.convert_frozen_batchnorm(child)
132
- if new_child is not child:
133
- res.add_module(name, new_child)
134
- return res
135
-
136
-
137
- def get_norm(norm, out_channels):
138
- """
139
- Args:
140
- norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
141
- or a callable that takes a channel number and returns
142
- the normalization layer as a nn.Module.
143
-
144
- Returns:
145
- nn.Module or None: the normalization layer
146
- """
147
- if norm is None:
148
- return None
149
- if isinstance(norm, str):
150
- if len(norm) == 0:
151
- return None
152
- norm = {
153
- "BN": BatchNorm2d,
154
- # Fixed in https://github.com/pytorch/pytorch/pull/36382
155
- "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
156
- "FrozenBN": FrozenBatchNorm2d,
157
- "GN": lambda channels: nn.GroupNorm(32, channels),
158
- # for debugging:
159
- "nnSyncBN": nn.SyncBatchNorm,
160
- "naiveSyncBN": NaiveSyncBatchNorm,
161
- }[norm]
162
- return norm(out_channels)
163
-
164
-
165
- class NaiveSyncBatchNorm(BatchNorm2d):
166
- """
167
- In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
168
- when the batch size on each worker is different.
169
- (e.g., when scale augmentation is used, or when it is applied to mask head).
170
-
171
- This is a slower but correct alternative to `nn.SyncBatchNorm`.
172
-
173
- Note:
174
- There isn't a single definition of Sync BatchNorm.
175
-
176
- When ``stats_mode==""``, this module computes overall statistics by using
177
- statistics of each worker with equal weight. The result is true statistics
178
- of all samples (as if they are all on one worker) only when all workers
179
- have the same (N, H, W). This mode does not support inputs with zero batch size.
180
-
181
- When ``stats_mode=="N"``, this module computes overall statistics by weighting
182
- the statistics of each worker by their ``N``. The result is true statistics
183
- of all samples (as if they are all on one worker) only when all workers
184
- have the same (H, W). It is slower than ``stats_mode==""``.
185
-
186
- Even though the result of this module may not be the true statistics of all samples,
187
- it may still be reasonable because it might be preferrable to assign equal weights
188
- to all workers, regardless of their (H, W) dimension, instead of putting larger weight
189
- on larger images. From preliminary experiments, little difference is found between such
190
- a simplified implementation and an accurate computation of overall mean & variance.
191
- """
192
-
193
- def __init__(self, *args, stats_mode="", **kwargs):
194
- super().__init__(*args, **kwargs)
195
- assert stats_mode in ["", "N"]
196
- self._stats_mode = stats_mode
197
-
198
- def forward(self, input):
199
- if comm.get_world_size() == 1 or not self.training:
200
- return super().forward(input)
201
-
202
- B, C = input.shape[0], input.shape[1]
203
-
204
- half_input = input.dtype == torch.float16
205
- if half_input:
206
- # fp16 does not have good enough numerics for the reduction here
207
- input = input.float()
208
- mean = torch.mean(input, dim=[0, 2, 3])
209
- meansqr = torch.mean(input * input, dim=[0, 2, 3])
210
-
211
- if self._stats_mode == "":
212
- assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
213
- vec = torch.cat([mean, meansqr], dim=0)
214
- vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size())
215
- mean, meansqr = torch.split(vec, C)
216
- momentum = self.momentum
217
- else:
218
- if B == 0:
219
- vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
220
- vec = vec + input.sum() # make sure there is gradient w.r.t input
221
- else:
222
- vec = torch.cat(
223
- [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
224
- )
225
- vec = differentiable_all_reduce(vec * B)
226
-
227
- total_batch = vec[-1].detach()
228
- momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0
229
- mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero
230
-
231
- var = meansqr - mean * mean
232
- invstd = torch.rsqrt(var + self.eps)
233
- scale = self.weight * invstd
234
- bias = self.bias - mean * scale
235
- scale = scale.reshape(1, -1, 1, 1)
236
- bias = bias.reshape(1, -1, 1, 1)
237
-
238
- self.running_mean += momentum * (mean.detach() - self.running_mean)
239
- self.running_var += momentum * (var.detach() - self.running_var)
240
- ret = input * scale + bias
241
- if half_input:
242
- ret = ret.half()
243
- return ret
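For readers skimming the deleted `FrozenBatchNorm2d`: its `forward` folds the frozen statistics into a single per-channel affine before applying it, which is also why a Caffe2 checkpoint can ship only `weight` and `bias` with the running statistics pre-multiplied in. A minimal sanity check (assuming only that PyTorch is installed) that the folded form matches `F.batch_norm(..., training=False)`:

```python
import torch
import torch.nn.functional as F

num_features, eps = 8, 1e-5
x = torch.randn(2, num_features, 4, 4)
weight = torch.randn(num_features)
bias = torch.randn(num_features)
running_mean = torch.randn(num_features)
running_var = torch.rand(num_features) + 0.5  # keep variance positive

# Fold the frozen stats into one affine, as FrozenBatchNorm2d.forward does.
scale = weight * (running_var + eps).rsqrt()
shift = bias - running_mean * scale
folded = x * scale.reshape(1, -1, 1, 1) + shift.reshape(1, -1, 1, 1)

reference = F.batch_norm(
    x, running_mean, running_var, weight, bias, training=False, eps=eps
)
assert torch.allclose(folded, reference, atol=1e-5)
```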
 
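`NaiveSyncBatchNorm` above reconstructs the global variance from all-reduced first and second moments, i.e. `var = E[x²] − E[x]²`. A single-process illustration of that identity, using two tensors as stand-ins for two workers with identical (N, H, W) (the `stats_mode == ""` case):

```python
import torch

# Two equally sized "workers"; stats_mode == "" averages their moments.
a = torch.randn(4, 3, 8, 8)
b = torch.randn(4, 3, 8, 8)

mean = torch.stack([t.mean(dim=[0, 2, 3]) for t in (a, b)]).mean(0)
meansqr = torch.stack([(t * t).mean(dim=[0, 2, 3]) for t in (a, b)]).mean(0)
var = meansqr - mean * mean  # E[x^2] - E[x]^2, the biased variance

both = torch.cat([a, b], dim=0)  # "as if all samples were on one worker"
assert torch.allclose(mean, both.mean(dim=[0, 2, 3]), atol=1e-6)
assert torch.allclose(var, both.var(dim=[0, 2, 3], unbiased=False), atol=1e-5)
```

With unequal per-worker batch sizes, the `stats_mode == "N"` branch instead weights each worker's moments by its `N` before the reduction, as the class docstring notes.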
spaces/CamCam17/Alexwww-davide-comic-book-characters/Dockerfile DELETED
@@ -1,17 +0,0 @@
1
- FROM ghcr.io/livebook-dev/livebook:latest-cuda11.8
2
-
3
- ENV LIVEBOOK_APP_SERVICE_NAME "🐳 Hugging Face - $SPACE_TITLE"
4
- ENV LIVEBOOK_APP_SERVICE_URL "https://huggingface.co/spaces/$SPACE_AUTHOR_NAME/$SPACE_REPO_NAME"
5
- ENV LIVEBOOK_UPDATE_INSTRUCTIONS_URL "https://livebook.dev"
6
- ENV LIVEBOOK_WITHIN_IFRAME "true"
7
- ENV LIVEBOOK_APPS_PATH "/public-apps"
8
- ENV LIVEBOOK_APPS_PATH_WARMUP "manual"
9
- ENV LIVEBOOK_DATA_PATH "/data"
10
- ENV LIVEBOOK_PORT 7860
11
-
12
- EXPOSE 7860
13
- USER root
14
- COPY public-apps/ /public-apps
15
- RUN mkdir -p /data
16
- RUN chmod 777 /data
17
- RUN /app/bin/warmup_apps.sh
 
spaces/CikeyQI/meme-api/meme_generator/memes/loading/__init__.py DELETED
@@ -1,36 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from PIL import ImageFilter
5
- from pil_utils import BuildImage
6
-
7
- from meme_generator import add_meme
8
- from meme_generator.utils import make_jpg_or_gif
9
-
10
- img_dir = Path(__file__).parent / "images"
11
-
12
-
13
- def loading(images: List[BuildImage], texts, args):
14
- img_big = images[0].convert("RGBA").resize_width(500)
15
- img_big = img_big.filter(ImageFilter.GaussianBlur(radius=3))
16
- h1 = img_big.height
17
- mask = BuildImage.new("RGBA", img_big.size, (0, 0, 0, 32))
18
- icon = BuildImage.open(img_dir / "icon.png")
19
- img_big.paste(mask, alpha=True).paste(icon, (200, int(h1 / 2) - 50), alpha=True)
20
-
21
- def make(img: BuildImage) -> BuildImage:
22
- img_small = img.convert("RGBA").resize_width(100)
23
- h2 = max(img_small.height, 80)
24
- frame = BuildImage.new("RGBA", (500, h1 + h2 + 10), "white")
25
- frame.paste(img_big, alpha=True).paste(
26
- img_small, (100, h1 + 5 + (h2 - img_small.height) // 2), alpha=True
27
- )
28
- frame.draw_text(
29
- (210, h1 + 5, 480, h1 + h2 + 5), "不出来", halign="left", max_fontsize=60
30
- )
31
- return frame
32
-
33
- return make_jpg_or_gif(images[0], make)
34
-
35
-
36
- add_meme("loading", loading, min_images=1, max_images=1, keywords=["加载中"])
 
spaces/CofAI/chat.b4/g4f/Provider/Providers/helpers/theb.py DELETED
@@ -1,48 +0,0 @@
1
- import json
2
- import sys
3
- from re import findall
4
- from curl_cffi import requests
5
-
6
- config = json.loads(sys.argv[1])
7
- prompt = config['messages'][-1]['content']
8
-
9
- headers = {
10
- 'authority': 'chatbot.theb.ai',
11
- 'accept': 'application/json, text/plain, */*',
12
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
13
- 'content-type': 'application/json',
14
- 'origin': 'https://chatbot.theb.ai',
15
- 'referer': 'https://chatbot.theb.ai/',
16
- 'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
17
- 'sec-ch-ua-mobile': '?0',
18
- 'sec-ch-ua-platform': '"macOS"',
19
- 'sec-fetch-dest': 'empty',
20
- 'sec-fetch-mode': 'cors',
21
- 'sec-fetch-site': 'same-origin',
22
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
23
- }
24
-
25
- json_data = {
26
- 'prompt': prompt,
27
- 'options': {}
28
- }
29
-
30
- def format(chunk):
31
- try:
32
- completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
33
- print(completion_chunk, flush=True, end='')
34
-
35
- except Exception as e:
36
- print(f'[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]', flush=True)
37
- return
38
-
39
- while True:
40
- try:
41
- response = requests.post('https://chatbot.theb.ai/api/chat-process',
42
- headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
43
-
44
- exit(0)
45
-
46
- except Exception as e:
47
- print('[ERROR] an error occurred, retrying... |', e, flush=True)
48
- continue
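Rather than parsing the streamed JSON, the helper above regex-scrapes each chunk for the text between `content":"` and `"},"fin`. A self-contained illustration on a made-up chunk (the payload shape is an assumption inferred from the regex itself):

```python
import re

# Hypothetical chunk shaped like the responses the regex above expects.
chunk = b'{"role":"assistant","content":"Hello there"},"finish_reason":null}'

match = re.findall(r'content":"(.*)"},"fin', chunk.decode())
print(match[0] if match else "<no match>")  # -> Hello there
```

The greedy `(.*)` makes this brittle: a completion whose text itself contains `"},` can confuse the match, which is presumably why the helper logs the raw chunk and retries on failure.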