parquet-converter committed
Commit 80fec49 · 1 Parent(s): a8e8663

Update parquet files (step 10 of 121)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1368565466ki/Satdia/transforms.py +0 -193
  2. spaces/17TheWord/vits-models/utils.py +0 -225
  3. spaces/1gistliPinn/ChatGPT4/Examples/Chak De India Telugu Movie Free Torrent Download !!TOP!!.md +0 -38
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DJ Studio 5 APK - The Ultimate Music Mixer App for Android Devices.md +0 -102
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre Clash Mini APK el juego de batallas automticas en tiempo real con los personajes de Clash.md +0 -131
  6. spaces/1phancelerku/anime-remove-background/Download Video TikTok Without Watermark - Fast Easy and Free - Online TikTok Video Download.md +0 -144
  7. spaces/1phancelerku/anime-remove-background/Download the Word Game that Keeps You on Your Toes Word Blitz.md +0 -106
  8. spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/tts/GenerSpeech.py +0 -123
  9. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual.py +0 -123
  10. spaces/AIGText/GlyphControl/ldm/modules/midas/api.py +0 -170
  11. spaces/AISuperheroes/01ST-CSV-Dataset-Analyzer/app.py +0 -83
  12. spaces/AIZero2HeroBootcamp/Memory/README.md +0 -12
  13. spaces/Acapellas/vocalinstrumentalremover/app.py +0 -25
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/TapCell.js +0 -20
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/Menu.d.ts +0 -49
  16. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/代码重写为全英文_多线程.py +0 -138
  17. spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/model_irse.py +0 -84
  18. spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_configs/hyperparameters.py +0 -28
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +0 -473
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/__init__.py +0 -20
  21. spaces/Andy1621/uniformer_image_detection/configs/atss/README.md +0 -21
  22. spaces/Andy1621/uniformer_image_detection/mmdet/core/post_processing/__init__.py +0 -8
  23. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/dynamic_roi_head.py +0 -154
  24. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/grid_roi_head.py +0 -176
  25. spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py +0 -2
  26. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/chromadb.py +0 -376
  27. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/__init__.py +0 -2
  28. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/upfirdn2d.py +0 -330
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/filetypes.py +0 -27
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/cygwinccompiler.py +0 -364
  31. spaces/AyushP/PolicyCompareBot/README.md +0 -12
  32. spaces/Benson/text-generation/Examples/5 Documento De Pregunta Beca 2016 Pdf.md +0 -76
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/util.py +0 -1932
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/_musllinux.py +0 -136
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/abc.py +0 -33
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py +0 -216
  37. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/evaluation/cityscapes_evaluation.py +0 -112
  38. spaces/CVPR/LIVE/thrust/thrust/allocate_unique.h +0 -444
  39. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/reverse.h +0 -44
  40. spaces/ChillyFaze/runwayml-stable-diffusion-v1-5/app.py +0 -3
  41. spaces/CrabApple/prompthero-openjourney-v2/app.py +0 -3
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/middleware/wsgi.py +0 -1
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/filterPen.py +0 -164
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-22108117.js +0 -0
  45. spaces/DaleChen/AutoGPT/autogpt/chat.py +0 -175
  46. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/postprocessing/post_process.py +0 -34
  47. spaces/Denevan/BingAI/README.md +0 -12
  48. spaces/DragGan/DragGan/torch_utils/training_stats.py +0 -268
  49. spaces/Duskfallcrew/Duskfallcrew-Osenayan_Mix/app.py +0 -19
  50. spaces/ECCV2022/PSG/OpenPSG/configs/_base_/schedules/schedule_1x.py +0 -10
spaces/1368565466ki/Satdia/transforms.py DELETED
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
-                                           unnormalized_widths,
-                                           unnormalized_heights,
-                                           unnormalized_derivatives,
-                                           inverse=False,
-                                           tails=None,
-                                           tail_bound=1.,
-                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                           min_derivative=DEFAULT_MIN_DERIVATIVE):
-
-    if tails is None:
-        spline_fn = rational_quadratic_spline
-        spline_kwargs = {}
-    else:
-        spline_fn = unconstrained_rational_quadratic_spline
-        spline_kwargs = {
-            'tails': tails,
-            'tail_bound': tail_bound
-        }
-
-    outputs, logabsdet = spline_fn(
-        inputs=inputs,
-        unnormalized_widths=unnormalized_widths,
-        unnormalized_heights=unnormalized_heights,
-        unnormalized_derivatives=unnormalized_derivatives,
-        inverse=inverse,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative,
-        **spline_kwargs
-    )
-    return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
-    bin_locations[..., -1] += eps
-    return torch.sum(
-        inputs[..., None] >= bin_locations,
-        dim=-1
-    ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
-                                            unnormalized_widths,
-                                            unnormalized_heights,
-                                            unnormalized_derivatives,
-                                            inverse=False,
-                                            tails='linear',
-                                            tail_bound=1.,
-                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
-    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
-    outside_interval_mask = ~inside_interval_mask
-
-    outputs = torch.zeros_like(inputs)
-    logabsdet = torch.zeros_like(inputs)
-
-    if tails == 'linear':
-        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
-        constant = np.log(np.exp(1 - min_derivative) - 1)
-        unnormalized_derivatives[..., 0] = constant
-        unnormalized_derivatives[..., -1] = constant
-
-        outputs[outside_interval_mask] = inputs[outside_interval_mask]
-        logabsdet[outside_interval_mask] = 0
-    else:
-        raise RuntimeError('{} tails are not implemented.'.format(tails))
-
-    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
-        inputs=inputs[inside_interval_mask],
-        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
-        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
-        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
-        inverse=inverse,
-        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative
-    )
-
-    return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
-                              unnormalized_widths,
-                              unnormalized_heights,
-                              unnormalized_derivatives,
-                              inverse=False,
-                              left=0., right=1., bottom=0., top=1.,
-                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                              min_derivative=DEFAULT_MIN_DERIVATIVE):
-    if torch.min(inputs) < left or torch.max(inputs) > right:
-        raise ValueError('Input to a transform is not within its domain')
-
-    num_bins = unnormalized_widths.shape[-1]
-
-    if min_bin_width * num_bins > 1.0:
-        raise ValueError('Minimal bin width too large for the number of bins')
-    if min_bin_height * num_bins > 1.0:
-        raise ValueError('Minimal bin height too large for the number of bins')
-
-    widths = F.softmax(unnormalized_widths, dim=-1)
-    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
-    cumwidths = torch.cumsum(widths, dim=-1)
-    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
-    cumwidths = (right - left) * cumwidths + left
-    cumwidths[..., 0] = left
-    cumwidths[..., -1] = right
-    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
-    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
-    heights = F.softmax(unnormalized_heights, dim=-1)
-    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
-    cumheights = torch.cumsum(heights, dim=-1)
-    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
-    cumheights = (top - bottom) * cumheights + bottom
-    cumheights[..., 0] = bottom
-    cumheights[..., -1] = top
-    heights = cumheights[..., 1:] - cumheights[..., :-1]
-
-    if inverse:
-        bin_idx = searchsorted(cumheights, inputs)[..., None]
-    else:
-        bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
-    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
-    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
-    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
-    delta = heights / widths
-    input_delta = delta.gather(-1, bin_idx)[..., 0]
-
-    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
-    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
-    input_heights = heights.gather(-1, bin_idx)[..., 0]
-
-    if inverse:
-        a = (((inputs - input_cumheights) * (input_derivatives
-                                             + input_derivatives_plus_one
-                                             - 2 * input_delta)
-              + input_heights * (input_delta - input_derivatives)))
-        b = (input_heights * input_derivatives
-             - (inputs - input_cumheights) * (input_derivatives
-                                              + input_derivatives_plus_one
-                                              - 2 * input_delta))
-        c = -input_delta * (inputs - input_cumheights)
-
-        discriminant = b.pow(2) - 4 * a * c
-        assert (discriminant >= 0).all()
-
-        root = (2 * c) / (-b - torch.sqrt(discriminant))
-        outputs = root * input_bin_widths + input_cumwidths
-
-        theta_one_minus_theta = root * (1 - root)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - root).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, -logabsdet
-    else:
-        theta = (inputs - input_cumwidths) / input_bin_widths
-        theta_one_minus_theta = theta * (1 - theta)
-
-        numerator = input_heights * (input_delta * theta.pow(2)
-                                     + input_derivatives * theta_one_minus_theta)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        outputs = input_cumheights + numerator / denominator
-
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - theta).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, logabsdet

spaces/17TheWord/vits-models/utils.py DELETED
@@ -1,225 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-import librosa
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
-    assert os.path.isfile(checkpoint_path)
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
-    if optimizer is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    new_state_dict = {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except:
-            logger.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
-    if hasattr(model, 'module'):
-        model.module.load_state_dict(new_state_dict)
-    else:
-        model.load_state_dict(new_state_dict)
-    logger.info("Loaded checkpoint '{}' (iteration {})".format(
-        checkpoint_path, iteration))
-    return model, optimizer, learning_rate, iteration
-
-
-def plot_spectrogram_to_numpy(spectrogram):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(10, 2))
-    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
-                   interpolation='none')
-    plt.colorbar(im, ax=ax)
-    plt.xlabel("Frames")
-    plt.ylabel("Channels")
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
-                   interpolation='none')
-    fig.colorbar(im, ax=ax)
-    xlabel = 'Decoder timestep'
-    if info is not None:
-        xlabel += '\n\n' + info
-    plt.xlabel(xlabel)
-    plt.ylabel('Encoder timestep')
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
-    audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
-    return torch.FloatTensor(audio.astype(np.float32))
-
-
-def load_filepaths_and_text(filename, split="|"):
-    with open(filename, encoding='utf-8') as f:
-        filepaths_and_text = [line.strip().split(split) for line in f]
-    return filepaths_and_text
-
-
-def get_hparams(init=True):
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
-                        help='JSON file for configuration')
-    parser.add_argument('-m', '--model', type=str, required=True,
-                        help='Model name')
-
-    args = parser.parse_args()
-    model_dir = os.path.join("./logs", args.model)
-
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-
-    config_path = args.config
-    config_save_path = os.path.join(model_dir, "config.json")
-    if init:
-        with open(config_path, "r") as f:
-            data = f.read()
-        with open(config_save_path, "w") as f:
-            f.write(data)
-    else:
-        with open(config_save_path, "r") as f:
-            data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_dir(model_dir):
-    config_save_path = os.path.join(model_dir, "config.json")
-    with open(config_save_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_file(config_path):
-    with open(config_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    return hparams
-
-
-def check_git_hash(model_dir):
-    source_dir = os.path.dirname(os.path.realpath(__file__))
-    if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-            source_dir
-        ))
-        return
-
-    cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-    path = os.path.join(model_dir, "githash")
-    if os.path.exists(path):
-        saved_hash = open(path).read()
-        if saved_hash != cur_hash:
-            logger.warn("git hash values are different. {}(saved) != {}(current)".format(
-                saved_hash[:8], cur_hash[:8]))
-    else:
-        open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
-    global logger
-    logger = logging.getLogger(os.path.basename(model_dir))
-    logger.setLevel(logging.DEBUG)
-
-    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-    h = logging.FileHandler(os.path.join(model_dir, filename))
-    h.setLevel(logging.DEBUG)
-    h.setFormatter(formatter)
-    logger.addHandler(h)
-    return logger
-
-
-class HParams():
-    def __init__(self, **kwargs):
-        for k, v in kwargs.items():
-            if type(v) == dict:
-                v = HParams(**v)
-            self[k] = v
-
-    def keys(self):
-        return self.__dict__.keys()
-
-    def items(self):
-        return self.__dict__.items()
-
-    def values(self):
-        return self.__dict__.values()
-
-    def __len__(self):
-        return len(self.__dict__)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        return setattr(self, key, value)
-
-    def __contains__(self, key):
-        return key in self.__dict__
-
-    def __repr__(self):
-        return self.__dict__.__repr__()

spaces/1gistliPinn/ChatGPT4/Examples/Chak De India Telugu Movie Free Torrent Download !!TOP!!.md DELETED
@@ -1,38 +0,0 @@
-<br />
-<h1>How to Watch Chak De India in Telugu for Free</h1>
-<p>Chak De India is a 2007 Bollywood sports drama film starring Shah Rukh Khan as a former hockey player who coaches the Indian women's national hockey team. The film was a critical and commercial success, winning several awards and inspiring many people with its patriotic and empowering message.</p>
-<p>If you are a fan of Chak De India and want to watch it in Telugu, you might be wondering how to do that without paying any money. Well, there are some ways to download or stream the movie for free using torrent sites or online platforms. However, you should be aware of the risks and legal issues involved in doing so.</p>
-<h2>Chak De India Telugu Movie Free Torrent Download</h2><br /><p><b><b>Download File</b> &#10042; <a href="https://imgfil.com/2uy0X9">https://imgfil.com/2uy0X9</a></b></p><br /><br />
-<h2>Using Torrent Sites</h2>
-<p>Torrent sites are websites that allow users to share files using peer-to-peer (P2P) technology. You can find almost any movie or show on torrent sites, including Chak De India in Telugu. However, you need to have a torrent client software installed on your device to download the files. Some of the popular torrent clients are uTorrent, BitTorrent, qBittorrent, etc.</p>
-<p>To use torrent sites, you need to follow these steps:</p>
-<ol>
-<li>Search for "Chak De India Telugu Movie Free Torrent Download" on any torrent site. Some of the popular torrent sites are The Pirate Bay, 1337x, RARBG, etc.</li>
-<li>Select a torrent file that has good quality and seeders. Seeders are users who have the complete file and are sharing it with others. The more seeders a torrent has, the faster it will download.</li>
-<li>Download the torrent file and open it with your torrent client. The torrent client will start downloading the movie from other users.</li>
-<li>Once the download is complete, you can watch the movie using any media player that supports subtitles.</li>
-</ol>
-<p>However, using torrent sites has some disadvantages and risks. For example:</p>
-<ul>
-<li>Torrent sites are illegal in many countries and regions. You might face legal action or fines if you are caught downloading or sharing copyrighted content.</li>
-<li>Torrent sites are often infected with malware or viruses that can harm your device or steal your personal information.</li>
-<li>Torrent sites are unreliable and unregulated. You might not find the movie you want or get a fake or corrupted file instead.</li>
-<li>Torrent sites can expose your IP address and location to other users or hackers who can track your online activity or attack your network.</li>
-</ul>
-<h2>Using Online Platforms</h2>
-<p>Online platforms are websites or apps that allow users to watch movies or shows online for free or with a subscription. You can find many online platforms that offer Chak De India in Telugu, such as Zee5, MX Player, YouTube, etc.</p>
-<p>To use online platforms, you need to follow these steps:</p>
-<ol>
-<li>Search for "Chak De India Telugu Movie Free Online" on any online platform. Some of the popular online platforms are Zee5[^1^], MX Player[^2^], YouTube[^3^], etc.</li>
-<li>Select the movie and click on play. You might need to create an account or sign in with your existing account to access some online platforms.</li>
-<li>Enjoy watching the movie online for free or with a subscription.</li>
-</ol>
-<p>However, using online platforms has some disadvantages and risks as well. For example:</p>
-<ul>
-<li>Online platforms might not have the movie you want or have it in low quality or with ads.</li>
-<li>Online platforms might require you to pay a subscription fee or register with your personal information to access some content.</li>
-<li>Online platforms might not be available in your region or country due to geo-restrictions or licensing issues.</li>
-<li>Online platforms might violate the copyrights of the original creators or distributors of the movie and face legal action or removal.</li>
-</ul></p> d5da3c52bf<br />
-<br />
-<br />

spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DJ Studio 5 APK - The Ultimate Music Mixer App for Android Devices.md DELETED
@@ -1,102 +0,0 @@
-
-<h1>Download DJ Studio 5 APK: A Free Music Mixer for Android</h1>
-<p>Do you love mixing music and creating your own beats? Do you want to turn your Android device into a virtual DJ station? If yes, then you should download DJ Studio 5 APK, a free music mixer app that lets you manipulate music in the palm of your hands. In this article, we will tell you what DJ Studio 5 APK is, what features it has, what are its pros and cons, how to download and install it on your Android device, and how to use it to mix music like a pro.</p>
-<h2>What is DJ Studio 5 APK?</h2>
-<p>DJ Studio 5 APK is a mobile DJ app that allows you to mix, remix, scratch, loop, or pitch your music. The app and all of its functions are completely free, unlike previous versions, which required an in-app purchase to unlock unlimited playback. DJ Studio 5 APK is designed to be user friendly, social, and responsive. You can access and browse your mp3 music library by folder, artist, album, or name. You can edit and re-order your playlist. You can also record your mixes live and share them on SoundCloud or other social networks.</p>
-<h2>download dj studio 5 apk</h2><br /><p><b><b>DOWNLOAD</b> &middot;&middot;&middot; <a href="https://urlin.us/2uT2pV">https://urlin.us/2uT2pV</a></b></p><br /><br />
-<h3>Features of DJ Studio 5 APK</h3>
-<p>Some of the key features of DJ Studio 5 APK are:</p>
-<ul>
-<li>Wide compatibility: Android 2.3 and more</li>
-<li>2 virtual turntables with cross fader</li>
-<li>Customize your decks with up to 7 skins</li>
-<li>Unique scratch engine and disc physics</li>
-<li>8 sound effects: Flanger, Phaser, Gate, Reverb, Bit crusher, 3D, Brake, and FlippingDouble</li>
-<li>3-bands equalizer for each deck</li>
-<li>10 customizable sample pads</li>
-<li>One CUE/RECALL point per deck</li>
-<li>IN/OUT and beat based loops</li>
-<li>Pre-Cueing with headphones or Y-cable</li>
-<li>Automatic landscape and portrait mode</li>
-<li>Live sound spectrum view with beats detection and zoom</li>
-<li>No registration fee, no limitation, no watermark, no trackers, no stealing data, no popups everywhere, everyday</li>
-<li>Only optional paid skins to support the developers' work</li>
-</ul>
-<h3>Pros and Cons of DJ Studio 5 APK</h3>
-<p>Like any other app, DJ Studio 5 APK has its pros and cons. Here are some of them:</p>
-<table>
-<tr><th>Pros</th><th>Cons</th></tr>
-<tr><td>Fully fledged mobile mixer</td><td>Steep learning curve for beginners</td></tr>
-<tr><td>Free and comprehensive</td><td>Some compatibility issues on smaller devices</td></tr>
-<tr><td>Social and responsive</td><td>No effects other than the ones provided</td></tr>
-<tr><td>Lots of options and customization</td><td>No support for external controllers or MIDI devices</td></tr>
-<tr><td>Frequent updates and improvements</td><td>No offline mode or backup option</td></tr>
-</table>
-<h2>How to Download and Install DJ Studio 5 APK on Android?</h2>
-<p>If you want to download and install DJ Studio 5 APK on your Android device, you need to follow these steps:</p>
-<h3>Step 1: Enable Unknown Sources</h3>
-<p>Since DJ Studio 5 APK is not available on the Google Play Store, you need to enable the installation of apps from unknown sources on your device. To do this, go to your device's settings, then security, and then toggle on the option that says "Unknown sources". This will allow you to install apps that are not from the official app store.</p>
-<h3>Step 2: Download DJ Studio 5 APK File</h3>
-<p>Next, you need to download the DJ Studio 5 APK file from a reliable source. You can use the link below to download the latest version of the app. The file size is about 13 MB and it is virus-free and safe to download.</p>
-<p><a href="">Download DJ Studio 5 APK</a></p>
-<p>Download DJ Studio 5 - Free music mixer for Android<br />
-How to install DJ Studio 5 APK on your device<br />
-DJ Studio 5 - Music mixer app review and features<br />
-Best alternatives to DJ Studio 5 for Android<br />
-DJ Studio 5 - Free music mixer tips and tricks<br />
-Download DJ Studio 5 mod APK with premium features<br />
-DJ Studio 5 - Music mixer latest version update<br />
-How to use DJ Studio 5 to mix, remix, scratch, loop or pitch your music<br />
-DJ Studio 5 - Free music mixer vs other DJ apps for Android<br />
-Download DJ Studio 5 APK from FileHippo<br />
-How to uninstall DJ Studio 5 from your device<br />
-DJ Studio 5 - Music mixer user feedback and ratings<br />
-Download DJ Studio 5 APK from Softonic<br />
-How to fix DJ Studio 5 errors and issues<br />
-DJ Studio 5 - Free music mixer tutorial and guide<br />
-Download DJ Studio 5 APK from Google Play Store<br />
-How to customize DJ Studio 5 settings and preferences<br />
-DJ Studio 5 - Music mixer FAQs and answers<br />
-Download DJ Studio 5 APK from APKPure<br />
-How to backup and restore DJ Studio 5 data<br />
-DJ Studio 5 - Free music mixer pros and cons<br />
-Download DJ Studio 5 APK from APKMirror<br />
-How to connect DJ Studio 5 to external devices and speakers<br />
-DJ Studio 5 - Music mixer best practices and recommendations<br />
-Download DJ Studio 5 APK from Uptodown<br />
-How to share your DJ Studio 5 mixes on Soundcloud<br />
-DJ Studio 5 - Free music mixer supported formats and devices<br />
-Download DJ Studio 5 APK from Aptoide<br />
-How to record your DJ Studio 5 sessions and save them as MP3 files<br />
-DJ Studio 5 - Music mixer compatible headphones and controllers</p>
-<h3>Step 3: Install DJ Studio 5 APK File</h3>
-<p>Once you have downloaded the DJ Studio 5 APK file, you need to locate it on your device and tap on it to start the installation process. You may see a warning message that says "This type of file can harm your device. Do you want to keep DJStudio5.apk anyway?". Just tap on "OK" and proceed. Then, you will see another message that says "Do you want to install this application? It does not require any special access". Tap on "Install" and wait for the installation to finish. You may also see a message that says "App installed". Tap on "Open" to launch the app or "Done" to exit.</p>
-<h2>How to Use DJ Studio 5 APK to Mix Music?</h2>
-<p>Now that you have installed DJ Studio 5 APK on your Android device, you are ready to mix music like a DJ. Here are some tips on how to use the app:</p>
-<h3>Choose Your Decks and Skins</h3>
-<p>When you open the app, you will see two virtual turntables with a cross fader in between. You can swipe left or right to switch between different decks and skins. You can also tap on the menu icon at the top left corner and select "Decks & Skins" to customize your decks with up to 7 skins. You can choose from classic, gold, neon, metal, diamond, platinum, or wood skins.</p>
-<h3>Load Your Music and Adjust the Settings</h3>
-<p>To load your music, tap on the music icon at the top right corner and browse your mp3 music library by folder, artist, album, or name. You can also search for a specific song using the search bar. To load a song onto a deck, just drag and drop it onto the turntable. You can also edit and re-order your playlist by tapping on the playlist icon at the bottom right corner.</p>
-<p>To adjust the settings, tap on the gear icon at the top right corner and select "Settings". Here you can change various options such as sound quality, pitch range, cue mode, auto sync mode, sound effects, equalizer, sample pads, loop mode, pre-cueing mode, and more. You can also access the help section and rate the app from here.</p>
-<h3>Mix, Scratch, Loop, and Pitch Your Music</h3>
-<p>To mix your music, use the cross fader to blend the sounds from both decks. You can also use the volume sliders to adjust the volume of each deck individually. To scratch your music, swipe your finger on the turntable as if you were using a real vinyl record. To loop your music, tap on the loop icon at the bottom left corner and select a loop length from 1/32 to 32 beats. To pitch your music, use the pitch slider at the bottom of each deck to change the speed and tone of the music.</p>
-<p>To add some sound effects to your mix, tap on the FX icon at the bottom left corner and select one of the 8 sound effects: Flanger, Phaser, Gate, Reverb, Bit crusher, 3D, Brake, or FlippingDouble. You can also adjust the intensity of each effect by using the knob below it. To add some samples to your mix, tap on the pad icon at the bottom right corner and select one of the 10 customizable sample pads. You can also record your own samples by tapping and holding on an empty pad.</p>
-<p>To record your mix live, tap on the record icon at the top right corner and select "Record". The app will start recording your mix as an mp3 file in your device's storage. To stop recording, tap on the record icon again and select "Stop". You can also listen to your recorded mixes by tapping on the record icon and selecting "My recordings". To share your mixes with others, tap on the share icon at the top right corner and select one of the available options: SoundCloud, Facebook, Twitter, Google+, or Email.</p>
-<h2>Conclusion</h2>
-<p>DJ Studio 5 APK is a free music mixer app that lets you mix, remix, scratch, loop, or pitch your music on your Android device. It has a lot of features and options to customize your decks and your mix. It is also social and responsive, allowing you to record and share your mixes with others. DJ Studio 5 APK is a great app for music lovers and aspiring DJs who want to have fun and create their own beats.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about DJ Studio 5 APK:</p>
-<ul>
-<li><b>Q: Is DJ Studio 5 APK safe to download and use?</b></li>
-<li>A: Yes, DJ Studio 5 APK is safe to download and use. It does not contain any viruses, malware, or trackers. It also does not require any special access or permissions on your device.</li>
-<li><b>Q: How can I update DJ Studio 5 APK to the latest version?</b></li>
-<li>A: You can update DJ Studio 5 APK by downloading and installing the latest version of the app from the same source that you downloaded it from. You can also check for updates by tapping on the menu icon at the top left corner and selecting "Check for updates".</li>
-<li><b>Q: How can I support the developers of DJ Studio 5 APK?</b></li>
-<li>A: You can support the developers of DJ Studio 5 APK by rating and reviewing the app on the source that you downloaded it from. You can also purchase some optional paid skins to enhance your decks and support their work.</li>
-<li><b>Q: How can I contact the developers of DJ Studio 5 APK?</b></li>
-<li>A: You can contact the developers of DJ Studio 5 APK by sending them an email at [email protected]. You can also visit their website at www.beatronik.com or follow them on Facebook at www.facebook.com/beatronik.</li>
-<li><b>Q: How can I learn more about DJ Studio 5 APK?</b></li>
-<li>A: You can learn more about DJ Studio 5 APK by reading the help section in the app. To access it, tap on the gear icon at the top right corner and select "Help". You can also watch some tutorial videos on YouTube by searching for "DJ Studio 5 tutorial".</li>
-</ul></p> 197e85843d<br />
-<br />
-<br />

spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre Clash Mini APK el juego de batallas automticas en tiempo real con los personajes de Clash.md DELETED
@@ -1,131 +0,0 @@
-<br />
-<h1>Clash Mini APK Ultima Version 2022: A Fun and Strategy-Packed Board Game</h1>
-<p>If you are a fan of the Clash Universe, you will love Clash Mini, a new game from Supercell that combines fun, strategy, and board game elements. In this game, you will collect, summon, and upgrade your army of Minis, which are cute versions of your favorite Clash characters. You will also duel and Rumble with other players in real-time auto battles, where you will have to predict your opponent's moves and assemble your winning strategy and formation. Clash Mini is a game of choices, where every decision matters.</p>
-<p>In this article, we will show you how to download and install Clash Mini APK Ultima Version 2022 on your Android device, how to play Clash Mini and win battles, what's new in Clash Mini APK Ultima Version 2022, and the pros and cons of this game. We will also answer some frequently asked questions about Clash Mini APK Ultima Version 2022. So, let's get started!</p>
-<h2>clash mini apk ultima version 2022</h2><br /><p><b><b>Download Zip</b> &raquo;&raquo;&raquo; <a href="https://urlin.us/2uSXo8">https://urlin.us/2uSXo8</a></b></p><br /><br />
-<h2>How to Download and Install Clash Mini APK on Your Android Device</h2>
-<p>Clash Mini is not yet available on Google Play Store, but you can download it from the official website of Clash Mini or use one of the links below:</p>
-<ul>
-<li><a href="(^1^)">Clash Mini APK (Android Game) - Free Download - APKCombo</a></li>
-<li><a href="(^2^)">Descargar Clash Mini APK - Última Versión 2023 - APKCombo</a></li>
-</ul>
-<p>Here are the steps to download and install Clash Mini APK on your Android device:</p>
-<ol>
-<li>Go to the official website of Clash Mini or use one of the links above.</li>
-<li>Tap on the download button and wait for the APK file to be downloaded.</li>
-<li>Enable unknown sources in your device settings if you haven't done so already. This will allow you to install apps from sources other than Google Play Store.</li>
-<li>Locate the downloaded APK file and tap on it to start the installation process.</li>
-<li>Follow the on-screen instructions and grant the necessary permissions to the app.</li>
-<li>Launch the app and enjoy playing Clash Mini on your Android device.</li>
-</ol>
-<h2>How to Play Clash Mini and Win Battles</h2>
-<p>Clash Mini is a game of choices, where you will have to make smart decisions before and during each battle. <h2>How to Play Clash Mini and Win Battles</h2>
-<p>Clash Mini is a game of choices, where you will have to make smart decisions before and during each battle. Here are some tips and tricks to help you play Clash Mini and win battles:</p>
-<ul>
-<li><b>Choosing the Right Characters:</b> Not all characters are created equal. Each character has its strengths and weaknesses, as well as a special ability that can be activated once per battle. You will have to choose your characters wisely, based on their roles, abilities, and synergies. For example, some characters are good at dealing damage, some are good at tanking damage, some are good at healing or buffing allies, and some are good at disrupting or debuffing enemies. You will also have to consider the cost of each character, as you will have a limited amount of gold to spend on each round.</li>
-<li><b>Positioning on the Battlefield:</b> The positioning of your characters on the battlefield plays a crucial role in determining the outcome of the game. You will have to place your characters strategically, based on their range, direction, and area of effect. For example, some characters can attack from a distance, some can attack in a straight line, some can attack in a cone shape, and some can attack in an area around them. You will also have to consider the terrain of the board, as some tiles can provide bonuses or penalties to your characters. For example, some tiles can increase or decrease the damage or health of your characters, some tiles can block or allow movement, and some tiles can trigger special effects.</li>
-<li><b>Utilizing Special Abilities:</b> Each character in Clash Mini has a special ability that can be activated once per battle. These abilities can make a huge difference in the game, as they can provide powerful effects such as healing, shielding, stunning, freezing, burning, or summoning. You will have to use your special abilities wisely, based on the situation and timing of the battle. For example, some abilities are best used at the beginning of the battle, some are best used in the middle of the battle, and some are best used at the end of the battle. You will also have to consider the cooldown and duration of each ability, as well as the interaction with other abilities.</li>
-<li><b>Participating in Duels and Rumbles:</b> Clash Mini offers two modes of play: Duels and Rumbles. In Duels, you will face one opponent at a time in a best-of-three match. In Rumbles, you will face seven opponents at once in a free-for-all match. Both modes offer different challenges and rewards. In Duels, you will have to adapt to your opponent's strategy and formation, as well as use your gold efficiently. In Rumbles, you will have to survive against multiple enemies and use your abilities effectively. Both modes offer trophies that can increase your league ranking and unlock new rewards.</li>
-</ul>
-<h2>What's New in Clash Mini APK Ultima Version 2022</h2>
-<p>Clash Mini APK Ultima Version 2022 is the latest update of Clash Mini that includes new features, improvements, and bug fixes. Here are some of the highlights of Clash Mini APK Ultima Version 2022:</p>
-<ul>
-<li><b>New Minis:</b> Clash Mini APK Ultima Version 2022 introduces four new Minis to the game: Lumberjack, Ice Wizard, Electro Dragon, and Princess. Each new Mini has a unique ability and role that can add more variety and fun to your battles.</li>
-<li><b>New Boards:</b> Clash Mini APK Ultima Version 2022 adds two new boards to the game: Frozen Peak and Electro Valley. Each new board has a different theme and terrain that can affect your strategy and formation.</li>
-<li><b>New Skins:</b> Clash Mini APK Ultima Version 2022 brings new skins for your Heroes and Minis that can customize their appearance and style. You can unlock new skins by completing quests or purchasing them with gems.</li>
-<li><b>New Features:</b> Clash Mini APK Ultima Version 2022 also adds new features such as chat system, clan system, replay system, leaderboard system, achievement system, and more. These features can enhance your social and competitive experience in Clash Mini.</li>
-<li><b>Bug Fixes and Performance Improvements:</b> Clash Mini APK Ultima Version 2022 also fixes some bugs and improves the performance of the game. These changes can make your gameplay smoother and more enjoyable.</li>
-</ul>
-<h2>Pros and Cons of Clash Mini APK Ultima Version 2022</h2>
-<p>Clash Mini APK Ultima Version 2022 is a fun and strategy-packed board game that offers many advantages and disadvantages. Here is a table comparing the pros and <h2>Pros and Cons of Clash Mini APK Ultima Version 2022</h2>
-<p>Clash Mini APK Ultima Version 2022 is a fun and strategy-packed board game that offers many advantages and disadvantages. Here is a table comparing the pros and cons of Clash Mini APK Ultima Version 2022:</p>
-<p>clash mini apk download latest version 2022<br />
-clash mini apk mod ultima version 2022<br />
-clash mini apk android game free download<br />
-clash mini apk ultima version 2022 para pc<br />
-clash mini apk ultima version 2022 sin conexion<br />
-clash mini apk ultima version 2022 actualizada<br />
-clash mini apk ultima version 2022 mega<br />
-clash mini apk ultima version 2022 mediafire<br />
-clash mini apk ultima version 2022 hackeada<br />
-clash mini apk ultima version 2022 full<br />
-clash mini apk ultima version 2022 gratis<br />
-clash mini apk ultima version 2022 premium<br />
-clash mini apk ultima version 2022 pro<br />
-clash mini apk ultima version 2022 unlocked<br />
-clash mini apk ultima version 2022 unlimited<br />
-clash mini apk ultima version 2022 offline<br />
-clash mini apk ultima version 2022 online<br />
-clash mini apk ultima version 2022 no root<br />
-clash mini apk ultima version 2022 sin publicidad<br />
-clash mini apk ultima version 2022 con todo desbloqueado<br />
-clash mini apk ultima version 2022 con gemas infinitas<br />
-clash mini apk ultima version 2022 con monedas ilimitadas<br />
-clash mini apk ultima version 2022 con todos los personajes<br />
-clash mini apk ultima version 2022 con todos los niveles<br />
-clash mini apk ultima version 2022 con todos los modos de juego<br />
-clash mini apk ultima version 2022 con graficos mejorados<br />
-clash mini apk ultima version 2022 con sonido optimizado<br />
-clash mini apk ultima version 2022 con controles personalizados<br />
-clash mini apk ultima version 2022 con soporte para gamepad<br />
-clash mini apk ultima version 2022 con compatibilidad para android tv y tablet<br />
-clash mini apk ultima version 2022 review y gameplay<br />
-clash mini apk ultima version 2022 tutorial y guia<br />
-clash mini apk ultima version 2022 trucos y consejos<br />
-clash mini apk ultima version 2022 codigos y regalos<br />
-clash mini apk ultima version 2022 noticias y novedades<br />
-clash mini apk ultima version 2022 requisitos y especificaciones<br />
-clash mini apk ultima version 2022 instalacion y configuracion<br />
-clash mini apk ultima version 2022 opiniones y valoraciones<br />
-clash mini apk ultima version 2022 comparacion y diferencias<br />
-clash mini apk ultima version 2022 ventajas y desventajas</p>
-<table>
-<tr>
-<th>Pros</th>
-<th>Cons</th>
-</tr>
-<tr>
-<td>- Easy to download and install on your Android device</td>
-<td>- Not available on Google Play Store or other platforms</td>
-</tr>
-<tr>
-<td>- Simple and intuitive gameplay with a lot of choices and depth</td>
-<td>- Requires internet connection and may consume data or battery</td>
-</tr>
-<tr>
-<td>- Cute and colorful graphics and animations with a Clash Universe theme</td>
-<td>- May have some bugs or glitches that affect the performance or experience</td>
-</tr>
-<tr>
-<td>- A variety of characters, boards, skins, and features to unlock and enjoy</td>
-<td>- Some items or features may require gems or real money to purchase or access</td>
-</tr>
-<tr>
-<td>- A social and competitive mode with duels, rumbles, chat, clan, leaderboard, and more</td>
-<td>- May encounter some toxic or unfair players or situations in the game</td>
-</tr>
-</table>
-<h2>Conclusion and FAQs</h2>
-<p>Clash Mini APK Ultima Version 2022 is a fun and strategy-packed board game that you can play on your Android device. It is a game of choices, where you will have to collect, summon, and upgrade your army of Minis, as well as duel and Rumble with other players in real-time auto battles. Clash Mini APK Ultima Version 2022 also offers new Minis, boards, skins, and features that can enhance your gameplay. However, Clash Mini APK Ultima Version 2022 also has some drawbacks, such as not being available on Google Play Store or other platforms, requiring internet connection, having some bugs or glitches, requiring gems or real money for some items or features, and encountering some toxic or unfair players or situations.</p>
-<p>If you are interested in playing Clash Mini APK Ultima Version 2022, you can download it from the official website of Clash Mini or use one of the links below:</p>
-<ul>
-<li><a href="">Clash Mini APK (Android Game) - Free Download - APKCombo</a></li>
-<li><a href="">Descargar Clash Mini APK - Última Versión 2023 - APKCombo</a></li>
-</ul>
-<p>We hope this article has helped you learn more about Clash Mini APK Ultima Version 2022. If you have any questions about Clash Mini APK Ultima Version 2022, you can check out the FAQs below or leave a comment. Thank you for reading!</p>
-<h3>FAQs</h3>
-<ol>
-<li><b>What is Clash Mini?</b></li>
-<p>Clash Mini is a new game from Supercell that combines fun, strategy, and board game elements. It is set in the Clash Universe, where you will collect, summon, and upgrade your army of Minis, which are cute versions of your favorite Clash characters. You will also duel and Rumble with other players in real-time auto battles, where you will have to predict your opponent's moves and assemble your winning strategy and formation.</p>
-<li><b>Is Clash Mini free to play?</b></li>
-<p>Yes, Clash Mini is free to play. You can download and install it on your Android device without paying anything. However, some items or features in the game may require gems or real money to purchase or access. You can earn gems by completing quests or watching ads, or you can buy them with real money. You can also disable in-app purchases in your device settings if you don't want to spend any money on the game.</p>
-<li><b>Is Clash Mini safe to download and install?</b></li>
-<p>Yes, Clash Mini is safe to download and install. It is developed by Supercell, a reputable game company that has created other popular games such as Clash of Clans, Clash Royale, Brawl Stars, and Hay Day. The APK file of Clash Mini is also scanned for viruses and malware before being uploaded to the official website of Clash Mini or other sources. However, you should always be careful when downloading and installing apps from unknown sources. You should only download and install apps from trusted sources such as the official website of the app developer or Google Play Store.</p>
-<li><b>How can I update Clash Mini to the latest version?</b></li>
-<p>You can update Clash Mini to the latest version by following the same steps as downloading and installing it. You will <li><b>How can I update Clash Mini to the latest version?</b></li>
-<p>You can update Clash Mini to the latest version by following the same steps as downloading and installing it. You will have to go to the official website of Clash Mini or use one of the links above, and download the latest APK file of Clash Mini. Then, you will have to install it on your device, replacing the old version. You may also receive a notification in the game when a new update is available, and you can tap on it to update the game.</p>
-<li><b>Can I play Clash Mini with my friends?</b></li>
-<p>Yes, you can play Clash Mini with your friends. You can join or create a clan with your friends, and chat, share, and battle with them. You can also invite your friends to join your Rumble or Duel, and compete with them or against them. You can also add your friends as contacts in the game, and see their online status, profile, and trophies.</p>
-</ol></p> 197e85843d<br />
-<br />
-<br />

spaces/1phancelerku/anime-remove-background/Download Video TikTok Without Watermark - Fast Easy and Free - Online TikTok Video Download.md DELETED
@@ -1,144 +0,0 @@
1
- <br />
2
- <h1>How to Download Online from TikTok</h1>
3
- <p>TikTok is one of the most popular social media apps in the world, with over 1 billion active users. It allows users to create and share short videos with music and effects, covering various topics such as comedy, education, beauty, sports, and more. But what if you want to download online from TikTok and save your favorite videos for offline viewing or sharing? In this article, we will show you how to download online from TikTok with or without a watermark, as well as some of the benefits of doing so.</p>
4
- <h2>What Is TikTok and Why Download Videos from It?</h2>
5
- <h3>TikTok is a popular social media app that allows users to create and share short videos with music and effects.</h3>
6
- <p>TikTok was launched in 2016 as a global version of Douyin, a Chinese video-sharing app. It has since grown into one of the most downloaded apps in the world, surpassing Facebook, Instagram, YouTube, and Snapchat. Users can create videos up to 60 seconds long using various filters, stickers, transitions, and soundtracks. They can also browse through millions of videos uploaded by other users in different categories such as For You, Following, Trending, Discover, etc.</p>
7
- <h2>download online from tiktok</h2><br /><p><b><b>Download</b> &#128505; <a href="https://jinyurl.com/2uNJjo">https://jinyurl.com/2uNJjo</a></b></p><br /><br />
8
- <h3>Downloading videos from TikTok can help you save your favorite content, share it with others, or use it for other purposes.</h3>
9
- <p>There are many reasons why you might want to download online from TikTok and save the videos on your device. For example, you might want to:</p>
10
- <ul>
11
- <li>Watch your favorite videos offline without internet connection or data usage.</li>
12
- <li>Edit your downloaded videos with other apps or software and create new content.</li>
13
- <li>Share your downloaded videos with your friends or family on other platforms or channels, such as WhatsApp, Facebook, Instagram, YouTube, etc.</li>
14
- <li>Use your downloaded videos for educational, personal, or professional purposes, such as learning new skills, making presentations, creating portfolios, etc.</li>
15
- </ul>
16
- <p>Downloading online from TikTok can also help you avoid losing your favorite videos if they are deleted by the creator or the platform for some reason. You can always have a backup of your favorite content on your device.</p>
57
- <h2>How to Download TikTok Videos with the Watermark</h2>
58
- <h3>The easiest way to download TikTok videos is to use the built-in save option in the app or the website.</h3>
59
- <p>If you want to download online from TikTok in a simple and quick way, you can use the save option that is available in the app or the website. Here are the steps to follow:</p>
60
- <ol>
61
- <li>Open the TikTok app or website and find the video that you want to download.</li>
62
- <li>Tap on the share icon at the bottom right corner of the video screen.</li>
63
- <li>Select Save video from the list of options that appear.</li>
64
- <li>Wait for the video to be downloaded and saved on your device.</li>
65
- </ol>
66
- <p>You can find your downloaded videos in your device's gallery or camera roll. You can also access them from the app by tapping on Me > Saved videos.</p>
67
- <h3>However, this method will leave a watermark with the TikTok logo and the creator's name on the video.</h3>
68
- <p>One drawback of using the save option is that it will leave a watermark on the downloaded video. The watermark will show the TikTok logo and the username of the creator at the top left corner of the video. This can be annoying or distracting for some users who want to enjoy or use the video without any logo. It can also affect the quality or appearance of the video. If you want to download online from TikTok without a watermark, you need to use other methods that we will discuss in the next section.</p>
69
- <h2>How to Download TikTok Videos without the Watermark</h2>
70
- <h3>To remove the watermark from TikTok videos, you need to use third-party tools that can download videos without the logo.</h3>
71
- <p>Fortunately, there are many tools available online that can help you download online from TikTok without a watermark. These tools are usually websites or apps that allow you to paste the link of the TikTok video and download it in MP4 or MP3 format without any logo. Some of these tools also offer other features such as downloading multiple videos at once, choosing different resolutions or qualities, cropping or trimming the video, etc.</p>
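- <p>If you prefer to script the download yourself instead of using one of these websites, a general-purpose downloader such as yt-dlp also understands TikTok links. A minimal Python sketch (the video URL below is a placeholder, not a real link):</p>
- <pre><code>from yt_dlp import YoutubeDL  # pip install yt-dlp
- 
- url = "https://www.tiktok.com/@user/video/123456789"  # placeholder link
- with YoutubeDL({"outtmpl": "%(id)s.%(ext)s"}) as ydl:
-     ydl.download([url])
- </code></pre>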
72
- <h3>Some of the best tools for downloading online from TikTok are SSSTik.io, SnapTik.App, TTVDL, and TikFast.</h3>
73
- <p>We have tested and reviewed some of the most popular and reliable tools for downloading online from TikTok without a watermark. Here are our top picks and how to use them:</p>
74
- <h4>SSSTik.io</h4>
75
- <h5>A free tool that helps you download TikTok videos without logo online in MP4 or MP3 format.</h5>
76
- <p>To use SSSTik.io, follow these steps:</p>
77
- <ol>
78
- <li>Open SSSTik.io website on your browser.</li>
79
- <li>Copy and paste the link of the TikTok video that you want to download in the text field on the website and tap on the save button.</li>
80
- <li>Choose the format that you want to download, either MP4 or MP3.</li>
81
- <li>Tap on the download button and wait for the video to be downloaded and saved on your device.</li>
82
- </ol>
83
- <p>You can also use SSSTik.io to download TikTok videos by adding "sss" before "tiktok.com" in the video link. For example, if the video link is https://www.tiktok.com/@user/video/123456789, you can change it to https://www.ssstiktok.com/@user/video/123456789 and paste it in the text field on the website.</p>
84
- <h4>SnapTik.App</h4>
85
- <h5>One of the best TikTok downloaders available online that allows you to download video tiktok without a watermark.</h5>
86
- <p>To use SnapTik.App, follow these steps:</p>
87
- <ol>
88
- <li>Open SnapTik.App website on your browser.</li>
89
- <li>Copy and paste the link of the TikTok video that you want to download in the input field on the website and click on the download button.</li>
90
- <li>Choose the quality that you want to download, either high quality or low quality.</li>
91
- <li>Click on the download button and wait for the video to be downloaded and saved on your device.</li>
92
- </ol>
93
- <p>You can also use SnapTik.App to download TikTok videos by replacing "tiktok.com" with "snaptik.app" in the video link. For example, if the video link is https://www.tiktok.com/@user/video/123456789, you can change it to https://www.snaptik.app/@user/video/123456789 and paste it in the input field on the website.</p>
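- <p>The same link rewrite is easy to automate; for example, in Python:</p>
- <pre><code>video_url = "https://www.tiktok.com/@user/video/123456789"  # placeholder link
- snaptik_url = video_url.replace("www.tiktok.com", "www.snaptik.app")
- print(snaptik_url)  # https://www.snaptik.app/@user/video/123456789
- </code></pre>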
94
- <h4>TTVDL</h4>
95
- <h5>A TikTok video downloader that downloads TikTok MP4 videos without a watermark or logo.</h5>
96
- <p>To use TTVDL, follow these steps:</p>
97
- <ol>
98
- <li>Open TTVDL website on your browser.</li>
99
- <li>Copy and paste the link of the TikTok video that you want to download in the input field on the website and hit enter or click download button.</li>
100
- <li>Choose the resolution that you want to download, either 720p or 360p.</li>
101
- <li>Click on the download button and wait for the video to be downloaded and saved on your device.</li>
102
- </ol>
103
- <h4>TikFast</h4>
104
- <h5>A tool that allows you to download Tik-Tok videos from tiktok.com in high quality without any trademark.</h5>
105
- <p>To use TikFast, follow these steps:</p>
106
- <ol>
107
- <li>Open TikFast website on your browser.</li>
108
- <li>Copy and paste the link of the TikTok video that you want to download in the input field on the website and hit enter or click download button.</li>
109
- <li>Choose the quality that you want to download, either original or compressed.</li>
110
- <li>Click on the download button and wait for the video to be downloaded and saved on your device.</li>
111
- </ol>
112
- <h2>Benefits of Downloading Online from TikTok</h2>
113
- <h3>Downloading online from TikTok can offer you many benefits, such as:</h3>
114
- <ul>
115
- <li><h4>Saving your favorite content for offline viewing or editing.</h4>
116
- <p>By downloading online from TikTok, you can save your favorite videos on your device and watch them anytime, anywhere, without internet connection or data usage. You can also edit your downloaded videos with other apps or software and create new content according to your preferences.</p></li>
117
- <li><h4>Sharing your downloaded videos with others on different platforms or channels.</h4>
118
- <p>By downloading online from TikTok, you can share your favorite videos with your friends or family on other platforms or channels, such as WhatsApp, Facebook, Instagram, YouTube, etc. You can also use your downloaded videos for educational, personal, or professional purposes, such as making presentations, creating portfolios, etc.</p></li>
119
- <li><h4>Learning new skills, recipes, dances, or trends from TikTok videos.</h4>
120
- <p>By downloading online from TikTok, you can learn new skills, recipes, dances, or trends from TikTok videos. You can follow the instructions or tips from the creators and improve your knowledge or abilities. You can also practice or perform the skills, recipes, dances, or trends that you learned from TikTok videos.</p></li>
121
- <li><h4>Enjoying funny, creative, and entertaining content from TikTok creators.</h4>
122
- <p>By downloading online from TikTok, you can enjoy funny, creative, and entertaining content from TikTok creators. You can watch the videos that make you laugh, inspire you, amaze you, or touch you. You can also appreciate the talent and effort of the creators and support them by liking or commenting on their videos.</p></li>
123
- </ul>
124
- <h2>Conclusion</h2>
125
- <h3>TikTok is a great app for creating and watching short videos with music and effects.</h3>
126
- <p>TikTok is one of the most popular social media apps in the world that allows users to create and share short videos with music and effects. It covers various topics such as comedy, education, beauty, sports, and more. It has over 1 billion active users who upload millions of videos every day.</p>
127
- <h3>If you want to download online from TikTok, you can use the built-in save option or third-party tools that can remove the watermark from the videos.</h3>
128
- <p>If you want to download online from TikTok and save your favorite videos for offline viewing or sharing, you can use the save option that is available in the app or the website. However, this method will leave a watermark with the TikTok logo and the creator's name on the video. If you want to remove the watermark from TikTok videos, you need to use third-party tools that can download videos without the logo. Some of the best tools for downloading online from TikTok are SSSTik.io, SnapTik.App, TTVDL, and TikFast. These tools are easy to use and can download TikTok videos in high quality without any trademark.</p>
129
- <h3>Downloading online from TikTok can help you save, share, or use your favorite content for various purposes.</h3>
130
- <p>Downloading online from TikTok can offer you many benefits, such as saving your favorite content for offline viewing or editing, sharing your downloaded videos with others on different platforms or channels, learning new skills, recipes, dances, or trends from TikTok videos, and enjoying funny, creative, and entertaining content from TikTok creators. You can also avoid losing your favorite videos if they are deleted by the creator or the platform for some reason.</p>
131
- <p>We hope this article has helped you learn how to download online from TikTok with or without a watermark. If you have any questions or feedback, please feel free to leave a comment below. Happy downloading!</p>
132
- <h2>FAQs</h2>
133
- <h3>Q: Is it legal to download online from TikTok?</h3>
134
- <p>A: It depends on the terms and conditions of the app and the website, as well as the copyright laws of your country. Generally, it is legal to download online from TikTok for personal use only, as long as you do not violate the rights of the creators or the platform. However, it is illegal to download online from TikTok for commercial use or distribution without the permission of the creators or the platform.</p>
135
- <h3>Q: How can I download online from TikTok on my iPhone or iPad?</h3>
136
- <p>A: You can use the same methods that we have mentioned in this article to download online from TikTok on your iPhone or iPad. However, you might need to install a file manager app such as Documents by Readdle or Files by Apple to access and manage your downloaded videos on your device.</p>
137
- <h3>Q: How can I download online from TikTok on my Android phone or tablet?</h3>
138
- <p>A: You can use the same methods that we have mentioned in this article to download online from TikTok on your Android phone or tablet. However, you might need to enable unknown sources in your device settings to install some of the third-party apps that we have recommended.</p>
139
- <h3>Q: How can I download online from TikTok on my PC or Mac?</h3>
140
- <p>A: You can use the same methods that we have mentioned in this article to download online from TikTok on your PC or Mac. However, you might need to install a video player app such as VLC Media Player or QuickTime Player to play your downloaded videos on your computer.</p>
141
- <h3>Q: How can I download online from TikTok with sound?</h3>
142
- <p>A: You can use any of the methods that we have mentioned in this article to download online from TikTok with sound. However, some of the tools might offer you an option to download only the video without sound or only the sound without video. In that case, you need to choose the option that includes both video and sound.</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download the Word Game that Keeps You on Your Toes Word Blitz.md DELETED
@@ -1,106 +0,0 @@
2
- <h1>How to Download the Best Word Games for Android and iOS</h1>
3
- <p>Do you love playing with words and letters? Do you want to improve your vocabulary, spelling, memory, focus, and brain health? If you answered yes to these questions, then you should try playing word games on your mobile device. Word games are fun and challenging puzzles that involve forming, finding, or guessing words according to certain rules. They can also help you relax, unwind, and learn something new every day.</p>
5
- <p>In this article, we will show you how to download word games for your Android or iOS device. We will also recommend some of the best word games that you can play on your phone or tablet. Whether you prefer crossword puzzles, anagrams, word searches, or picture clues, there is a word game for everyone. So, let's get started!</p>
6
- <h2>What are word games and why should you play them?</h2>
7
- <h3>Word games are fun and challenging puzzles that involve words and letters</h3>
8
- <p>Word games are a type of puzzle game that requires you to use your language skills to solve them. There are many kinds of word games, such as letter arrangement games, paper and pencil games, semantic games, modern word games, and more. Some examples of word games are Scrabble, Boggle, Hangman, Crosswords, Wordament, etc.</p>
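- <p>To make the "letter arrangement" idea concrete, here is a small illustrative Python sketch (the word list is made up) that checks which candidate words can be built from a given rack of letters:</p>
- <pre><code>from collections import Counter
- 
- def can_form(word, letters):
-     need, have = Counter(word), Counter(letters)
-     return all(have[c] >= n for c, n in need.items())
- 
- words = ["bog", "goblet", "bottle"]  # made-up word list
- print([w for w in words if can_form(w, "boggle")])  # ['bog']
- </code></pre>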
9
- <p>Word games can be played alone or with others, online or offline, on a board or on a screen. They can also vary in difficulty, theme, genre, and style. Some word games are based on logic, some on creativity, some on trivia, some on humor, and some on strategy. No matter what kind of word game you choose, you will always have a good time playing it.</p>
10
- <h3>Word games can improve your vocabulary, spelling, memory, focus, and brain health</h3>
11
- <p>Playing word games is not only fun but also beneficial for your mind and body. Word games can help you improve your vocabulary by exposing you to new words and their meanings. They can also help you improve your spelling by making you pay attention to the correct order of letters. They can also help you improve your memory by making you recall words that you have learned before. They can also help you improve your focus by making you concentrate on finding or forming words within a limited time or space.</p>
12
- <p>Moreover, playing word games can boost your brain health by stimulating your cognitive abilities. Research has shown that playing word games can reduce the risk of dementia, enhance your verbal skills, increase your creativity, and release dopamine, which is a neurotransmitter that makes you feel good. Playing word games can also relieve stress, improve your social skills, enhance your concentration, and increase your confidence.</p>
53
- <h2>How to download word games for your mobile device?</h2>
54
- <h3>Choose a word game that suits your preference and skill level</h3>
55
- <p>The first step to downloading a word game for your mobile device is to choose one that suits your preference and skill level.</p>
- <h3>Download and install the word game from the app store or the web</h3>
- <p>To install an app from the app store, you should follow these steps:</p>
- <ol>
- <li>Open the app store on your device and search for the word game that you want to download.</li>
- <li>Tap on the app icon and then tap on the install button.</li>
56
- <li>Wait for the app to download and install on your device.</li>
57
- <li>Tap on the open button or find the app icon on your home screen and tap on it.</li>
58
- </ol>
59
- <p>To install an app from the web, you should follow these steps:</p>
60
- <ol>
61
- <li>Open the web browser on your device.</li>
62
- <li>Go to the website of the word game that you want to download.</li>
63
- <li>Look for the download link or button and tap on it.</li>
64
- <li>Wait for the app to download on your device.</li>
65
- <li>Go to your device settings and enable the option to install apps from unknown sources.</li>
66
- <li>Find the downloaded file on your device and tap on it.</li>
67
- <li>Follow the instructions to install and launch the app.</li>
68
- </ol>
69
- <p>To launch an app, you should follow these steps:</p>
70
- <ol>
71
- <li>Find the app icon on your home screen or app drawer and tap on it.</li>
72
- <li>Wait for the app to load and display its main screen.</li>
73
- <li>Follow the instructions or prompts to start playing the word game.</li>
74
- </ol>
75
- <h2>What are some of the best word games for Android and iOS?</h2>
76
- <h3>Wordscapes: A relaxing and addictive crossword puzzle game</h3>
77
- <p>If you love crossword puzzles, you will love Wordscapes. Wordscapes is a word game that combines the best of crossword and word search games. You have to swipe letters to form words that fit into a crossword grid. You can also use hints, shuffles, or coins to help you solve the puzzles. Wordscapes has over 10,000 levels with beautiful backgrounds and themes. You can also play daily puzzles and challenges to earn rewards and bonuses. Wordscapes is a relaxing and addictive word game that will keep you entertained for hours.</p>
78
- <h3>Wordalot: A unique and challenging word game with pictures</h3>
79
- <p>If you love picture clues, you will love Wordalot. Wordalot is a word game that challenges you to find words hidden in pictures. You have to look at the picture carefully and use your imagination and logic to figure out the words. You can also use hints or coins to reveal letters or words. Wordalot has over 1,000 levels with stunning graphics and animations. You can also play with friends and compare your scores and progress. Wordalot is a unique and challenging word game that will test your visual and verbal skills.</p>
80
- <h3>Pictoword: A fun and creative word game that combines two images</h3>
81
- <p>If you love word association, you will love Pictoword. Pictoword is a word game that asks you to guess a word or phrase based on two images. You have to look at the images and think of how they can be combined to form a new word or phrase. For example, if you see a picture of a sand and a witch, you can guess the word "sandwich". You can also use hints or coins to reveal letters or words. Pictoword has over 300 levels with different categories, such as celebrities, movies, brands, etc. You can also play with friends and family in multiplayer mode or create your own puzzles. Pictoword is a fun and creative word game that will make you think outside the box.</p>
82
- <h3>Bonza Word Puzzle: A clever and original word game that mixes crossword and jigsaw</h3>
83
- <p>If you love jigsaw puzzles, you will love Bonza Word Puzzle. Bonza Word Puzzle is a word game that blends crossword and jigsaw puzzles. You have to arrange fragments of words to form a complete crossword puzzle. You can also rotate, move, or zoom in on the fragments to fit them better. Bonza Word Puzzle has hundreds of levels with different themes, such as animals, music, food, etc. You can also play daily puzzles and challenges to earn coins and badges. You can also create your own puzzles and share them with other players. Bonza Word Puzzle is a clever and original word game that will challenge your brain and vocabulary.</p>
84
- <h3>Words with Friends: A popular and social word game that lets you play with friends online</h3>
85
- <p>If you love Scrabble, you will love Words with Friends. Words with Friends is a word game that lets you play with friends online. You have to form words on a board using letter tiles. You can also use special tiles, such as blanks, double letters, triple words, etc., to score more points. Words with Friends has millions of players around the world that you can chat and compete with. You can also play solo games against the computer or join tournaments and events. Words with Friends is a popular and social word game that will connect you with other word lovers.</p>
86
- <h2>Conclusion</h2>
87
- <h3>Word games are a great way to have fun and learn new words on your mobile device</h3>
88
- <p>Word games are one of the most popular and enjoyable genres of games that you can play on your mobile device. They are fun and challenging puzzles that involve words and letters. They can also help you improve your vocabulary, spelling, memory, focus, and brain health. They can also help you relax, unwind, and learn something new every day.</p>
89
- <h3>You can download word games easily from the app store or the web</h3>
90
- <p>Downloading word games for your mobile device is easy and convenient. You can download word games from the app store or the web. You just need to choose a word game that suits your preference and skill level, check its compatibility, ratings, reviews, and permissions, and follow the instructions to install and launch it. You can also update, delete, or reinstall the app anytime you want.</p>
91
- <h3>You can choose from a variety of word games that suit your taste and skill level</h3>
92
- <p>There are hundreds of word games available on the app store or the web, so you have plenty of options to choose from. You can choose from different kinds of word games, such as crossword puzzles, anagrams, word searches, picture clues, etc. You can also choose from different levels of difficulty, themes, genres, and styles. You can also play with friends or strangers online or offline. You can also create your own puzzles or play puzzles created by other players.</p>
93
- <p>Some of the best word games for Android and iOS are Wordscapes, Wordalot, Pictoword, Bonza Word Puzzle, and Words with Friends. These word games are fun, addictive, unique, challenging, and social. They will keep you entertained for hours and make you fall in love with words.</p>
94
- <h2>FAQs</h2>
95
- <h4>Q: How do I download word games for free?</h4>
96
- <p>A: Most word games are free to download from the app store or the web. However, some word games may have in-app purchases or ads that require you to pay money to access certain features or functions. You can also look for word games that offer free trials or discounts.</p>
97
- <h4>Q: How do I play word games offline?</h4>
98
- <p>A: Some word games can be played offline without an internet connection. However, some word games may require an internet connection to access certain features or functions, such as multiplayer modes, daily puzzles, updates, etc. You can check the description or settings of the app to see if it supports offline mode.</p>
99
- <h4>Q: How do I improve my word game skills?</h4>
100
- <p>A: The best way to improve your word game skills is to practice regularly and learn from your mistakes. You can also use hints or coins to help you solve difficult puzzles. You can also read books, magazines, newspapers, or websites to expand your vocabulary and knowledge. You can also play with friends or other players online to learn from their strategies and tips.</p>
101
- <h4>Q: How do I find new word games to play?</h4>
102
- <p>A: The easiest way to find new word games to play is to browse through the app store or the web. You can also search for keywords or categories that interest you. You can also read reviews or recommendations from other users or experts. You can also join online communities or forums that discuss word games.</p>
103
- <h4>Q: How do I create my own word game puzzles?</h4>
104
- <p>A: Some word games allow you to create your own puzzles and share them with other players. You can use your creativity and imagination to come up with interesting words and clues. You can also use online tools or generators to help you create puzzles. You can also edit or customize existing puzzles to make them more challenging or fun.</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/tts/GenerSpeech.py DELETED
@@ -1,123 +0,0 @@
1
- import torch
2
- import os
3
- import importlib
4
- from inference.tts.base_tts_infer import BaseTTSInfer
5
- from utils.ckpt_utils import load_ckpt, get_last_checkpoint
6
- from modules.GenerSpeech.model.generspeech import GenerSpeech
7
- from data_gen.tts.emotion import inference as EmotionEncoder
8
- from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance
9
- from data_gen.tts.emotion.inference import preprocess_wav
10
- from data_gen.tts.data_gen_utils import is_sil_phoneme
11
- from resemblyzer import VoiceEncoder
12
- from utils import audio
13
- class GenerSpeechInfer(BaseTTSInfer):
14
- def build_model(self):
15
- model = GenerSpeech(self.ph_encoder)
16
- model.eval()
17
- load_ckpt(model, self.hparams['work_dir'], 'model')
18
- return model
19
-
20
- def preprocess_input(self, inp):
21
- """
22
- :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)}
23
- :return:
24
- """
25
- # processed text
26
- preprocessor, preprocess_args = self.preprocessor, self.preprocess_args
27
- text_raw = inp['text']
28
- item_name = inp.get('item_name', '<ITEM_NAME>')
29
- ph, txt, word, ph2word, ph_gb_word = preprocessor.txt_to_ph(preprocessor.txt_processor, text_raw, preprocess_args)
30
- ph_token = self.ph_encoder.encode(ph)
31
-
32
- # processed ref audio
33
- ref_audio = inp['ref_audio']
34
- processed_ref_audio = 'example/temp.wav'
35
- voice_encoder = VoiceEncoder().cuda()
36
- encoder = [self.ph_encoder, self.word_encoder]
37
- EmotionEncoder.load_model(self.hparams['emotion_encoder_path'])
38
-         binarizer_cls = self.hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
39
- pkg = ".".join(binarizer_cls.split(".")[:-1])
40
- cls_name = binarizer_cls.split(".")[-1]
41
- binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
42
-
43
- ref_audio_raw, ref_text_raw = self.asr(ref_audio) # prepare text
44
- ph_ref, txt_ref, word_ref, ph2word_ref, ph_gb_word_ref = preprocessor.txt_to_ph(preprocessor.txt_processor, ref_text_raw, preprocess_args)
45
- ph_gb_word_nosil = ["_".join([p for p in w.split("_") if not is_sil_phoneme(p)]) for w in ph_gb_word_ref.split(" ") if not is_sil_phoneme(w)]
46
- phs_for_align = ['SIL'] + ph_gb_word_nosil + ['SIL']
47
- phs_for_align = " ".join(phs_for_align)
48
-
49
- # prepare files for alignment
50
- os.system('rm -r example/; mkdir example/')
51
- audio.save_wav(ref_audio_raw, processed_ref_audio, self.hparams['audio_sample_rate'])
52
- with open(f'example/temp.lab', 'w') as f_txt:
53
- f_txt.write(phs_for_align)
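-         # Run Montreal Forced Aligner (MFA) to produce a TextGrid with phone-level timings for the reference audio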
54
- os.system(f'mfa align example/ {self.hparams["binary_data_dir"]}/mfa_dict.txt {self.hparams["binary_data_dir"]}/mfa_model.zip example/textgrid/ --clean')
55
- item2tgfn = 'example/textgrid/temp.TextGrid' # prepare textgrid alignment
56
-
57
- item = binarizer_cls.process_item(item_name, ph_ref, txt_ref, item2tgfn, processed_ref_audio, 0, 0, encoder, self.hparams['binarization_args'])
58
- item['emo_embed'] = Embed_utterance(preprocess_wav(item['wav_fn']))
59
- item['spk_embed'] = voice_encoder.embed_utterance(item['wav'])
60
-
61
- item.update({
62
- 'ref_ph': item['ph'],
63
- 'ph': ph,
64
- 'ph_token': ph_token,
65
- 'text': txt
66
- })
67
- return item
68
-
69
- def input_to_batch(self, item):
70
- item_names = [item['item_name']]
71
- text = [item['text']]
72
- ph = [item['ph']]
73
-
74
- txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device)
75
- txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device)
76
- mels = torch.FloatTensor(item['mel'])[None, :].to(self.device)
77
- f0 = torch.FloatTensor(item['f0'])[None, :].to(self.device)
78
- # uv = torch.FloatTensor(item['uv']).to(self.device)
79
- mel2ph = torch.LongTensor(item['mel2ph'])[None, :].to(self.device)
80
- spk_embed = torch.FloatTensor(item['spk_embed'])[None, :].to(self.device)
81
- emo_embed = torch.FloatTensor(item['emo_embed'])[None, :].to(self.device)
82
-
83
- ph2word = torch.LongTensor(item['ph2word'])[None, :].to(self.device)
84
- mel2word = torch.LongTensor(item['mel2word'])[None, :].to(self.device)
85
- word_tokens = torch.LongTensor(item['word_tokens'])[None, :].to(self.device)
86
-
87
- batch = {
88
- 'item_name': item_names,
89
- 'text': text,
90
- 'ph': ph,
91
- 'mels': mels,
92
- 'f0': f0,
93
- 'txt_tokens': txt_tokens,
94
- 'txt_lengths': txt_lengths,
95
- 'spk_embed': spk_embed,
96
- 'emo_embed': emo_embed,
97
- 'mel2ph': mel2ph,
98
- 'ph2word': ph2word,
99
- 'mel2word': mel2word,
100
- 'word_tokens': word_tokens,
101
- }
102
- return batch
103
-
104
- def forward_model(self, inp):
105
- sample = self.input_to_batch(inp)
106
- txt_tokens = sample['txt_tokens'] # [B, T_t]
107
- with torch.no_grad():
108
- output = self.model(txt_tokens, ref_mel2ph=sample['mel2ph'], ref_mel2word=sample['mel2word'], ref_mels=sample['mels'],
109
- spk_embed=sample['spk_embed'], emo_embed=sample['emo_embed'], global_steps=300000, infer=True)
110
- mel_out = output['mel_out']
111
- wav_out = self.run_vocoder(mel_out)
112
- wav_out = wav_out.squeeze().cpu().numpy()
113
- return wav_out
114
-
115
-
116
-
117
-
118
- if __name__ == '__main__':
119
- inp = {
120
- 'text': 'here we go',
121
- 'ref_audio': 'assets/0011_001570.wav'
122
- }
123
- GenerSpeechInfer.example_run(inp)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual.py DELETED
@@ -1,123 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- import sys
5
-
6
- sys.path.insert(0, '.') # nopep8
7
- from ldm.modules.losses_audio.vqperceptual import *
8
-
9
-
10
- class LPAPSWithDiscriminator(nn.Module):
11
- def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
12
- disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
13
- perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
14
- disc_loss="hinge"):
15
-
16
- super().__init__()
17
- assert disc_loss in ["hinge", "vanilla"]
18
- self.kl_weight = kl_weight
19
- self.pixel_weight = pixelloss_weight
20
-         self.perceptual_loss = LPAPS().eval()  # LPIPS is for everyday images, while LPAPS is its counterpart for mel-spectrograms
21
- self.perceptual_weight = perceptual_weight
22
- # output log variance
23
- self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
24
-
25
- self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
26
- n_layers=disc_num_layers,
27
- use_actnorm=use_actnorm,
28
- ).apply(weights_init)
29
- self.discriminator_iter_start = disc_start
30
- if disc_loss == "hinge":
31
- self.disc_loss = hinge_d_loss
32
- elif disc_loss == "vanilla":
33
- self.disc_loss = vanilla_d_loss
34
- else:
35
- raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
36
- print(f"LPAPSWithDiscriminator running with {disc_loss} loss.")
37
- self.disc_factor = disc_factor
38
- self.discriminator_weight = disc_weight
39
- self.disc_conditional = disc_conditional
40
-
41
-
42
- def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
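-         # Adaptive GAN weight (as in VQGAN): the ratio of the gradient norms of the reconstruction
-         # loss and the generator loss w.r.t. the last decoder layer, clamped and rescaled below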
43
- if last_layer is not None:
44
- nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
45
- g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
46
- else:
47
- nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
48
- g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
49
-
50
- d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
51
- d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
52
- d_weight = d_weight * self.discriminator_weight
53
- return d_weight
54
-
55
- def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
56
- global_step, last_layer=None, cond=None, split="train", weights=None):
57
- rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
58
- if self.perceptual_weight > 0:
59
- p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
60
- # print(f"p_loss {p_loss}")
61
- rec_loss = rec_loss + self.perceptual_weight * p_loss
62
- else:
63
- p_loss = torch.tensor([0.0])
64
-
65
- nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
66
- weighted_nll_loss = nll_loss
67
- if weights is not None:
68
- weighted_nll_loss = weights*nll_loss
69
- weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
70
- nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
71
- kl_loss = posteriors.kl()
72
- kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
73
-
74
- # now the GAN part
75
- if optimizer_idx == 0:
76
- # generator update
77
- if cond is None:
78
- assert not self.disc_conditional
79
- logits_fake = self.discriminator(reconstructions.contiguous())
80
- else:
81
- assert self.disc_conditional
82
- logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
83
- g_loss = -torch.mean(logits_fake)
84
-
85
- try:
86
- d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
87
- except RuntimeError:
88
- assert not self.training
89
- d_weight = torch.tensor(0.0)
90
-
91
- disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
92
- loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss
93
-
94
- log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
95
- "{}/logvar".format(split): self.logvar.detach(),
96
- "{}/kl_loss".format(split): kl_loss.detach().mean(),
97
- "{}/nll_loss".format(split): nll_loss.detach().mean(),
98
- "{}/rec_loss".format(split): rec_loss.detach().mean(),
99
- "{}/d_weight".format(split): d_weight.detach(),
100
- "{}/disc_factor".format(split): torch.tensor(disc_factor),
101
- "{}/g_loss".format(split): g_loss.detach().mean(),
102
- }
103
- return loss, log
104
-
105
- if optimizer_idx == 1:
106
- # second pass for discriminator update
107
- if cond is None:
108
- logits_real = self.discriminator(inputs.contiguous().detach())
109
- logits_fake = self.discriminator(reconstructions.contiguous().detach())
110
- else:
111
- logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
112
- logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
113
-
114
- disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
115
- d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
116
-
117
- log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
118
- "{}/logits_real".format(split): logits_real.detach().mean(),
119
- "{}/logits_fake".format(split): logits_fake.detach().mean()
120
- }
121
- return d_loss, log
122
-
123
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGText/GlyphControl/ldm/modules/midas/api.py DELETED
@@ -1,170 +0,0 @@
1
- # based on https://github.com/isl-org/MiDaS
2
-
3
- import cv2
4
- import torch
5
- import torch.nn as nn
6
- from torchvision.transforms import Compose
7
-
8
- from ldm.modules.midas.midas.dpt_depth import DPTDepthModel
9
- from ldm.modules.midas.midas.midas_net import MidasNet
10
- from ldm.modules.midas.midas.midas_net_custom import MidasNet_small
11
- from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
12
-
13
-
14
- ISL_PATHS = {
15
- "dpt_large": "midas_models/dpt_large-midas-2f21e586.pt",
16
- "dpt_hybrid": "midas_models/dpt_hybrid-midas-501f0c75.pt",
17
- "midas_v21": "",
18
- "midas_v21_small": "",
19
- }
20
-
21
-
22
- def disabled_train(self, mode=True):
23
- """Overwrite model.train with this function to make sure train/eval mode
24
- does not change anymore."""
25
- return self
26
-
27
-
28
- def load_midas_transform(model_type):
29
- # https://github.com/isl-org/MiDaS/blob/master/run.py
30
- # load transform only
31
- if model_type == "dpt_large": # DPT-Large
32
- net_w, net_h = 384, 384
33
- resize_mode = "minimal"
34
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
35
-
36
- elif model_type == "dpt_hybrid": # DPT-Hybrid
37
- net_w, net_h = 384, 384
38
- resize_mode = "minimal"
39
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
40
-
41
- elif model_type == "midas_v21":
42
- net_w, net_h = 384, 384
43
- resize_mode = "upper_bound"
44
- normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
45
-
46
- elif model_type == "midas_v21_small":
47
- net_w, net_h = 256, 256
48
- resize_mode = "upper_bound"
49
- normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
50
-
51
- else:
52
- assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
53
-
54
- transform = Compose(
55
- [
56
- Resize(
57
- net_w,
58
- net_h,
59
- resize_target=None,
60
- keep_aspect_ratio=True,
61
- ensure_multiple_of=32,
62
- resize_method=resize_mode,
63
- image_interpolation_method=cv2.INTER_CUBIC,
64
- ),
65
- normalization,
66
- PrepareForNet(),
67
- ]
68
- )
69
-
70
- return transform
71
-
72
-
73
- def load_model(model_type):
74
- # https://github.com/isl-org/MiDaS/blob/master/run.py
75
- # load network
76
- model_path = ISL_PATHS[model_type]
77
- if model_type == "dpt_large": # DPT-Large
78
- model = DPTDepthModel(
79
- path=model_path,
80
- backbone="vitl16_384",
81
- non_negative=True,
82
- )
83
- net_w, net_h = 384, 384
84
- resize_mode = "minimal"
85
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
86
-
87
- elif model_type == "dpt_hybrid": # DPT-Hybrid
88
- model = DPTDepthModel(
89
- path=model_path,
90
- backbone="vitb_rn50_384",
91
- non_negative=True,
92
- )
93
- net_w, net_h = 384, 384
94
- resize_mode = "minimal"
95
- normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
96
-
97
- elif model_type == "midas_v21":
98
- model = MidasNet(model_path, non_negative=True)
99
- net_w, net_h = 384, 384
100
- resize_mode = "upper_bound"
101
- normalization = NormalizeImage(
102
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
103
- )
104
-
105
- elif model_type == "midas_v21_small":
106
- model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
107
- non_negative=True, blocks={'expand': True})
108
- net_w, net_h = 256, 256
109
- resize_mode = "upper_bound"
110
- normalization = NormalizeImage(
111
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
112
- )
113
-
114
- else:
115
- print(f"model_type '{model_type}' not implemented, use: --model_type large")
116
- assert False
117
-
118
- transform = Compose(
119
- [
120
- Resize(
121
- net_w,
122
- net_h,
123
- resize_target=None,
124
- keep_aspect_ratio=True,
125
- ensure_multiple_of=32,
126
- resize_method=resize_mode,
127
- image_interpolation_method=cv2.INTER_CUBIC,
128
- ),
129
- normalization,
130
- PrepareForNet(),
131
- ]
132
- )
133
-
134
- return model.eval(), transform
135
-
136
-
137
- class MiDaSInference(nn.Module):
138
- MODEL_TYPES_TORCH_HUB = [
139
- "DPT_Large",
140
- "DPT_Hybrid",
141
- "MiDaS_small"
142
- ]
143
- MODEL_TYPES_ISL = [
144
- "dpt_large",
145
- "dpt_hybrid",
146
- "midas_v21",
147
- "midas_v21_small",
148
- ]
149
-
150
- def __init__(self, model_type):
151
- super().__init__()
152
- assert (model_type in self.MODEL_TYPES_ISL)
153
- model, _ = load_model(model_type)
154
- self.model = model
155
- self.model.train = disabled_train
156
-
157
- def forward(self, x):
158
- # x in 0..1 as produced by calling self.transform on a 0..1 float64 numpy array
159
- # NOTE: we expect that the correct transform has been called during dataloading.
160
- with torch.no_grad():
161
- prediction = self.model(x)
162
- prediction = torch.nn.functional.interpolate(
163
- prediction.unsqueeze(1),
164
- size=x.shape[2:],
165
- mode="bicubic",
166
- align_corners=False,
167
- )
168
- assert prediction.shape == (x.shape[0], 1, x.shape[2], x.shape[3])
169
- return prediction
170
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AISuperheroes/01ST-CSV-Dataset-Analyzer/app.py DELETED
@@ -1,83 +0,0 @@
1
- import streamlit as st
2
- import pandas as pd
3
- import traceback
4
- import sys
5
-
6
- from st_aggrid import AgGrid
7
- from st_aggrid.grid_options_builder import GridOptionsBuilder
8
- from st_aggrid.shared import JsCode
9
- from download import download_button
10
- from st_aggrid import GridUpdateMode, DataReturnMode
11
-
12
- # Page config is set once with icon title and display style. Wide mode since we want screen real estate for wide CSV files
13
- st.set_page_config(page_icon="📝", page_title="📝CSV Data Analyzer📊", layout="wide")
14
-
15
- # Style
16
- def _max_width_():
17
-     max_width_str = "max-width: 1800px;"
18
- st.markdown(
19
- f"""
20
- <style>
21
- .reportview-container .main .block-container{{
22
- {max_width_str}
23
- }}
24
- </style>
25
- """,
26
- unsafe_allow_html=True,
27
- )
28
-
29
- # Title Bar with Images and Icons
30
- col1, col2, col3 = st.columns([1,6,1])
31
- with col1:
32
- st.image("https://cdnb.artstation.com/p/assets/images/images/054/910/875/large/aaron-wacker-cyberpunk-computer-brain-design.jpg?1665656558",width=128,)
33
- with col2:
34
- st.title("📝 CSV Data Analyzer 📊")
35
- with col3:
36
- st.image("https://cdna.artstation.com/p/assets/images/images/054/910/878/large/aaron-wacker-cyberpunk-computer-devices-iot.jpg?1665656564",width=128,)
37
-
38
- # Upload
39
- c29, c30, c31 = st.columns([1, 6, 1])
40
- with c30:
41
- uploaded_file = st.file_uploader("", key="1", help="To activate 'wide mode', go to the menu > Settings > turn on 'wide mode'",)
42
- if uploaded_file is not None:
43
- file_container = st.expander("Check your uploaded .csv")
44
- #try:
45
-         try:
46
-             shows = pd.read_csv(uploaded_file)
47
-         except Exception:
48
-             st.error(traceback.format_exc()); st.stop()
49
- uploaded_file.seek(0)
50
- file_container.write(shows)
51
- else:
52
- st.info(f"""⬆️Upload a 📝.CSV file. Examples: [Chatbot](https://huggingface.co/datasets/awacke1/Carddata.csv) [Mindfulness](https://huggingface.co/datasets/awacke1/MindfulStory.csv) [Wikipedia](https://huggingface.co/datasets/awacke1/WikipediaSearch)""")
53
- st.stop()
54
-
55
- # DisplayGrid
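- # Columns support pivoting, aggregation and row grouping; rows are multi-selectable via checkboxes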
56
- gb = GridOptionsBuilder.from_dataframe(shows)
57
- gb.configure_default_column(enablePivot=True, enableValue=True, enableRowGroup=True)
58
- gb.configure_selection(selection_mode="multiple", use_checkbox=True)
59
- gb.configure_side_bar()
60
- gridOptions = gb.build()
61
- st.success(f"""💡 Tip! Hold shift key when selecting rows to select multiple rows at once.""")
62
- response = AgGrid(
63
- shows,
64
- gridOptions=gridOptions,
65
- enable_enterprise_modules=True,
66
- update_mode=GridUpdateMode.MODEL_CHANGED,
67
- data_return_mode=DataReturnMode.FILTERED_AND_SORTED,
68
- fit_columns_on_grid_load=False,
69
- )
70
-
71
- # Filters
72
- df = pd.DataFrame(response["selected_rows"])
73
- st.subheader("Filtered data will appear below 📊 ")
74
- st.text("")
75
- st.table(df)
76
- st.text("")
77
-
78
- # Download
79
- c29, c30, c31 = st.columns([1, 1, 2])
80
- with c29:
81
- CSVButton = download_button(df,"Dataset.csv","Download CSV file",)
82
- with c30:
83
- CSVButton = download_button(df,"Dataset.txt","Download TXT file",)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIZero2HeroBootcamp/Memory/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Memory
3
- emoji: 📚
4
- colorFrom: pink
5
- colorTo: indigo
6
- sdk: streamlit
7
- sdk_version: 1.21.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Acapellas/vocalinstrumentalremover/app.py DELETED
@@ -1,25 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from scipy.io.wavfile import write
4
-
5
-
6
- def inference(audio):
7
- os.makedirs("out", exist_ok=True)
8
- write('test.wav', audio[0], audio[1])
9
- os.system("python3 -m demucs.separate -n htdemucs --two-stems=vocals -d cpu test.wav -o out")
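-     # htdemucs with --two-stems=vocals writes vocals.wav and no_vocals.wav under out/htdemucs/test/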
10
- return "./out/htdemucs/test/vocals.wav","./out/htdemucs/test/no_vocals.wav"
11
-
12
- title = ""
13
- description = ""
14
- article = ""
15
-
16
- examples=[['test.mp3']]
17
- gr.Interface(
18
- inference,
19
- gr.Audio(type="numpy", label="Input"),
20
- [gr.Audio(type="filepath", label="Vocals"),gr.Audio(type="filepath", label="No Vocals / Instrumental")],
21
- title=title,
22
- description=description,
23
- article=article,
24
- examples=examples
25
- ).launch(enable_queue=True,debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/TapCell.js DELETED
@@ -1,20 +0,0 @@
1
- import Tap from '../../tap/Tap.js';
2
- import EmitCellEvent from './EmitCellEvent.js';
3
-
4
- const GetValue = Phaser.Utils.Objects.GetValue;
5
-
6
- var TapCell = function (table, tableConfig) {
7
- var tapConfig = GetValue(tableConfig, 'tap', undefined);
8
- if (tapConfig === false) {
9
- return;
10
- }
11
-
12
- table._tap = new Tap(table, tapConfig);
13
- table._tap
14
- .on('tap', function (tap, gameObject, lastPointer) {
15
- var eventName = `cell.${tap.tapsCount}tap`
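-             // e.g. 'cell.1tap' for a single tap, 'cell.2tap' for a double tap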
16
- EmitCellEvent(this.eventEmitter, eventName, tap.gameObject, tap.worldX, tap.worldY, lastPointer);
17
- }, this)
18
- };
19
-
20
- export default TapCell;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/Menu.d.ts DELETED
@@ -1,49 +0,0 @@
1
- // import * as Phaser from 'phaser';
2
- import Buttons from '../buttons/Buttons';
3
-
4
-
5
- export default Menu;
6
-
7
- declare namespace Menu {
8
-
9
- type EaseConfigTypes = number |
10
- {
11
- duration?: number,
12
- orientation?: 0 | 1 | 'x' | 'y' | 'h' | 'v',
13
- ease?: string
14
- }
15
-
16
- type ExpandEventTypes = 'button.click' | 'button.over';
17
-
18
- type SubMenuSideTypes = 0 | 1 | 2 | 3 | 'right' | 'down' | 'left' | 'up';
19
-
20
- interface IConfig extends Buttons.IConfig {
21
- items: any[],
22
-
23
- createBackgroundCallback?: (items: any[]) => Phaser.GameObjects.GameObject,
24
-
25
- createBackgroundCallbackScope?: object,
26
-
27
- createButtonCallback?: (item: any, index: number, items: any[]) => Phaser.GameObjects.GameObject,
28
-
29
- createButtonCallbackScope?: object,
30
-
31
- easeIn?: EaseConfigTypes,
32
- easeOut?: EaseConfigTypes,
33
-
34
- expandEvent?: ExpandEventTypes,
35
-
36
- subMenuSide?: SubMenuSideTypes,
37
- }
38
- }
39
-
40
- declare class Menu extends Buttons {
41
- constructor(
42
- scene: Phaser.Scene,
43
- config?: Menu.IConfig
44
- );
45
-
46
- collapse(): this;
47
-
48
- collapseSubMenu(): this;
49
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/代码重写为全英文_多线程.py DELETED
@@ -1,138 +0,0 @@
1
- import threading
2
- from request_llm.bridge_all import predict_no_ui_long_connection
3
- from toolbox import update_ui
4
- from toolbox import CatchException, write_results_to_file, report_execption
5
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit
6
-
7
- def extract_code_block_carefully(txt):
8
- splitted = txt.split('```')
9
- n_code_block_seg = len(splitted) - 1
10
- if n_code_block_seg <= 1: return txt
11
-     # In the remaining cases, strip the opening ``` and one closing ```
12
- txt_out = '```'.join(splitted[1:-1])
13
- return txt_out
14
-
15
-
16
-
17
- def break_txt_into_half_at_some_linebreak(txt):
18
- lines = txt.split('\n')
19
- n_lines = len(lines)
20
- pre = lines[:(n_lines//2)]
21
- post = lines[(n_lines//2):]
22
- return "\n".join(pre), "\n".join(post)
23
-
24
-
25
- @CatchException
26
- def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
27
-     # Step 1: clear the history to avoid input overflow
28
- history = []
29
-
30
-     # Step 2: try to import dependencies; if any are missing, suggest how to install them
31
- try:
32
- import tiktoken
33
- except:
34
- report_execption(chatbot, history,
35
- a = f"解析项目: {txt}",
36
- b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
37
-         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
38
- return
39
-
40
-     # Step 3: collect the files
41
- import time, glob, os, shutil, re
42
- os.makedirs('gpt_log/generated_english_version', exist_ok=True)
43
- os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
44
- file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
45
- [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
46
- # file_manifest = ['./toolbox.py']
47
- i_say_show_user_buffer = []
48
-
49
-     # Step 4: show something right away so the UI does not feel frozen
50
- for index, fp in enumerate(file_manifest):
51
- # if 'test_project' in fp: continue
52
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
53
- file_content = f.read()
54
- i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
55
- i_say_show_user_buffer.append(i_say_show_user)
56
- chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
57
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
58
-         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
59
-
60
-     # Step 5: truncation and processing under the token limit
61
- MAX_TOKEN = 3000
62
- from request_llm.bridge_all import model_info
63
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
64
- def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
65
-
66
-
67
-     # Step 6: the task (worker) function
68
- mutable_return = [None for _ in file_manifest]
69
- observe_window = [[""] for _ in file_manifest]
70
- def thread_worker(fp,index):
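-     # Worker: translate one file; the result goes to mutable_return[index], live progress to observe_window[index]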
71
- if index > 10:
72
- time.sleep(60)
73
-             print('OpenAI limits free users to 20 requests per minute; lowering the request rate.')
74
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
75
- file_content = f.read()
76
- i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
77
- try:
78
- gpt_say = ""
79
-             # split the code file into chunks
80
- file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN)
81
- for file_content_partial in file_content_breakdown:
82
- i_say = i_say_template(fp, file_content_partial)
83
- # # ** gpt request **
84
- gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
85
- gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
86
- gpt_say += gpt_say_partial
87
- mutable_return[index] = gpt_say
88
-         except ConnectionAbortedError as token_exceed_err:
89
-             print('At least one thread task failed: token limit exceeded.', token_exceed_err)
90
-         except Exception as e:
91
-             print('At least one thread task failed unexpectedly.', e)
92
-
93
-     # Step 7: start all worker threads at the same time
94
- handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)]
95
- for h in handles:
96
- h.daemon = True
97
- h.start()
98
- chatbot.append(('开始了吗?', f'多线程操作已经开始'))
99
-     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
100
-
101
-     # Step 8: poll in a loop until every thread has finished
102
- cnt = 0
103
- while True:
104
- cnt += 1
105
- time.sleep(0.2)
106
- th_alive = [h.is_alive() for h in handles]
107
- if not any(th_alive): break
108
-         # nicer UI visual feedback
109
- observe_win = []
110
- for thread_index, alive in enumerate(th_alive):
111
- observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('<br/>','.....').replace('$','.')+"... ]")
112
- stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
113
- stat_str = ''.join(stat)
114
- chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt%10+1)))
115
-         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
116
-
117
-     # Step 9: write the results to files
118
- for index, h in enumerate(handles):
119
-         h.join() # join is not really needed here; all threads are certain to have finished
120
- fp = file_manifest[index]
121
- gpt_say = mutable_return[index]
122
- i_say_show_user = i_say_show_user_buffer[index]
123
-
124
- where_to_relocate = f'gpt_log/generated_english_version/{fp}'
125
- if gpt_say is not None:
126
- with open(where_to_relocate, 'w+', encoding='utf-8') as f:
127
- f.write(gpt_say)
128
-         else: # failed
129
- shutil.copyfile(file_manifest[index], where_to_relocate)
130
- chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
131
- history.append(i_say_show_user); history.append(gpt_say)
132
-         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
133
- time.sleep(1)
134
-
135
-     # Step 10: save a backup report file
136
- res = write_results_to_file(history)
137
- chatbot.append(("生成一份任务执行报告", res))
138
-     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/model_irse.py DELETED
@@ -1,84 +0,0 @@
- from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
- from encoder4editing.models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
-
- """
- Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
- """
-
-
- class Backbone(Module):
-     def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
-         super(Backbone, self).__init__()
-         assert input_size in [112, 224], "input_size should be 112 or 224"
-         assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
-         assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
-         blocks = get_blocks(num_layers)
-         if mode == 'ir':
-             unit_module = bottleneck_IR
-         elif mode == 'ir_se':
-             unit_module = bottleneck_IR_SE
-         self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
-                                       BatchNorm2d(64),
-                                       PReLU(64))
-         if input_size == 112:
-             self.output_layer = Sequential(BatchNorm2d(512),
-                                            Dropout(drop_ratio),
-                                            Flatten(),
-                                            Linear(512 * 7 * 7, 512),
-                                            BatchNorm1d(512, affine=affine))
-         else:
-             self.output_layer = Sequential(BatchNorm2d(512),
-                                            Dropout(drop_ratio),
-                                            Flatten(),
-                                            Linear(512 * 14 * 14, 512),
-                                            BatchNorm1d(512, affine=affine))
-
-         modules = []
-         for block in blocks:
-             for bottleneck in block:
-                 modules.append(unit_module(bottleneck.in_channel,
-                                            bottleneck.depth,
-                                            bottleneck.stride))
-         self.body = Sequential(*modules)
-
-     def forward(self, x):
-         x = self.input_layer(x)
-         x = self.body(x)
-         x = self.output_layer(x)
-         return l2_norm(x)
-
-
- def IR_50(input_size):
-     """Constructs an ir-50 model."""
-     model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
-     return model
-
-
- def IR_101(input_size):
-     """Constructs an ir-101 model."""
-     model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
-     return model
-
-
- def IR_152(input_size):
-     """Constructs an ir-152 model."""
-     model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
-     return model
-
-
- def IR_SE_50(input_size):
-     """Constructs an ir_se-50 model."""
-     model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
-     return model
-
-
- def IR_SE_101(input_size):
-     """Constructs an ir_se-101 model."""
-     model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
-     return model
-
-
- def IR_SE_152(input_size):
-     """Constructs an ir_se-152 model."""
-     model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
-     return model
 
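A hedged usage sketch of the factory functions above, assuming this module and the `encoder4editing` helpers it imports are on the import path; the shapes follow the `input_size` assertion and the 512-d output layer:

```python
import torch
# Hypothetical import path; adjust to wherever model_irse.py lives in your tree.
from model_irse import IR_SE_50

model = IR_SE_50(input_size=112)
model.eval()
with torch.no_grad():
    x = torch.randn(2, 3, 112, 112)  # dummy aligned face crops
    emb = model(x)                   # (2, 512), L2-normalized by l2_norm
print(emb.shape, emb.norm(dim=1))    # norms should be ~1.0
```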
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_configs/hyperparameters.py DELETED
@@ -1,28 +0,0 @@
- # Architecture
- lpips_type = 'alex'
- first_inv_type = 'w+'  # 'w+'
- optim_type = 'adam'
-
- # Locality regularization
- latent_ball_num_of_samples = 1
- locality_regularization_interval = 1
- use_locality_regularization = False
- regulizer_l2_lambda = 0.1
- regulizer_lpips_lambda = 0.1
- regulizer_alpha = 30
-
- # Loss
- pt_l2_lambda = 1
- pt_lpips_lambda = 1
-
- # Steps
- LPIPS_value_threshold = 0.04
- max_pti_steps = 350
- first_inv_steps = 450
- max_images_to_invert = 30
-
- # Optimization
- pti_learning_rate = 5e-4
- first_inv_lr = 8e-3
- train_batch_size = 1
- use_last_w_pivots = False
 
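Because the file is a flat module of constants, the PTI training code can consume it as a namespace. A sketch of the pattern; the import path here is a guess from the file's location in the repo:

```python
# Hedged sketch: config-as-module access; import path inferred from the layout.
from pti.pti_configs import hyperparameters

optimizer_cfg = {
    'type': hyperparameters.optim_type,       # 'adam'
    'lr': hyperparameters.pti_learning_rate,  # 5e-4
}
# Early stop once reconstruction LPIPS drops below the configured threshold.
stop_early = lambda lpips: lpips < hyperparameters.LPIPS_value_threshold  # 0.04
```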
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py DELETED
@@ -1,473 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import warnings
- from functools import partial
- from typing import Dict, List, Optional, Union
-
- import jax
- import jax.numpy as jnp
- import numpy as np
- from flax.core.frozen_dict import FrozenDict
- from flax.jax_utils import unreplicate
- from flax.training.common_utils import shard
- from packaging import version
- from PIL import Image
- from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel
-
- from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel
- from ...schedulers import (
-     FlaxDDIMScheduler,
-     FlaxDPMSolverMultistepScheduler,
-     FlaxLMSDiscreteScheduler,
-     FlaxPNDMScheduler,
- )
- from ...utils import deprecate, logging, replace_example_docstring
- from ..pipeline_flax_utils import FlaxDiffusionPipeline
- from . import FlaxStableDiffusionPipelineOutput
- from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
- # Set to True to use python for loop instead of jax.fori_loop for easier debugging
- DEBUG = False
-
- EXAMPLE_DOC_STRING = """
-     Examples:
-         ```py
-         >>> import jax
-         >>> import numpy as np
-         >>> from flax.jax_utils import replicate
-         >>> from flax.training.common_utils import shard
-
-         >>> from diffusers import FlaxStableDiffusionPipeline
-
-         >>> pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-         ...     "runwayml/stable-diffusion-v1-5", revision="bf16", dtype=jax.numpy.bfloat16
-         ... )
-
-         >>> prompt = "a photo of an astronaut riding a horse on mars"
-
-         >>> prng_seed = jax.random.PRNGKey(0)
-         >>> num_inference_steps = 50
-
-         >>> num_samples = jax.device_count()
-         >>> prompt = num_samples * [prompt]
-         >>> prompt_ids = pipeline.prepare_inputs(prompt)
-         >>> # shard inputs and rng
-
-         >>> params = replicate(params)
-         >>> prng_seed = jax.random.split(prng_seed, jax.device_count())
-         >>> prompt_ids = shard(prompt_ids)
-
-         >>> images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
-         >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
-         ```
- """
-
-
- class FlaxStableDiffusionPipeline(FlaxDiffusionPipeline):
-     r"""
-     Flax-based pipeline for text-to-image generation using Stable Diffusion.
-
-     This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods
-     implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
-     Args:
-         vae ([`FlaxAutoencoderKL`]):
-             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
-         text_encoder ([`~transformers.FlaxCLIPTextModel`]):
-             Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
-         tokenizer ([`~transformers.CLIPTokenizer`]):
-             A `CLIPTokenizer` to tokenize text.
-         unet ([`FlaxUNet2DConditionModel`]):
-             A `FlaxUNet2DConditionModel` to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or
-             [`FlaxDPMSolverMultistepScheduler`].
-         safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
-             about a model's potential harms.
-         feature_extractor ([`~transformers.CLIPImageProcessor`]):
-             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
-     """
-
-     def __init__(
-         self,
-         vae: FlaxAutoencoderKL,
-         text_encoder: FlaxCLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: FlaxUNet2DConditionModel,
-         scheduler: Union[
-             FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
-         ],
-         safety_checker: FlaxStableDiffusionSafetyChecker,
-         feature_extractor: CLIPImageProcessor,
-         dtype: jnp.dtype = jnp.float32,
-     ):
-         super().__init__()
-         self.dtype = dtype
-
-         if safety_checker is None:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
-             version.parse(unet.config._diffusers_version).base_version
-         ) < version.parse("0.9.0.dev0")
-         is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
-         if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
-             deprecation_message = (
-                 "The configuration file of the unet has set the default `sample_size` to smaller than"
-                 " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
-                 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-                 " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-                 " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
-                 " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
-                 " in the config might lead to incorrect results in future versions. If you have downloaded this"
-                 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
-                 " the `unet/config.json` file"
-             )
-             deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(unet.config)
-             new_config["sample_size"] = 64
-             unet._internal_dict = FrozenDict(new_config)
-
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-
-     def prepare_inputs(self, prompt: Union[str, List[str]]):
-         if not isinstance(prompt, (str, list)):
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         text_input = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             truncation=True,
-             return_tensors="np",
-         )
-         return text_input.input_ids
-
-     def _get_has_nsfw_concepts(self, features, params):
-         has_nsfw_concepts = self.safety_checker(features, params)
-         return has_nsfw_concepts
-
-     def _run_safety_checker(self, images, safety_model_params, jit=False):
-         # safety_model_params should already be replicated when jit is True
-         pil_images = [Image.fromarray(image) for image in images]
-         features = self.feature_extractor(pil_images, return_tensors="np").pixel_values
-
-         if jit:
-             features = shard(features)
-             has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
-             has_nsfw_concepts = unshard(has_nsfw_concepts)
-             safety_model_params = unreplicate(safety_model_params)
-         else:
-             has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)
-
-         images_was_copied = False
-         for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
-             if has_nsfw_concept:
-                 if not images_was_copied:
-                     images_was_copied = True
-                     images = images.copy()
-
-                 images[idx] = np.zeros(images[idx].shape, dtype=np.uint8)  # black image
-
-         if any(has_nsfw_concepts):
-             warnings.warn(
-                 "Potential NSFW content was detected in one or more images. A black image will be returned"
-                 " instead. Try again with a different prompt and/or seed."
-             )
-
-         return images, has_nsfw_concepts
-
-     def _generate(
-         self,
-         prompt_ids: jnp.array,
-         params: Union[Dict, FrozenDict],
-         prng_seed: jax.random.KeyArray,
-         num_inference_steps: int,
-         height: int,
-         width: int,
-         guidance_scale: float,
-         latents: Optional[jnp.array] = None,
-         neg_prompt_ids: Optional[jnp.array] = None,
-     ):
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         # get prompt text embeddings
-         prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]
-
-         # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
-         # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
-         batch_size = prompt_ids.shape[0]
-
-         max_length = prompt_ids.shape[-1]
-
-         if neg_prompt_ids is None:
-             uncond_input = self.tokenizer(
-                 [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
-             ).input_ids
-         else:
-             uncond_input = neg_prompt_ids
-         negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
-         context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])
-
-         # Ensure model output will be `float32` before going into the scheduler
-         guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32)
-
-         latents_shape = (
-             batch_size,
-             self.unet.config.in_channels,
-             height // self.vae_scale_factor,
-             width // self.vae_scale_factor,
-         )
-         if latents is None:
-             latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32)
-         else:
-             if latents.shape != latents_shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
-
-         def loop_body(step, args):
-             latents, scheduler_state = args
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             latents_input = jnp.concatenate([latents] * 2)
-
-             t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
-             timestep = jnp.broadcast_to(t, latents_input.shape[0])
-
-             latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t)
-
-             # predict the noise residual
-             noise_pred = self.unet.apply(
-                 {"params": params["unet"]},
-                 jnp.array(latents_input),
-                 jnp.array(timestep, dtype=jnp.int32),
-                 encoder_hidden_states=context,
-             ).sample
-             # perform guidance
-             noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0)
-             noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
-
-             # compute the previous noisy sample x_t -> x_t-1
-             latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
-             return latents, scheduler_state
-
-         scheduler_state = self.scheduler.set_timesteps(
-             params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape
-         )
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * params["scheduler"].init_noise_sigma
-
-         if DEBUG:
-             # run with python for loop
-             for i in range(num_inference_steps):
-                 latents, scheduler_state = loop_body(i, (latents, scheduler_state))
-         else:
-             latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state))
-
-         # scale and decode the image latents with vae
-         latents = 1 / self.vae.config.scaling_factor * latents
-         image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample
-
-         image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
-         return image
-
-     @replace_example_docstring(EXAMPLE_DOC_STRING)
-     def __call__(
-         self,
-         prompt_ids: jnp.array,
-         params: Union[Dict, FrozenDict],
-         prng_seed: jax.random.KeyArray,
-         num_inference_steps: int = 50,
-         height: Optional[int] = None,
-         width: Optional[int] = None,
-         guidance_scale: Union[float, jnp.array] = 7.5,
-         latents: jnp.array = None,
-         neg_prompt_ids: jnp.array = None,
-         return_dict: bool = True,
-         jit: bool = False,
-     ):
-         r"""
-         The call function to the pipeline for generation.
-
-         Args:
-             prompt_ids (`jnp.array`):
-                 The tokenized prompt or prompts to guide image generation.
-             height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 A higher guidance scale value encourages the model to generate images closely linked to the text
-                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
-             latents (`jnp.array`, *optional*):
-                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a
-                 latents array is generated by sampling using the supplied random `prng_seed`.
-             jit (`bool`, defaults to `False`):
-                 Whether to run `pmap` versions of the generation and safety scoring functions.
-
-                 <Tip warning={true}>
-
-                 This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a
-                 future release.
-
-                 </Tip>
-
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead
-                 of a plain tuple.
-
-         Examples:
-
-         Returns:
-             [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`:
-                 If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is
-                 returned, otherwise a `tuple` is returned where the first element is a list with the generated images
-                 and the second element is a list of `bool`s indicating whether the corresponding generated image
-                 contains "not-safe-for-work" (nsfw) content.
-         """
-         # 0. Default height and width to unet
-         height = height or self.unet.config.sample_size * self.vae_scale_factor
-         width = width or self.unet.config.sample_size * self.vae_scale_factor
-
-         if isinstance(guidance_scale, float):
-             # Convert to a tensor so each device gets a copy. Follow the prompt_ids for
-             # shape information, as they may be sharded (when `jit` is `True`), or not.
-             guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0])
-             if len(prompt_ids.shape) > 2:
-                 # Assume sharded
-                 guidance_scale = guidance_scale[:, None]
-
-         if jit:
-             images = _p_generate(
-                 self,
-                 prompt_ids,
-                 params,
-                 prng_seed,
-                 num_inference_steps,
-                 height,
-                 width,
-                 guidance_scale,
-                 latents,
-                 neg_prompt_ids,
-             )
-         else:
-             images = self._generate(
-                 prompt_ids,
-                 params,
-                 prng_seed,
-                 num_inference_steps,
-                 height,
-                 width,
-                 guidance_scale,
-                 latents,
-                 neg_prompt_ids,
-             )
-
-         if self.safety_checker is not None:
-             safety_params = params["safety_checker"]
-             images_uint8_casted = (images * 255).round().astype("uint8")
-             num_devices, batch_size = images.shape[:2]
-
-             images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
-             images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
-             images = np.asarray(images)
-
-             # block images
-             if any(has_nsfw_concept):
-                 for i, is_nsfw in enumerate(has_nsfw_concept):
-                     if is_nsfw:
-                         images[i] = np.asarray(images_uint8_casted[i])
-
-             images = images.reshape(num_devices, batch_size, height, width, 3)
-         else:
-             images = np.asarray(images)
-             has_nsfw_concept = False
-
-         if not return_dict:
-             return (images, has_nsfw_concept)
-
-         return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
-
-
- # Static argnums are pipe, num_inference_steps, height, width. A change would trigger recompilation.
- # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`).
- @partial(
-     jax.pmap,
-     in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0),
-     static_broadcasted_argnums=(0, 4, 5, 6),
- )
- def _p_generate(
-     pipe,
-     prompt_ids,
-     params,
-     prng_seed,
-     num_inference_steps,
-     height,
-     width,
-     guidance_scale,
-     latents,
-     neg_prompt_ids,
- ):
-     return pipe._generate(
-         prompt_ids,
-         params,
-         prng_seed,
-         num_inference_steps,
-         height,
-         width,
-         guidance_scale,
-         latents,
-         neg_prompt_ids,
-     )
-
-
- @partial(jax.pmap, static_broadcasted_argnums=(0,))
- def _p_get_has_nsfw_concepts(pipe, features, params):
-     return pipe._get_has_nsfw_concepts(features, params)
-
-
- def unshard(x: jnp.ndarray):
-     # einops.rearrange(x, 'd b ... -> (d b) ...')
-     num_devices, batch_size = x.shape[:2]
-     rest = x.shape[2:]
-     return x.reshape(num_devices * batch_size, *rest)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/__init__.py DELETED
@@ -1,20 +0,0 @@
- from ...utils import (
-     OptionalDependencyNotAvailable,
-     is_torch_available,
-     is_transformers_available,
-     is_transformers_version,
- )
-
-
- try:
-     if not (is_transformers_available() and is_torch_available()):
-         raise OptionalDependencyNotAvailable()
- except OptionalDependencyNotAvailable:
-     from ...utils.dummy_torch_and_transformers_objects import (
-         ImageTextPipelineOutput,
-         UniDiffuserPipeline,
-     )
- else:
-     from .modeling_text_decoder import UniDiffuserTextDecoder
-     from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
-     from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
 
spaces/Andy1621/uniformer_image_detection/configs/atss/README.md DELETED
@@ -1,21 +0,0 @@
- # Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection
-
- ## Introduction
-
- [ALGORITHM]
-
- ```latex
- @article{zhang2019bridging,
-   title = {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection},
-   author = {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.},
-   journal = {arXiv preprint arXiv:1912.02424},
-   year = {2019}
- }
- ```
-
- ## Results and Models
-
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
- |:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
- | R-50 | pytorch | 1x | 3.7 | 19.7 | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss/atss_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209_102539.log.json) |
- | R-101 | pytorch | 1x | 5.6 | 12.3 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss/atss_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.log.json) |
 
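The config/checkpoint pairs in the table plug into MMDetection's standard inference API. A hedged sketch, assuming `mmdet` v2.x is installed and the R-50 checkpoint from the table has been downloaded locally:

```python
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/atss/atss_r50_fpn_1x_coco.py'
checkpoint_file = 'checkpoints/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth'  # local copy

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # list of per-class bbox arrays
```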
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/post_processing/__init__.py DELETED
@@ -1,8 +0,0 @@
- from .bbox_nms import fast_nms, multiclass_nms
- from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
-                          merge_aug_proposals, merge_aug_scores)
-
- __all__ = [
-     'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
-     'merge_aug_scores', 'merge_aug_masks', 'fast_nms'
- ]
- ]
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/dynamic_roi_head.py DELETED
@@ -1,154 +0,0 @@
- import numpy as np
- import torch
-
- from mmdet.core import bbox2roi
- from mmdet.models.losses import SmoothL1Loss
- from ..builder import HEADS
- from .standard_roi_head import StandardRoIHead
-
- EPS = 1e-15
-
-
- @HEADS.register_module()
- class DynamicRoIHead(StandardRoIHead):
-     """RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_."""
-
-     def __init__(self, **kwargs):
-         super(DynamicRoIHead, self).__init__(**kwargs)
-         assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
-         # the IoU history of the past `update_iter_interval` iterations
-         self.iou_history = []
-         # the beta history of the past `update_iter_interval` iterations
-         self.beta_history = []
-
-     def forward_train(self,
-                       x,
-                       img_metas,
-                       proposal_list,
-                       gt_bboxes,
-                       gt_labels,
-                       gt_bboxes_ignore=None,
-                       gt_masks=None):
-         """Forward function for training.
-
-         Args:
-             x (list[Tensor]): list of multi-level img features.
-
-             img_metas (list[dict]): list of image info dicts where each dict
-                 has: 'img_shape', 'scale_factor', 'flip', and may also contain
-                 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
-                 For details on the values of these keys see
-                 `mmdet/datasets/pipelines/formatting.py:Collect`.
-
-             proposal_list (list[Tensor]): list of region proposals.
-
-             gt_bboxes (list[Tensor]): each item is the ground-truth boxes for
-                 one image, in [tl_x, tl_y, br_x, br_y] format.
-
-             gt_labels (list[Tensor]): class indices corresponding to each box.
-
-             gt_bboxes_ignore (None | list[Tensor]): specifies which bounding
-                 boxes can be ignored when computing the loss.
-
-             gt_masks (None | Tensor): true segmentation masks for each box,
-                 used if the architecture supports a segmentation task.
-
-         Returns:
-             dict[str, Tensor]: a dictionary of loss components
-         """
-         # assign gts and sample proposals
-         if self.with_bbox or self.with_mask:
-             num_imgs = len(img_metas)
-             if gt_bboxes_ignore is None:
-                 gt_bboxes_ignore = [None for _ in range(num_imgs)]
-             sampling_results = []
-             cur_iou = []
-             for i in range(num_imgs):
-                 assign_result = self.bbox_assigner.assign(
-                     proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
-                     gt_labels[i])
-                 sampling_result = self.bbox_sampler.sample(
-                     assign_result,
-                     proposal_list[i],
-                     gt_bboxes[i],
-                     gt_labels[i],
-                     feats=[lvl_feat[i][None] for lvl_feat in x])
-                 # record the `iou_topk`-th largest IoU in an image
-                 iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
-                                len(assign_result.max_overlaps))
-                 ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
-                 cur_iou.append(ious[-1].item())
-                 sampling_results.append(sampling_result)
-             # average the current IoUs over images
-             cur_iou = np.mean(cur_iou)
-             self.iou_history.append(cur_iou)
-
-         losses = dict()
-         # bbox head forward and loss
-         if self.with_bbox:
-             bbox_results = self._bbox_forward_train(x, sampling_results,
-                                                     gt_bboxes, gt_labels,
-                                                     img_metas)
-             losses.update(bbox_results['loss_bbox'])
-
-         # mask head forward and loss
-         if self.with_mask:
-             mask_results = self._mask_forward_train(x, sampling_results,
-                                                     bbox_results['bbox_feats'],
-                                                     gt_masks, img_metas)
-             losses.update(mask_results['loss_mask'])
-
-         # update IoU threshold and SmoothL1 beta
-         update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
-         if len(self.iou_history) % update_iter_interval == 0:
-             new_iou_thr, new_beta = self.update_hyperparameters()
-
-         return losses
-
-     def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
-                             img_metas):
-         num_imgs = len(img_metas)
-         rois = bbox2roi([res.bboxes for res in sampling_results])
-         bbox_results = self._bbox_forward(x, rois)
-
-         bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
-                                                   gt_labels, self.train_cfg)
-         # record the `beta_topk`-th smallest target
-         # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
-         # and bbox_weights, respectively
-         pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
-         num_pos = len(pos_inds)
-         cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
-         beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
-                         num_pos)
-         cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
-         self.beta_history.append(cur_target)
-         loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
-                                         bbox_results['bbox_pred'], rois,
-                                         *bbox_targets)
-
-         bbox_results.update(loss_bbox=loss_bbox)
-         return bbox_results
-
-     def update_hyperparameters(self):
-         """Update hyperparameters like the IoU threshold of the assigner and
-         the beta of the SmoothL1 loss based on the training statistics.
-
-         Returns:
-             tuple[float]: the updated ``iou_thr`` and ``beta``.
-         """
-         new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
-                           np.mean(self.iou_history))
-         self.iou_history = []
-         self.bbox_assigner.pos_iou_thr = new_iou_thr
-         self.bbox_assigner.neg_iou_thr = new_iou_thr
-         self.bbox_assigner.min_pos_iou = new_iou_thr
-         if (np.median(self.beta_history) < EPS):
-             # avoid 0 or too small a value for new_beta
-             new_beta = self.bbox_head.loss_bbox.beta
-         else:
-             new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
-                            np.median(self.beta_history))
-         self.beta_history = []
-         self.bbox_head.loss_bbox.beta = new_beta
-         return new_iou_thr, new_beta
 
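Stripped of the detection machinery, `update_hyperparameters()` reduces to two running statistics. A standalone sketch of the same arithmetic, with illustrative values:

```python
import numpy as np

EPS = 1e-15

def dynamic_update(iou_history, beta_history, initial_iou=0.4, initial_beta=1.0, old_beta=1.0):
    # The IoU threshold tracks the mean matched IoU, so it rises as proposals improve.
    new_iou_thr = max(initial_iou, np.mean(iou_history))
    # SmoothL1 beta tracks the median regression error, but never collapses to ~0.
    med = np.median(beta_history)
    new_beta = old_beta if med < EPS else min(initial_beta, med)
    return new_iou_thr, new_beta

print(dynamic_update([0.45, 0.50, 0.55], [0.8, 0.6, 0.7]))  # -> (0.5, 0.7)
```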
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/grid_roi_head.py DELETED
@@ -1,176 +0,0 @@
- import torch
-
- from mmdet.core import bbox2result, bbox2roi
- from ..builder import HEADS, build_head, build_roi_extractor
- from .standard_roi_head import StandardRoIHead
-
-
- @HEADS.register_module()
- class GridRoIHead(StandardRoIHead):
-     """Grid RoI head for Grid R-CNN.
-
-     https://arxiv.org/abs/1811.12030
-     """
-
-     def __init__(self, grid_roi_extractor, grid_head, **kwargs):
-         assert grid_head is not None
-         super(GridRoIHead, self).__init__(**kwargs)
-         if grid_roi_extractor is not None:
-             self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor)
-             self.share_roi_extractor = False
-         else:
-             self.share_roi_extractor = True
-             self.grid_roi_extractor = self.bbox_roi_extractor
-         self.grid_head = build_head(grid_head)
-
-     def init_weights(self, pretrained):
-         """Initialize the weights in the head.
-
-         Args:
-             pretrained (str, optional): Path to pre-trained weights.
-                 Defaults to None.
-         """
-         super(GridRoIHead, self).init_weights(pretrained)
-         self.grid_head.init_weights()
-         if not self.share_roi_extractor:
-             self.grid_roi_extractor.init_weights()
-
-     def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
-         """Randomly jitter positive proposals for training."""
-         for sampling_result, img_meta in zip(sampling_results, img_metas):
-             bboxes = sampling_result.pos_bboxes
-             random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
-                 -amplitude, amplitude)
-             # before jittering
-             cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
-             wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
-             # after jittering
-             new_cxcy = cxcy + wh * random_offsets[:, :2]
-             new_wh = wh * (1 + random_offsets[:, 2:])
-             # xywh to xyxy
-             new_x1y1 = (new_cxcy - new_wh / 2)
-             new_x2y2 = (new_cxcy + new_wh / 2)
-             new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
-             # clip bboxes
-             max_shape = img_meta['img_shape']
-             if max_shape is not None:
-                 new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
-                 new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)
-
-             sampling_result.pos_bboxes = new_bboxes
-         return sampling_results
-
-     def forward_dummy(self, x, proposals):
-         """Dummy forward function."""
-         # bbox head
-         outs = ()
-         rois = bbox2roi([proposals])
-         if self.with_bbox:
-             bbox_results = self._bbox_forward(x, rois)
-             outs = outs + (bbox_results['cls_score'],
-                            bbox_results['bbox_pred'])
-
-         # grid head
-         grid_rois = rois[:100]
-         grid_feats = self.grid_roi_extractor(
-             x[:self.grid_roi_extractor.num_inputs], grid_rois)
-         if self.with_shared_head:
-             grid_feats = self.shared_head(grid_feats)
-         grid_pred = self.grid_head(grid_feats)
-         outs = outs + (grid_pred, )
-
-         # mask head
-         if self.with_mask:
-             mask_rois = rois[:100]
-             mask_results = self._mask_forward(x, mask_rois)
-             outs = outs + (mask_results['mask_pred'], )
-         return outs
-
-     def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
-                             img_metas):
-         """Run forward function and calculate loss for box head in training."""
-         bbox_results = super(GridRoIHead,
-                              self)._bbox_forward_train(x, sampling_results,
-                                                        gt_bboxes, gt_labels,
-                                                        img_metas)
-
-         # Grid head forward and loss
-         sampling_results = self._random_jitter(sampling_results, img_metas)
-         pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
-
-         # GN in head does not support zero-shape input
-         if pos_rois.shape[0] == 0:
-             return bbox_results
-
-         grid_feats = self.grid_roi_extractor(
-             x[:self.grid_roi_extractor.num_inputs], pos_rois)
-         if self.with_shared_head:
-             grid_feats = self.shared_head(grid_feats)
-         # Accelerate training
-         max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)
-         sample_idx = torch.randperm(
-             grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid)]
-         grid_feats = grid_feats[sample_idx]
-
-         grid_pred = self.grid_head(grid_feats)
-
-         grid_targets = self.grid_head.get_targets(sampling_results,
-                                                   self.train_cfg)
-         grid_targets = grid_targets[sample_idx]
-
-         loss_grid = self.grid_head.loss(grid_pred, grid_targets)
-
-         bbox_results['loss_bbox'].update(loss_grid)
-         return bbox_results
-
-     def simple_test(self,
-                     x,
-                     proposal_list,
-                     img_metas,
-                     proposals=None,
-                     rescale=False):
-         """Test without augmentation."""
-         assert self.with_bbox, 'Bbox head must be implemented.'
-
-         det_bboxes, det_labels = self.simple_test_bboxes(
-             x, img_metas, proposal_list, self.test_cfg, rescale=False)
-         # pack rois into bboxes
-         grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes])
-         if grid_rois.shape[0] != 0:
-             grid_feats = self.grid_roi_extractor(
-                 x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
-             self.grid_head.test_mode = True
-             grid_pred = self.grid_head(grid_feats)
-             # split the batched grid head prediction back to each image
-             num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes)
-             grid_pred = {
-                 k: v.split(num_roi_per_img, 0)
-                 for k, v in grid_pred.items()
-             }
-
-             # apply bbox post-processing to each image individually
-             bbox_results = []
-             num_imgs = len(det_bboxes)
-             for i in range(num_imgs):
-                 if det_bboxes[i].shape[0] == 0:
-                     bbox_results.append(grid_rois.new_tensor([]))
-                 else:
-                     det_bbox = self.grid_head.get_bboxes(
-                         det_bboxes[i], grid_pred['fused'][i], [img_metas[i]])
-                     if rescale:
-                         det_bbox[:, :4] /= img_metas[i]['scale_factor']
-                     bbox_results.append(
-                         bbox2result(det_bbox, det_labels[i],
-                                     self.bbox_head.num_classes))
-         else:
-             bbox_results = [
-                 grid_rois.new_tensor([]) for _ in range(len(det_bboxes))
-             ]
-
-         if not self.with_mask:
-             return bbox_results
-         else:
-             segm_results = self.simple_test_mask(
-                 x, img_metas, det_bboxes, det_labels, rescale=rescale)
-             return list(zip(bbox_results, segm_results))
 
spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './apcnet_r50-d8_769x769_40k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/chromadb.py DELETED
@@ -1,376 +0,0 @@
- import threading
- import chromadb
- import posthog
- import torch
- import math
-
- import numpy as np
- import extensions.superboogav2.parameters as parameters
-
- from chromadb.config import Settings
- from sentence_transformers import SentenceTransformer
-
- from modules.logging_colors import logger
- from modules.text_generation import encode, decode
-
- logger.debug('Intercepting all calls to posthog.')
- posthog.capture = lambda *args, **kwargs: None
-
-
- class Collecter():
-     def __init__(self):
-         pass
-
-     def add(self, texts: list[str], texts_with_context: list[str], starting_indices: list[int]):
-         pass
-
-     def get(self, search_strings: list[str], n_results: int) -> list[str]:
-         pass
-
-     def clear(self):
-         pass
-
-
- class Embedder():
-     def __init__(self):
-         pass
-
-     def embed(self, text: str) -> list[torch.Tensor]:
-         pass
-
- class Info:
-     def __init__(self, start_index, text_with_context, distance, id):
-         self.text_with_context = text_with_context
-         self.start_index = start_index
-         self.distance = distance
-         self.id = id
-
-     def calculate_distance(self, other_info):
-         if parameters.get_new_dist_strategy() == parameters.DIST_MIN_STRATEGY:
-             # Min
-             return min(self.distance, other_info.distance)
-         elif parameters.get_new_dist_strategy() == parameters.DIST_HARMONIC_STRATEGY:
-             # Harmonic mean
-             return 2 * (self.distance * other_info.distance) / (self.distance + other_info.distance)
-         elif parameters.get_new_dist_strategy() == parameters.DIST_GEOMETRIC_STRATEGY:
-             # Geometric mean
-             return (self.distance * other_info.distance) ** 0.5
-         elif parameters.get_new_dist_strategy() == parameters.DIST_ARITHMETIC_STRATEGY:
-             # Arithmetic mean
-             return (self.distance + other_info.distance) / 2
-         else:  # Min is default
-             return min(self.distance, other_info.distance)
-
-     def merge_with(self, other_info):
-         s1 = self.text_with_context
-         s2 = other_info.text_with_context
-         s1_start = self.start_index
-         s2_start = other_info.start_index
-
-         new_dist = self.calculate_distance(other_info)
-
-         if self.should_merge(s1, s2, s1_start, s2_start):
-             if s1_start <= s2_start:
-                 if s1_start + len(s1) >= s2_start + len(s2):  # if s1 completely covers s2
-                     return Info(s1_start, s1, new_dist, self.id)
-                 else:
-                     overlap = max(0, s1_start + len(s1) - s2_start)
-                     return Info(s1_start, s1 + s2[overlap:], new_dist, self.id)
-             else:
-                 if s2_start + len(s2) >= s1_start + len(s1):  # if s2 completely covers s1
-                     return Info(s2_start, s2, new_dist, other_info.id)
-                 else:
-                     overlap = max(0, s2_start + len(s2) - s1_start)
-                     return Info(s2_start, s2 + s1[overlap:], new_dist, other_info.id)
-
-         return None
-
-     @staticmethod
-     def should_merge(s1, s2, s1_start, s2_start):
-         # Check if s1 and s2 are adjacent or overlapping
-         s1_end = s1_start + len(s1)
-         s2_end = s2_start + len(s2)
-
-         return not (s1_end < s2_start or s2_end < s1_start)
-
- class ChromaCollector(Collecter):
-     def __init__(self, embedder: Embedder):
-         super().__init__()
-         self.chroma_client = chromadb.Client(Settings(anonymized_telemetry=False))
-         self.embedder = embedder
-         self.collection = self.chroma_client.create_collection(name="context", embedding_function=self.embedder.embed)
-         self.ids = []
-         self.id_to_info = {}
-         self.embeddings_cache = {}
-         self.lock = threading.Lock()  # Locking so the server doesn't break.
-
-     def add(self, texts: list[str], texts_with_context: list[str], starting_indices: list[int], metadatas: list[dict] = None):
-         with self.lock:
-             assert metadatas is None or len(metadatas) == len(texts), "metadatas must be None or have the same length as texts"
-
-             if len(texts) == 0:
-                 return
-
-             new_ids = self._get_new_ids(len(texts))
-
-             (existing_texts, existing_embeddings, existing_ids, existing_metas), \
-                 (non_existing_texts, non_existing_ids, non_existing_metas) = self._split_texts_by_cache_hit(texts, new_ids, metadatas)
-
-             # If there are any already existing texts, add them all at once.
-             if existing_texts:
-                 logger.info(f'Adding {len(existing_embeddings)} cached embeddings.')
-                 args = {'embeddings': existing_embeddings, 'documents': existing_texts, 'ids': existing_ids}
-                 if metadatas is not None:
-                     args['metadatas'] = existing_metas
-                 self.collection.add(**args)
-
-             # If there are any non-existing texts, compute their embeddings all at once. Each call to embed has significant overhead.
-             if non_existing_texts:
-                 non_existing_embeddings = self.embedder.embed(non_existing_texts).tolist()
-                 for text, embedding in zip(non_existing_texts, non_existing_embeddings):
-                     self.embeddings_cache[text] = embedding
-
-                 logger.info(f'Adding {len(non_existing_embeddings)} new embeddings.')
-                 args = {'embeddings': non_existing_embeddings, 'documents': non_existing_texts, 'ids': non_existing_ids}
-                 if metadatas is not None:
-                     args['metadatas'] = non_existing_metas
-                 self.collection.add(**args)
-
-             # Create a dictionary that maps each ID to its context and starting index
-             new_info = {
-                 id_: {'text_with_context': context, 'start_index': start_index}
-                 for id_, context, start_index in zip(new_ids, texts_with_context, starting_indices)
-             }
-
-             self.id_to_info.update(new_info)
-             self.ids.extend(new_ids)
-
-
-     def _split_texts_by_cache_hit(self, texts: list[str], new_ids: list[str], metadatas: list[dict]):
-         existing_texts, non_existing_texts = [], []
-         existing_embeddings = []
-         existing_ids, non_existing_ids = [], []
-         existing_metas, non_existing_metas = [], []
-
-         for i, text in enumerate(texts):
-             id_ = new_ids[i]
-             metadata = metadatas[i] if metadatas is not None else None
-             embedding = self.embeddings_cache.get(text)
-             if embedding:
-                 existing_texts.append(text)
-                 existing_embeddings.append(embedding)
-                 existing_ids.append(id_)
-                 existing_metas.append(metadata)
-             else:
-                 non_existing_texts.append(text)
-                 non_existing_ids.append(id_)
-                 non_existing_metas.append(metadata)
-
-         return (existing_texts, existing_embeddings, existing_ids, existing_metas), \
-                (non_existing_texts, non_existing_ids, non_existing_metas)
-
-
-     def _get_new_ids(self, num_new_ids: int):
-         if self.ids:
-             max_existing_id = max(int(id_) for id_ in self.ids)
-         else:
-             max_existing_id = -1
-
-         return [str(i + max_existing_id + 1) for i in range(num_new_ids)]
-
-
-     def _find_min_max_start_index(self):
-         max_index, min_index = 0, float('inf')
-         for _, val in self.id_to_info.items():
-             if val['start_index'] > max_index:
-                 max_index = val['start_index']
-             if val['start_index'] < min_index:
-                 min_index = val['start_index']
-         return min_index, max_index
-
-
-     # NB: Does not make sense to weigh excerpts from different documents.
-     # But let's say that's the user's problem. Perfect world scenario:
-     # Apply time weighing to different documents. For each document, then, add
-     # separate time weighing.
-     def _apply_sigmoid_time_weighing(self, infos: list[Info], document_len: int, time_steepness: float, time_power: float):
-         sigmoid = lambda x: 1 / (1 + np.exp(-x))
-
-         weights = sigmoid(time_steepness * np.linspace(-10, 10, document_len))
-
-         # Scale to [0, time_power] and shift it up to [1 - time_power, 1]
-         weights = weights - min(weights)
-         weights = weights * (time_power / max(weights))
-         weights = weights + (1 - time_power)
-
-         # Reverse the weights
-         weights = weights[::-1]
-
-         for info in infos:
-             index = info.start_index
-             info.distance *= weights[index]
-
-
-     def _filter_outliers_by_median_distance(self, infos: list[Info], significant_level: float):
-         # Ensure there are infos to filter
-         if not infos:
-             return []
-
-         # Find info with minimum distance
-         min_info = min(infos, key=lambda x: x.distance)
-
-         # Calculate median distance among infos
-         median_distance = np.median([inf.distance for inf in infos])
-
-         # Filter out infos that have a distance significantly greater than the median
-         filtered_infos = [inf for inf in infos if inf.distance <= significant_level * median_distance]
-
-         # Always include the info with minimum distance
-         if min_info not in filtered_infos:
-             filtered_infos.append(min_info)
-
-         return filtered_infos
-
-
-     def _merge_infos(self, infos: list[Info]):
-         merged_infos = []
-         current_info = infos[0]
-
-         for next_info in infos[1:]:
-             merged = current_info.merge_with(next_info)
-             if merged is not None:
-                 current_info = merged
-             else:
-                 merged_infos.append(current_info)
-                 current_info = next_info
-
-         merged_infos.append(current_info)
-         return merged_infos
-
-
-     # Main function for retrieving chunks by distance. It performs merging, time weighing, and median filtering.
-     def _get_documents_ids_distances(self, search_strings: list[str], n_results: int):
-         n_results = min(len(self.ids), n_results)
-         if n_results == 0:
-             return [], [], []
-
-         if isinstance(search_strings, str):
-             search_strings = [search_strings]
-
-         infos = []
-         min_start_index, max_start_index = self._find_min_max_start_index()
-
-         for search_string in search_strings:
-             result = self.collection.query(query_texts=search_string, n_results=math.ceil(n_results / len(search_strings)), include=['distances'])
-             curr_infos = [Info(start_index=self.id_to_info[id]['start_index'],
-                                text_with_context=self.id_to_info[id]['text_with_context'],
-                                distance=distance, id=id)
-                           for id, distance in zip(result['ids'][0], result['distances'][0])]
-
-             self._apply_sigmoid_time_weighing(infos=curr_infos, document_len=max_start_index - min_start_index + 1, time_steepness=parameters.get_time_steepness(), time_power=parameters.get_time_power())
-             curr_infos = self._filter_outliers_by_median_distance(curr_infos, parameters.get_significant_level())
-             infos.extend(curr_infos)
-
-         infos.sort(key=lambda x: x.start_index)
-         infos = self._merge_infos(infos)
-
-         texts_with_context = [inf.text_with_context for inf in infos]
-         ids = [inf.id for inf in infos]
-         distances = [inf.distance for inf in infos]
-
-         return texts_with_context, ids, distances
-
-
-     # Get chunks by similarity
-     def get(self, search_strings: list[str], n_results: int) -> list[str]:
-         with self.lock:
-             documents, _, _ = self._get_documents_ids_distances(search_strings, n_results)
-             return documents
-
-
-     # Get ids by similarity
-     def get_ids(self, search_strings: list[str], n_results: int) -> list[str]:
-         with self.lock:
-             _, ids, _ = self._get_documents_ids_distances(search_strings, n_results)
-             return ids
-
-
-     # Cut off at a maximum token count
-     def _get_documents_up_to_token_count(self, documents: list[str], max_token_count: int):
-         # TODO: Move to caller; we add delimiters there which might go over the limit.
-         current_token_count = 0
-         return_documents = []
-
-         for doc in documents:
-             doc_tokens = encode(doc)[0]
-             doc_token_count = len(doc_tokens)
-             if current_token_count + doc_token_count > max_token_count:
-                 # If adding this document would exceed the max token count,
-                 # truncate the document to fit within the limit.
-                 remaining_tokens = max_token_count - current_token_count
-
-                 truncated_doc = decode(doc_tokens[:remaining_tokens], skip_special_tokens=True)
-                 return_documents.append(truncated_doc)
-                 break
-             else:
-                 return_documents.append(doc)
-                 current_token_count += doc_token_count
-
-         return return_documents
-
-
-     # Get chunks by similarity and then sort by ids
-     def get_sorted_by_ids(self, search_strings: list[str], n_results: int, max_token_count: int) -> list[str]:
-         with self.lock:
-             documents, ids, _ = self._get_documents_ids_distances(search_strings, n_results)
-             sorted_docs = [x for _, x in sorted(zip(ids, documents))]
-
-             return self._get_documents_up_to_token_count(sorted_docs, max_token_count)
-
-
-     # Get chunks by similarity and then sort by distance (lowest distance is last).
-     def get_sorted_by_dist(self, search_strings: list[str], n_results: int, max_token_count: int) -> list[str]:
-         with self.lock:
-             documents, _, distances = self._get_documents_ids_distances(search_strings, n_results)
-             sorted_docs = [doc for doc, _ in sorted(zip(documents, distances), key=lambda x: x[1])]  # sorted lowest -> highest
-
-             # If a document is truncated or completely skipped, it will have a high distance.
-             return_documents = self._get_documents_up_to_token_count(sorted_docs, max_token_count)
-             return_documents.reverse()  # highest -> lowest
-
-             return return_documents
-
-
-     def delete(self, ids_to_delete: list[str], where: dict):
-         with self.lock:
-             ids_to_delete = self.collection.get(ids=ids_to_delete, where=where)['ids']
-             self.collection.delete(ids=ids_to_delete, where=where)
-
-             # Remove the deleted ids from self.ids and self.id_to_info
-             ids_set = set(ids_to_delete)
-             self.ids = [id_ for id_ in self.ids if id_ not in ids_set]
-             for id_ in ids_to_delete:
-                 self.id_to_info.pop(id_, None)
-
-             logger.info(f'Successfully deleted {len(ids_to_delete)} records from chromaDB.')
-
-
-     def clear(self):
-         with self.lock:
-             self.chroma_client.reset()
-             self.collection = self.chroma_client.create_collection("context", embedding_function=self.embedder.embed)
-             self.ids = []
-             self.id_to_info = {}
-
-             logger.info('Successfully cleared all records and reset chromaDB.')
-
-
- class SentenceTransformerEmbedder(Embedder):
-     def __init__(self) -> None:
-         logger.debug('Creating Sentence Embedder...')
-         self.model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
-         self.embed = self.model.encode
-
-
- def make_collector():
-     return ChromaCollector(SentenceTransformerEmbedder())
 
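A hedged usage sketch of the collector above. It assumes `chromadb` and `sentence-transformers` are installed and that the extension's `parameters` module has been initialized; the token-count variants additionally need the webui's `encode`/`decode`, so only the plain similarity call is shown:

```python
collector = make_collector()

chunks = ["The cat sat on the mat.", "GPUs accelerate matrix multiplies."]
collector.add(
    texts=chunks,
    texts_with_context=chunks,  # no surrounding context in this toy case
    starting_indices=[0, 30],   # character offsets into the source document
)
print(collector.get(["hardware for deep learning"], n_results=1))
# expected: the GPU sentence, retrieved by embedding similarity
```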
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/__init__.py DELETED
@@ -1,2 +0,0 @@
- from .fused_act import FusedLeakyReLU, fused_leaky_relu
- from .upfirdn2d import upfirdn2d
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/upfirdn2d.py DELETED
@@ -1,330 +0,0 @@
- # modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501
-
- # Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
- # NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator
- # Augmentation (ADA)
- # =======================================================================
-
- # 1. Definitions
-
- # "Licensor" means any person or entity that distributes its Work.
-
- # "Software" means the original work of authorship made available under
- # this License.
-
- # "Work" means the Software and any additions to or derivative works of
- # the Software that are made available under this License.
-
- # The terms "reproduce," "reproduction," "derivative works," and
- # "distribution" have the meaning as provided under U.S. copyright law;
- # provided, however, that for the purposes of this License, derivative
- # works shall not include works that remain separable from, or merely
- # link (or bind by name) to the interfaces of, the Work.
-
- # Works, including the Software, are "made available" under this License
- # by including in or with the Work either (a) a copyright notice
- # referencing the applicability of this License to the Work, or (b) a
- # copy of this License.
-
- # 2. License Grants
-
- #     2.1 Copyright Grant. Subject to the terms and conditions of this
- #     License, each Licensor grants to you a perpetual, worldwide,
- #     non-exclusive, royalty-free, copyright license to reproduce,
- #     prepare derivative works of, publicly display, publicly perform,
- #     sublicense and distribute its Work and any resulting derivative
- #     works in any form.
-
- # 3. Limitations
-
- #     3.1 Redistribution. You may reproduce or distribute the Work only
- #     if (a) you do so under this License, (b) you include a complete
- #     copy of this License with your distribution, and (c) you retain
- #     without modification any copyright, patent, trademark, or
- #     attribution notices that are present in the Work.
-
- #     3.2 Derivative Works. You may specify that additional or different
- #     terms apply to the use, reproduction, and distribution of your
- #     derivative works of the Work ("Your Terms") only if (a) Your Terms
- #     provide that the use limitation in Section 3.3 applies to your
- #     derivative works, and (b) you identify the specific derivative
- #     works that are subject to Your Terms. Notwithstanding Your Terms,
- #     this License (including the redistribution requirements in Section
- #     3.1) will continue to apply to the Work itself.
-
- #     3.3 Use Limitation. The Work and any derivative works thereof only
- #     may be used or intended for use non-commercially. Notwithstanding
- #     the foregoing, NVIDIA and its affiliates may use the Work and any
- #     derivative works commercially. As used herein, "non-commercially"
- #     means for research or evaluation purposes only.
-
- #     3.4 Patent Claims. If you bring or threaten to bring a patent claim
- #     against any Licensor (including any claim, cross-claim or
- #     counterclaim in a lawsuit) to enforce any patents that you allege
- #     are infringed by any Work, then your rights under this License from
- #     such Licensor (including the grant in Section 2.1) will terminate
- #     immediately.
-
- #     3.5 Trademarks. This License does not grant any rights to use any
- #     Licensor’s or its affiliates’ names, logos, or trademarks, except
- #     as necessary to reproduce the notices described in this License.
-
- #     3.6 Termination. If you violate any term of this License, then your
- #     rights under this License (including the grant in Section 2.1) will
- #     terminate immediately.
-
- # 4. Disclaimer of Warranty.
-
- # THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
- # KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
- # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
- # NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
- # THIS LICENSE.
-
- # 5. Limitation of Liability.
-
- # EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
- # THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
- # SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
- # INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
- # OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
- # (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
- # LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
- # COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
- # THE POSSIBILITY OF SUCH DAMAGES.
-
- # =======================================================================
-
- import torch
- from torch.autograd import Function
- from torch.nn import functional as F
-
- from annotator.uniformer.mmcv.utils import to_2tuple
- from ..utils import ext_loader
-
- upfirdn2d_ext = ext_loader.load_ext('_ext', ['upfirdn2d'])
-
-
- class UpFirDn2dBackward(Function):
-
-     @staticmethod
-     def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
-                 in_size, out_size):
-
-         up_x, up_y = up
-         down_x, down_y = down
-         g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
-
-         grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
-
-         grad_input = upfirdn2d_ext.upfirdn2d(
-             grad_output,
-             grad_kernel,
-             up_x=down_x,
-             up_y=down_y,
-             down_x=up_x,
-             down_y=up_y,
-             pad_x0=g_pad_x0,
-             pad_x1=g_pad_x1,
-             pad_y0=g_pad_y0,
-             pad_y1=g_pad_y1)
-         grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
-                                      in_size[3])
-
-         ctx.save_for_backward(kernel)
-
-         pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-         ctx.up_x = up_x
-         ctx.up_y = up_y
-         ctx.down_x = down_x
-         ctx.down_y = down_y
-         ctx.pad_x0 = pad_x0
-         ctx.pad_x1 = pad_x1
-         ctx.pad_y0 = pad_y0
-         ctx.pad_y1 = pad_y1
-         ctx.in_size = in_size
-         ctx.out_size = out_size
-
-         return grad_input
-
-     @staticmethod
-     def backward(ctx, gradgrad_input):
-         kernel, = ctx.saved_tensors
-
-         gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2],
-                                                 ctx.in_size[3], 1)
-
-         gradgrad_out = upfirdn2d_ext.upfirdn2d(
-             gradgrad_input,
-             kernel,
-             up_x=ctx.up_x,
-             up_y=ctx.up_y,
-             down_x=ctx.down_x,
-             down_y=ctx.down_y,
-             pad_x0=ctx.pad_x0,
-             pad_x1=ctx.pad_x1,
-             pad_y0=ctx.pad_y0,
-             pad_y1=ctx.pad_y1)
-         # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0],
-         #                                  ctx.out_size[1], ctx.in_size[3])
-         gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
-                                          ctx.out_size[0], ctx.out_size[1])
-
-         return gradgrad_out, None, None, None, None, None, None, None, None
-
-
- class UpFirDn2d(Function):
-
-     @staticmethod
-     def forward(ctx, input, kernel, up, down, pad):
-         up_x, up_y = up
-         down_x, down_y = down
-         pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-         kernel_h, kernel_w = kernel.shape
-         batch, channel, in_h, in_w = input.shape
-         ctx.in_size = input.shape
-
-         input = input.reshape(-1, in_h, in_w, 1)
-
-         ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
-
-         out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
-         out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-         ctx.out_size = (out_h, out_w)
-
-         ctx.up = (up_x, up_y)
-         ctx.down = (down_x, down_y)
-         ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
-
-         g_pad_x0 = kernel_w - pad_x0 - 1
-         g_pad_y0 = kernel_h - pad_y0 - 1
-         g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
-         g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
-
-         ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
-
-         out = upfirdn2d_ext.upfirdn2d(
-             input,
-             kernel,
-             up_x=up_x,
-             up_y=up_y,
-             down_x=down_x,
-             down_y=down_y,
-             pad_x0=pad_x0,
-             pad_x1=pad_x1,
-             pad_y0=pad_y0,
-             pad_y1=pad_y1)
-         # out = out.view(major, out_h, out_w, minor)
-         out = out.view(-1, channel, out_h, out_w)
-
-         return out
-
-     @staticmethod
-     def backward(ctx, grad_output):
-         kernel, grad_kernel = ctx.saved_tensors
-
-         grad_input = UpFirDn2dBackward.apply(
-             grad_output,
-             kernel,
-             grad_kernel,
-             ctx.up,
-             ctx.down,
-             ctx.pad,
-             ctx.g_pad,
-             ctx.in_size,
-             ctx.out_size,
-         )
-
-         return grad_input, None, None, None, None
-
-
- def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
-     """UpFIRDn for 2d features.
-
-     UpFIRDn is short for upsample, apply FIR filter and downsample. More
-     details can be found in:
-     https://www.mathworks.com/help/signal/ref/upfirdn.html
-
-     Args:
-         input (Tensor): Tensor with shape of (n, c, h, w).
-         kernel (Tensor): Filter kernel.
-         up (int | tuple[int], optional): Upsampling factor. If given a number,
-             we will use this factor for both the height and width sides.
-             Defaults to 1.
-         down (int | tuple[int], optional): Downsampling factor. If given a
-             number, we will use this factor for both the height and width
-             sides. Defaults to 1.
-         pad (tuple[int], optional): Padding for tensors, (x_pad, y_pad) or
-             (x_pad_0, x_pad_1, y_pad_0, y_pad_1). Defaults to (0, 0).
-
-     Returns:
-         Tensor: Tensor after UpFIRDn.
-     """
-     if input.device.type == 'cpu':
-         if len(pad) == 2:
-             pad = (pad[0], pad[1], pad[0], pad[1])
-
-         up = to_2tuple(up)
-
-         down = to_2tuple(down)
-
-         out = upfirdn2d_native(input, kernel, up[0], up[1], down[0], down[1],
-                                pad[0], pad[1], pad[2], pad[3])
-     else:
-         _up = to_2tuple(up)
-
-         _down = to_2tuple(down)
-
-         if len(pad) == 4:
-             _pad = pad
-         elif len(pad) == 2:
-             _pad = (pad[0], pad[1], pad[0], pad[1])
-
-         out = UpFirDn2d.apply(input, kernel, _up, _down, _pad)
-
-     return out
-
-
- def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1,
-                      pad_y0, pad_y1):
-     _, channel, in_h, in_w = input.shape
-     input = input.reshape(-1, in_h, in_w, 1)
-
-     _, in_h, in_w, minor = input.shape
-     kernel_h, kernel_w = kernel.shape
-
-     out = input.view(-1, in_h, 1, in_w, 1, minor)
-     out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
-     out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
-     out = F.pad(
-         out,
-         [0, 0,
-          max(pad_x0, 0),
-          max(pad_x1, 0),
-          max(pad_y0, 0),
-          max(pad_y1, 0)])
-     out = out[:,
-               max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
-               max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ]
-
-     out = out.permute(0, 3, 1, 2)
-     out = out.reshape(
-         [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
-     w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
-     out = F.conv2d(out, w)
-     out = out.reshape(
-         -1,
-         minor,
-         in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
-         in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
-     )
-     out = out.permute(0, 2, 3, 1)
-     out = out[:, ::down_y, ::down_x, :]
-
-     out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
-     out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-
-     return out.view(-1, channel, out_h, out_w)
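To make the padding/size arithmetic above concrete, a small hedged sketch (CPU input, so it runs through `upfirdn2d_native`; the kernel choice is arbitrary):

    import torch
    x = torch.randn(1, 3, 8, 8)                    # (n, c, h, w) feature map
    k = torch.ones(3, 3) / 9.0                     # simple 3x3 averaging FIR kernel
    y = upfirdn2d(x, k, up=2, down=1, pad=(1, 1))  # upsample x2, filter, no downsample
    # out_h = (8*2 + 1 + 1 - 3) // 1 + 1 = 16, so y.shape == (1, 3, 16, 16)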
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/filetypes.py DELETED
@@ -1,27 +0,0 @@
- """Filetype information.
- """
-
- from typing import Tuple
-
- from pip._internal.utils.misc import splitext
-
- WHEEL_EXTENSION = ".whl"
- BZ2_EXTENSIONS: Tuple[str, ...] = (".tar.bz2", ".tbz")
- XZ_EXTENSIONS: Tuple[str, ...] = (
-     ".tar.xz",
-     ".txz",
-     ".tlz",
-     ".tar.lz",
-     ".tar.lzma",
- )
- ZIP_EXTENSIONS: Tuple[str, ...] = (".zip", WHEEL_EXTENSION)
- TAR_EXTENSIONS: Tuple[str, ...] = (".tar.gz", ".tgz", ".tar")
- ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS
-
-
- def is_archive_file(name: str) -> bool:
-     """Return True if `name` is considered an archive file."""
-     ext = splitext(name)[1].lower()
-     if ext in ARCHIVE_EXTENSIONS:
-         return True
-     return False
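A quick hedged illustration of the helper above (note that pip's `splitext` differs from `os.path.splitext` in keeping a `.tar` part attached to the extension):

    is_archive_file("numpy-1.24.0-cp311-cp311-win_amd64.whl")  # True: .whl is in ZIP_EXTENSIONS
    is_archive_file("project-1.0.tar.gz")                      # True: splitext yields ".tar.gz"
    is_archive_file("README.md")                               # False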
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/cygwinccompiler.py DELETED
@@ -1,364 +0,0 @@
- """distutils.cygwinccompiler
-
- Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
- handles the Cygwin port of the GNU C compiler to Windows. It also contains
- the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
- cygwin in no-cygwin mode).
- """
-
- import os
- import sys
- import copy
- import shlex
- import warnings
- from subprocess import check_output
-
- from distutils.unixccompiler import UnixCCompiler
- from distutils.file_util import write_file
- from distutils.errors import (
-     DistutilsExecError,
-     DistutilsPlatformError,
-     CCompilerError,
-     CompileError,
- )
- from distutils.version import LooseVersion, suppress_known_deprecation
-
-
- def get_msvcr():
-     """Include the appropriate MSVC runtime library if Python was built
-     with MSVC 7.0 or later.
-     """
-     msc_pos = sys.version.find('MSC v.')
-     if msc_pos != -1:
-         msc_ver = sys.version[msc_pos + 6 : msc_pos + 10]
-         if msc_ver == '1300':
-             # MSVC 7.0
-             return ['msvcr70']
-         elif msc_ver == '1310':
-             # MSVC 7.1
-             return ['msvcr71']
-         elif msc_ver == '1400':
-             # VS2005 / MSVC 8.0
-             return ['msvcr80']
-         elif msc_ver == '1500':
-             # VS2008 / MSVC 9.0
-             return ['msvcr90']
-         elif msc_ver == '1600':
-             # VS2010 / MSVC 10.0
-             return ['msvcr100']
-         elif msc_ver == '1700':
-             # VS2012 / MSVC 11.0
-             return ['msvcr110']
-         elif msc_ver == '1800':
-             # VS2013 / MSVC 12.0
-             return ['msvcr120']
-         elif 1900 <= int(msc_ver) < 2000:
-             # VS2015 / MSVC 14.0
-             return ['ucrt', 'vcruntime140']
-         else:
-             raise ValueError("Unknown MS Compiler version %s " % msc_ver)
-
-
- _runtime_library_dirs_msg = (
-     "Unable to set runtime library search path on Windows, "
-     "usually indicated by `runtime_library_dirs` parameter to Extension"
- )
-
-
- class CygwinCCompiler(UnixCCompiler):
-     """Handles the Cygwin port of the GNU C compiler to Windows."""
-
-     compiler_type = 'cygwin'
-     obj_extension = ".o"
-     static_lib_extension = ".a"
-     shared_lib_extension = ".dll.a"
-     dylib_lib_extension = ".dll"
-     static_lib_format = "lib%s%s"
-     shared_lib_format = "lib%s%s"
-     dylib_lib_format = "cyg%s%s"
-     exe_extension = ".exe"
-
-     def __init__(self, verbose=0, dry_run=0, force=0):
-
-         super().__init__(verbose, dry_run, force)
-
-         status, details = check_config_h()
-         self.debug_print(
-             "Python's GCC status: {} (details: {})".format(status, details)
-         )
-         if status is not CONFIG_H_OK:
-             self.warn(
-                 "Python's pyconfig.h doesn't seem to support your compiler. "
-                 "Reason: %s. "
-                 "Compiling may fail because of undefined preprocessor macros." % details
-             )
-
-         self.cc = os.environ.get('CC', 'gcc')
-         self.cxx = os.environ.get('CXX', 'g++')
-
-         self.linker_dll = self.cc
-         shared_option = "-shared"
-
-         self.set_executables(
-             compiler='%s -mcygwin -O -Wall' % self.cc,
-             compiler_so='%s -mcygwin -mdll -O -Wall' % self.cc,
-             compiler_cxx='%s -mcygwin -O -Wall' % self.cxx,
-             linker_exe='%s -mcygwin' % self.cc,
-             linker_so=('{} -mcygwin {}'.format(self.linker_dll, shared_option)),
-         )
-
-         # Include the appropriate MSVC runtime library if Python was built
-         # with MSVC 7.0 or later.
-         self.dll_libraries = get_msvcr()
-
-     @property
-     def gcc_version(self):
-         # Older numpy depended on this existing to check for ancient
-         # gcc versions. This doesn't make much sense with clang etc so
-         # just hardcode to something recent.
-         # https://github.com/numpy/numpy/pull/20333
-         warnings.warn(
-             "gcc_version attribute of CygwinCCompiler is deprecated. "
-             "Instead of returning actual gcc version a fixed value 11.2.0 is returned.",
-             DeprecationWarning,
-             stacklevel=2,
-         )
-         with suppress_known_deprecation():
-             return LooseVersion("11.2.0")
-
-     def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
-         """Compiles the source by spawning GCC and windres if needed."""
-         if ext == '.rc' or ext == '.res':
-             # gcc needs '.res' and '.rc' compiled to object files !!!
-             try:
-                 self.spawn(["windres", "-i", src, "-o", obj])
-             except DistutilsExecError as msg:
-                 raise CompileError(msg)
-         else:  # for other files use the C-compiler
-             try:
-                 self.spawn(
-                     self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs
-                 )
-             except DistutilsExecError as msg:
-                 raise CompileError(msg)
-
-     def link(
-         self,
-         target_desc,
-         objects,
-         output_filename,
-         output_dir=None,
-         libraries=None,
-         library_dirs=None,
-         runtime_library_dirs=None,
-         export_symbols=None,
-         debug=0,
-         extra_preargs=None,
-         extra_postargs=None,
-         build_temp=None,
-         target_lang=None,
-     ):
-         """Link the objects."""
-         # use separate copies, so we can modify the lists
-         extra_preargs = copy.copy(extra_preargs or [])
-         libraries = copy.copy(libraries or [])
-         objects = copy.copy(objects or [])
-
-         if runtime_library_dirs:
-             self.warn(_runtime_library_dirs_msg)
-
-         # Additional libraries
-         libraries.extend(self.dll_libraries)
-
-         # handle export symbols by creating a def-file
-         # with executables this only works with gcc/ld as linker
-         if (export_symbols is not None) and (
-             target_desc != self.EXECUTABLE or self.linker_dll == "gcc"
-         ):
-             # (The linker doesn't do anything if output is up-to-date.
-             # So it would probably be better to check if we really need this,
-             # but for this we had to insert some unchanged parts of
-             # UnixCCompiler, and this is not what we want.)
-
-             # we want to put some files in the same directory as the
-             # object files are, build_temp doesn't help much
-             # where are the object files
-             temp_dir = os.path.dirname(objects[0])
-             # name of dll to give the helper files the same base name
-             (dll_name, dll_extension) = os.path.splitext(
-                 os.path.basename(output_filename)
-             )
-
-             # generate the filenames for these files
-             def_file = os.path.join(temp_dir, dll_name + ".def")
-
-             # Generate .def file
-             contents = ["LIBRARY %s" % os.path.basename(output_filename), "EXPORTS"]
-             for sym in export_symbols:
-                 contents.append(sym)
-             self.execute(write_file, (def_file, contents), "writing %s" % def_file)
-
-             # next add options for def-file
-
-             # for gcc/ld the def-file is specified as any object files
-             objects.append(def_file)
-
-         # end: if ((export_symbols is not None) and
-         #         (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
-
-         # anyone who wants symbols and a many times larger output file
-         # should explicitly switch the debug mode on
-         # otherwise we let ld strip the output file
-         # (On my machine: 10KiB < stripped_file < ??100KiB
-         #   unstripped_file = stripped_file + XXX KiB
-         #  ( XXX=254 for a typical python extension))
-         if not debug:
-             extra_preargs.append("-s")
-
-         UnixCCompiler.link(
-             self,
-             target_desc,
-             objects,
-             output_filename,
-             output_dir,
-             libraries,
-             library_dirs,
-             runtime_library_dirs,
-             None,  # export_symbols, we do this in our def-file
-             debug,
-             extra_preargs,
-             extra_postargs,
-             build_temp,
-             target_lang,
-         )
-
-     def runtime_library_dir_option(self, dir):
-         # cygwin doesn't support rpath. While in theory we could error
-         # out like MSVC does, code might expect it to work like on Unix, so
-         # just warn and hope for the best.
-         self.warn(_runtime_library_dirs_msg)
-         return []
-
-     # -- Miscellaneous methods -----------------------------------------
-
-     def _make_out_path(self, output_dir, strip_dir, src_name):
-         # use normcase to make sure '.rc' is really '.rc' and not '.RC'
-         norm_src_name = os.path.normcase(src_name)
-         return super()._make_out_path(output_dir, strip_dir, norm_src_name)
-
-     @property
-     def out_extensions(self):
-         """
-         Add support for rc and res files.
-         """
-         return {
-             **super().out_extensions,
-             **{ext: ext + self.obj_extension for ext in ('.res', '.rc')},
-         }
-
-
- # the same as cygwin plus some additional parameters
- class Mingw32CCompiler(CygwinCCompiler):
-     """Handles the Mingw32 port of the GNU C compiler to Windows."""
-
-     compiler_type = 'mingw32'
-
-     def __init__(self, verbose=0, dry_run=0, force=0):
-
-         super().__init__(verbose, dry_run, force)
-
-         shared_option = "-shared"
-
-         if is_cygwincc(self.cc):
-             raise CCompilerError('Cygwin gcc cannot be used with --compiler=mingw32')
-
-         self.set_executables(
-             compiler='%s -O -Wall' % self.cc,
-             compiler_so='%s -mdll -O -Wall' % self.cc,
-             compiler_cxx='%s -O -Wall' % self.cxx,
-             linker_exe='%s' % self.cc,
-             linker_so='{} {}'.format(self.linker_dll, shared_option),
-         )
-
-         # Maybe we should also append -mthreads, but then the finished
-         # dlls need another dll (mingwm10.dll see Mingw32 docs)
-         # (-mthreads: Support thread-safe exception handling on `Mingw32')
-
-         # no additional libraries needed
-         self.dll_libraries = []
-
-         # Include the appropriate MSVC runtime library if Python was built
-         # with MSVC 7.0 or later.
-         self.dll_libraries = get_msvcr()
-
-     def runtime_library_dir_option(self, dir):
-         raise DistutilsPlatformError(_runtime_library_dirs_msg)
-
-
- # Because these compilers aren't configured in Python's pyconfig.h file by
- # default, we should at least warn the user if he is using an unmodified
- # version.
-
- CONFIG_H_OK = "ok"
- CONFIG_H_NOTOK = "not ok"
- CONFIG_H_UNCERTAIN = "uncertain"
-
-
- def check_config_h():
-     """Check if the current Python installation appears amenable to building
-     extensions with GCC.
-
-     Returns a tuple (status, details), where 'status' is one of the following
-     constants:
-
-     - CONFIG_H_OK: all is well, go ahead and compile
-     - CONFIG_H_NOTOK: doesn't look good
-     - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h
-
-     'details' is a human-readable string explaining the situation.
-
-     Note there are two ways to conclude "OK": either 'sys.version' contains
-     the string "GCC" (implying that this Python was built with GCC), or the
-     installed "pyconfig.h" contains the string "__GNUC__".
-     """
-
-     # XXX since this function also checks sys.version, it's not strictly a
-     # "pyconfig.h" check -- should probably be renamed...
-
-     from distutils import sysconfig
-
-     # if sys.version contains GCC then python was compiled with GCC, and the
-     # pyconfig.h file should be OK
-     if "GCC" in sys.version:
-         return CONFIG_H_OK, "sys.version mentions 'GCC'"
-
-     # Clang would also work
-     if "Clang" in sys.version:
-         return CONFIG_H_OK, "sys.version mentions 'Clang'"
-
-     # let's see if __GNUC__ is mentioned in python.h
-     fn = sysconfig.get_config_h_filename()
-     try:
-         config_h = open(fn)
-         try:
-             if "__GNUC__" in config_h.read():
-                 return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
-             else:
-                 return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
-         finally:
-             config_h.close()
-     except OSError as exc:
-         return (CONFIG_H_UNCERTAIN, "couldn't read '{}': {}".format(fn, exc.strerror))
-
-
- def is_cygwincc(cc):
-     '''Try to determine if the compiler that would be used is from cygwin.'''
-     out_string = check_output(shlex.split(cc) + ['-dumpmachine'])
-     return out_string.strip().endswith(b'cygwin')
-
-
- get_versions = None
- """
- A stand-in for the previous get_versions() function to prevent failures
- when monkeypatched. See pypa/setuptools#2969.
- """
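As a hedged sketch of how the pyconfig.h probe above behaves on a stock Windows CPython (whose `sys.version` mentions neither GCC nor Clang):

    status, details = check_config_h()
    if status is not CONFIG_H_OK:
        print("GCC build support uncertain:", details)  # e.g. pyconfig.h does not mention '__GNUC__'
    # and for an MSC v.19xx interpreter, get_msvcr() would return ['ucrt', 'vcruntime140']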
spaces/AyushP/PolicyCompareBot/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: PolicyCompareBot
- emoji: 🌖
- colorFrom: gray
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.17.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Benson/text-generation/Examples/5 Documento De Pregunta Beca 2016 Pdf.md DELETED
@@ -1,76 +0,0 @@
-
- <h1>5th Scholarship Question Paper 2016 PDF Download</h1>
- <p>If you are a student aspiring to win a scholarship for your higher education, you might be interested in taking the 5th scholarship exam. It is a competitive exam conducted by various authorities in India and Sri Lanka for students in their final year of primary school. The exam tests your knowledge, skills, and aptitude in various subjects and helps you gain admission to renowned schools and colleges.</p>
- <h2>5th scholarship question paper 2016 pdf</h2><br /><p><b><b>Download</b> &#10004; <a href="https://bltlly.com/2v6MD1">https://bltlly.com/2v6MD1</a></b></p><br /><br />
- <p>However, preparing for this exam is no easy task. You need a clear understanding of the syllabus, the topics, and the question patterns. You also need to practice plenty of previous years' papers to familiarize yourself with the difficulty level and with managing your time. One of the best ways to do this is to download and use the 5th scholarship question paper 2016 PDF.</p>
- <p>In this article, we will tell you how to download the 5th scholarship question paper 2016 PDF and how to use it for your preparation. We will also share some tips and tricks for solving the questions, along with some sample questions and answers from the paper. By reading this article, you will be able to boost your confidence and performance in the exam.</p>
- <h2>How to Download the 5th Scholarship Question Paper 2016 PDF</h2>
- <p>The first step in preparing for the 5th scholarship exam is to download the exam's previous papers. Previous papers will help you understand the format, syllabus, and difficulty level of the exam. They will also help you identify your strengths and weaknesses and work on them accordingly.</p>
- <p></p>
- <p>The 5th scholarship question paper 2016 PDF is available online on various websites. You can download it from any of them by following these simple steps:</p>
- <h3>Step 1: Visit the official website of the examining authority</h3>
-
- <h3>Step 2: Find the link to the previous papers and click on it</h3>
- <p>The next step is to find the link on the website for downloading the exam's previous papers. Usually, this link sits under a section called "Downloads", "Resources", or "Previous Papers". Click on it to go to a page listing all the previous papers available for download.</p>
- <h3>Step 3: Select the year 2016 and the medium of your choice</h3>
- <p>The third step is to select the year 2016 from the list of previous papers. This will show you the PDF file of the 5th scholarship question paper 2016 in the medium of your choice. The medium can be English, Hindi, Marathi, Tamil, Sinhala, or any other language in which the exam is conducted. Click on the PDF file to view it online or download it to your device.</p>
- <h3>Step 4: Download the PDF file and save it to your device</h3>
- <p>The final step is to download the PDF file of the 5th scholarship question paper 2016 and save it to your device. You can do this by right-clicking on the PDF file and choosing the "Save as" or "Download" option. You can also use a download manager or a browser extension to fetch the file faster and more easily. Make sure you have enough space on your device and a good internet connection so that the download completes without errors.</p>
- <h2>How to Use the 5th Scholarship Question Paper 2016 PDF for Preparation</h2>
- <p>Now that you have downloaded the 5th scholarship question paper 2016 PDF, you may be wondering how to use it for your preparation. There are many ways to use the previous paper to improve your knowledge, skills, and confidence for the exam. Here are some of them:</p>
- <h3>Tips and tricks for solving the questions</h3>
-
- <p>Some of the tips and tricks you can learn from the previous paper are:</p>
- <ul>
- <li>Read the question carefully and understand what it is asking.</li>
- <li>Eliminate incorrect or irrelevant options using logic, common sense, or the method of elimination.</li>
- <li>Use shortcuts, formulas, or diagrams to solve the questions faster and more easily.</li>
- <li>Check your answers using back-calculation, substitution, or cross-checking.</li>
- <li>Avoid guessing or marking answers at random. If you are not sure about an answer, leave it blank or mark it for later review.</li>
- </ul>
- <h3>Topics and syllabus covered in the paper</h3>
- <p>The second way to use the previous paper is to review the topics and syllabus covered in the exam. The 5th scholarship exam covers subjects such as Mathematics, Science, Social Studies, English, and General Knowledge. You need a thorough understanding of these subjects and their concepts to score well in the exam.</p>
- <p>The previous paper will help you identify the important topics and subtopics that are asked frequently in the exam. It will also help you revise the concepts you have already learned and fill any gaps in your knowledge. You can use the previous paper as a guide to plan your study schedule and allocate time to each subject accordingly.</p>
- <h3>Sample questions and answers from the paper</h3>
- <p>The third way to use the previous paper is to practice some sample questions and answers from it. This is the best way to test your knowledge, skills, and speed for the exam. By solving the sample questions, you will be able to evaluate your performance and accuracy. You will also be able to learn from your mistakes and improve your weak areas.</p>
-
- <p>Here are some sample questions and answers from the 5th scholarship question paper 2016 PDF:</p>
- <table>
- <tr>
- <th>Question</th>
- <th>Answer</th>
- </tr>
- <tr>
- <td>Which of the following is a prime number?</td>
- <td>A) 15<br>B) 17<br>C) 21<br>D) 25<br><br>The correct answer is B) 17. A prime number is a number that has exactly two factors, 1 and itself. 17 has only two factors, 1 and 17, so it is prime. The other options are not prime numbers because they have more than two factors.</td>
- </tr>
- <tr>
- <td>Which of the following is the capital of Sri Lanka?</td>
- <td>A) Colombo<br>B) Kandy<br>C) Jaffna<br>D) Galle<br><br>The correct answer is A) Colombo. Colombo is the largest city and the commercial capital of Sri Lanka. It lies on the west coast of the island and has a population of around 5.6 million people. The other options are other cities in Sri Lanka, but not the capital.</td>
- </tr>
- <tr>
- <td>Which of the following is a synonym of "happy"?</td>
- <td>A) Sad<br>B) Angry<br>C) Glad<br>D) Scared<br><br>The correct answer is C) Glad. A synonym is a word with the same or a similar meaning as another word. Glad means feeling pleasure, joy, or satisfaction, which is close to happy. The other options are antonyms of happy, i.e. they have opposite meanings.</td>
- </tr>
- </table>
- <h2>Conclusion</h2>
- <p>In conclusion, the 5th scholarship exam is a great opportunity for students who want to pursue their higher education with financial support and academic excellence. To prepare for it, you should download and use the 5th scholarship question paper 2016 PDF as a valuable resource. The previous paper will help you understand the format, syllabus, and difficulty level of the exam. It will also help you learn some tips and tricks for solving the questions and practice some sample questions and answers from the paper.</p>
-
- <h2>Frequently Asked Questions</h2>
- <h3>What are the eligibility criteria for the 5th scholarship exam?</h3>
- <p>The eligibility criteria for the 5th scholarship exam may vary depending on the authority conducting it in your region. Generally, however, you need to be a student in the final year of primary school (grade 5 or equivalent). You also need a good academic record and the minimum marks required by the authority.</p>
- <h3>What are the format and duration of the 5th scholarship exam?</h3>
- <p>The format and duration of the 5th scholarship exam may also vary depending on the authority conducting it in your region. Generally, however, the exam consists of multiple-choice questions (MCQs) covering subjects such as Mathematics, Science, Social Studies, English, and General Knowledge. The exam may have one or two papers depending on the medium and the authority, and it usually lasts between 1 and 2 hours depending on the number and type of questions.</p>
- <h3>What are the rewards and recognition for the 5th scholarship exam?</h3>
- <p>The rewards and recognition for the 5th scholarship exam may also vary depending on the authority conducting it in your region. Generally, however, students who pass the exam receive scholarships covering their tuition fees, books, and other expenses for their higher education. They also receive certificates, medals, and trophies recognizing their academic achievement and merit, and they are given preference for admission to renowned schools and colleges offering quality education and facilities.</p>
- <h3>How to apply for the 5th scholarship exam?</h3>
-
- <h3>Where to find more resources and guidance for the 5th scholarship exam?</h3>
- <p>If you want more resources and guidance for the 5th scholarship exam, you can visit some of these websites, which provide useful information, tips, study materials, mock tests, and online coaching for the exam:</p>
- <ul>
- <li>[National Scholarship Exam]: A website offering online coaching, mock tests, study materials, and guidance for the National Scholarship Exam (NSE) conducted by NICE in India.</li>
- <li>[Scholarship Exam]: A website providing information, the syllabus, previous papers, model papers, and results for the scholarship exam conducted by the Department of Examinations in Sri Lanka.</li>
- <li>[Scholarship Guide]: A website providing information, tips, advice, and resources for various scholarship exams conducted in India and abroad.</li>
- </ul></p>
- <br />
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/util.py DELETED
@@ -1,1932 +0,0 @@
- #
- # Copyright (C) 2012-2021 The Python Software Foundation.
- # See LICENSE.txt and CONTRIBUTORS.txt.
- #
- import codecs
- from collections import deque
- import contextlib
- import csv
- from glob import iglob as std_iglob
- import io
- import json
- import logging
- import os
- import py_compile
- import re
- import socket
- try:
-     import ssl
- except ImportError:  # pragma: no cover
-     ssl = None
- import subprocess
- import sys
- import tarfile
- import tempfile
- import textwrap
-
- try:
-     import threading
- except ImportError:  # pragma: no cover
-     import dummy_threading as threading
- import time
-
- from . import DistlibException
- from .compat import (string_types, text_type, shutil, raw_input, StringIO,
-                      cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
-                      splittype, HTTPHandler, BaseConfigurator, valid_ident,
-                      Container, configparser, URLError, ZipFile, fsdecode,
-                      unquote, urlparse)
-
- logger = logging.getLogger(__name__)
-
- #
- # Requirement parsing code as per PEP 508
- #
-
- IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
- VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
- COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
- MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
- OR = re.compile(r'^or\b\s*')
- AND = re.compile(r'^and\b\s*')
- NON_SPACE = re.compile(r'(\S+)\s*')
- STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
-
-
- def parse_marker(marker_string):
-     """
-     Parse a marker string and return a dictionary containing a marker expression.
-
-     The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
-     the expression grammar, or strings. A string contained in quotes is to be
-     interpreted as a literal string, and a string not contained in quotes is a
-     variable (such as os_name).
-     """
-     def marker_var(remaining):
-         # either identifier, or literal string
-         m = IDENTIFIER.match(remaining)
-         if m:
-             result = m.groups()[0]
-             remaining = remaining[m.end():]
-         elif not remaining:
-             raise SyntaxError('unexpected end of input')
-         else:
-             q = remaining[0]
-             if q not in '\'"':
-                 raise SyntaxError('invalid expression: %s' % remaining)
-             oq = '\'"'.replace(q, '')
-             remaining = remaining[1:]
-             parts = [q]
-             while remaining:
-                 # either a string chunk, or oq, or q to terminate
-                 if remaining[0] == q:
-                     break
-                 elif remaining[0] == oq:
-                     parts.append(oq)
-                     remaining = remaining[1:]
-                 else:
-                     m = STRING_CHUNK.match(remaining)
-                     if not m:
-                         raise SyntaxError('error in string literal: %s' % remaining)
-                     parts.append(m.groups()[0])
-                     remaining = remaining[m.end():]
-             else:
-                 s = ''.join(parts)
-                 raise SyntaxError('unterminated string: %s' % s)
-             parts.append(q)
-             result = ''.join(parts)
-             remaining = remaining[1:].lstrip()  # skip past closing quote
-         return result, remaining
-
-     def marker_expr(remaining):
-         if remaining and remaining[0] == '(':
-             result, remaining = marker(remaining[1:].lstrip())
-             if remaining[0] != ')':
-                 raise SyntaxError('unterminated parenthesis: %s' % remaining)
-             remaining = remaining[1:].lstrip()
-         else:
-             lhs, remaining = marker_var(remaining)
-             while remaining:
-                 m = MARKER_OP.match(remaining)
-                 if not m:
-                     break
-                 op = m.groups()[0]
-                 remaining = remaining[m.end():]
-                 rhs, remaining = marker_var(remaining)
-                 lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
-             result = lhs
-         return result, remaining
-
-     def marker_and(remaining):
-         lhs, remaining = marker_expr(remaining)
-         while remaining:
-             m = AND.match(remaining)
-             if not m:
-                 break
-             remaining = remaining[m.end():]
-             rhs, remaining = marker_expr(remaining)
-             lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
-         return lhs, remaining
-
-     def marker(remaining):
-         lhs, remaining = marker_and(remaining)
-         while remaining:
-             m = OR.match(remaining)
-             if not m:
-                 break
-             remaining = remaining[m.end():]
-             rhs, remaining = marker_and(remaining)
-             lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
-         return lhs, remaining
-
-     return marker(marker_string)
-
-
- def parse_requirement(req):
-     """
-     Parse a requirement passed in as a string. Return a Container
-     whose attributes contain the various parts of the requirement.
-     """
-     remaining = req.strip()
-     if not remaining or remaining.startswith('#'):
-         return None
-     m = IDENTIFIER.match(remaining)
-     if not m:
-         raise SyntaxError('name expected: %s' % remaining)
-     distname = m.groups()[0]
-     remaining = remaining[m.end():]
-     extras = mark_expr = versions = uri = None
-     if remaining and remaining[0] == '[':
-         i = remaining.find(']', 1)
-         if i < 0:
-             raise SyntaxError('unterminated extra: %s' % remaining)
-         s = remaining[1:i]
-         remaining = remaining[i + 1:].lstrip()
-         extras = []
-         while s:
-             m = IDENTIFIER.match(s)
-             if not m:
-                 raise SyntaxError('malformed extra: %s' % s)
-             extras.append(m.groups()[0])
-             s = s[m.end():]
-             if not s:
-                 break
-             if s[0] != ',':
-                 raise SyntaxError('comma expected in extras: %s' % s)
-             s = s[1:].lstrip()
-         if not extras:
-             extras = None
-     if remaining:
-         if remaining[0] == '@':
-             # it's a URI
-             remaining = remaining[1:].lstrip()
-             m = NON_SPACE.match(remaining)
-             if not m:
-                 raise SyntaxError('invalid URI: %s' % remaining)
-             uri = m.groups()[0]
-             t = urlparse(uri)
-             # there are issues with Python and URL parsing, so this test
-             # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
-             # always parse invalid URLs correctly - it should raise
-             # exceptions for malformed URLs
-             if not (t.scheme and t.netloc):
-                 raise SyntaxError('Invalid URL: %s' % uri)
-             remaining = remaining[m.end():].lstrip()
-         else:
-
-             def get_versions(ver_remaining):
-                 """
-                 Return a list of operator, version tuples if any are
-                 specified, else None.
-                 """
-                 m = COMPARE_OP.match(ver_remaining)
-                 versions = None
-                 if m:
-                     versions = []
-                     while True:
-                         op = m.groups()[0]
-                         ver_remaining = ver_remaining[m.end():]
-                         m = VERSION_IDENTIFIER.match(ver_remaining)
-                         if not m:
-                             raise SyntaxError('invalid version: %s' % ver_remaining)
-                         v = m.groups()[0]
-                         versions.append((op, v))
-                         ver_remaining = ver_remaining[m.end():]
-                         if not ver_remaining or ver_remaining[0] != ',':
-                             break
-                         ver_remaining = ver_remaining[1:].lstrip()
-                         # Some packages have a trailing comma which would break things
-                         # See issue #148
-                         if not ver_remaining:
-                             break
-                         m = COMPARE_OP.match(ver_remaining)
-                         if not m:
-                             raise SyntaxError('invalid constraint: %s' % ver_remaining)
-                     if not versions:
-                         versions = None
-                 return versions, ver_remaining
-
-             if remaining[0] != '(':
-                 versions, remaining = get_versions(remaining)
-             else:
-                 i = remaining.find(')', 1)
-                 if i < 0:
-                     raise SyntaxError('unterminated parenthesis: %s' % remaining)
-                 s = remaining[1:i]
-                 remaining = remaining[i + 1:].lstrip()
-                 # As a special diversion from PEP 508, allow a version number
-                 # a.b.c in parentheses as a synonym for ~= a.b.c (because this
-                 # is allowed in earlier PEPs)
-                 if COMPARE_OP.match(s):
-                     versions, _ = get_versions(s)
-                 else:
-                     m = VERSION_IDENTIFIER.match(s)
-                     if not m:
-                         raise SyntaxError('invalid constraint: %s' % s)
-                     v = m.groups()[0]
-                     s = s[m.end():].lstrip()
-                     if s:
-                         raise SyntaxError('invalid constraint: %s' % s)
-                     versions = [('~=', v)]
-
-     if remaining:
-         if remaining[0] != ';':
-             raise SyntaxError('invalid requirement: %s' % remaining)
-         remaining = remaining[1:].lstrip()
-
-         mark_expr, remaining = parse_marker(remaining)
-
-     if remaining and remaining[0] != '#':
-         raise SyntaxError('unexpected trailing data: %s' % remaining)
-
-     if not versions:
-         rs = distname
-     else:
-         rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
-     return Container(name=distname, extras=extras, constraints=versions,
-                      marker=mark_expr, url=uri, requirement=rs)
-
-
- def get_resources_dests(resources_root, rules):
-     """Find destinations for resources files"""
-
-     def get_rel_path(root, path):
-         # normalizes and returns a lstripped-/-separated path
-         root = root.replace(os.path.sep, '/')
-         path = path.replace(os.path.sep, '/')
-         assert path.startswith(root)
-         return path[len(root):].lstrip('/')
-
-     destinations = {}
-     for base, suffix, dest in rules:
-         prefix = os.path.join(resources_root, base)
-         for abs_base in iglob(prefix):
-             abs_glob = os.path.join(abs_base, suffix)
-             for abs_path in iglob(abs_glob):
-                 resource_file = get_rel_path(resources_root, abs_path)
-                 if dest is None:  # remove the entry if it was here
-                     destinations.pop(resource_file, None)
-                 else:
-                     rel_path = get_rel_path(abs_base, abs_path)
-                     rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
-                     destinations[resource_file] = rel_dest + '/' + rel_path
-     return destinations
-
-
- def in_venv():
-     if hasattr(sys, 'real_prefix'):
-         # virtualenv venvs
-         result = True
-     else:
-         # PEP 405 venvs
-         result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
-     return result
-
-
- def get_executable():
-     # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
-     # changes to the stub launcher mean that sys.executable always points
-     # to the stub on OS X
-     # if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
-     #                                  in os.environ):
-     #     result = os.environ['__PYVENV_LAUNCHER__']
-     # else:
-     #     result = sys.executable
-     # return result
-     # Avoid normcasing: see issue #143
-     # result = os.path.normcase(sys.executable)
-     result = sys.executable
-     if not isinstance(result, text_type):
-         result = fsdecode(result)
-     return result
-
-
- def proceed(prompt, allowed_chars, error_prompt=None, default=None):
-     p = prompt
-     while True:
-         s = raw_input(p)
-         p = prompt
-         if not s and default:
-             s = default
-         if s:
-             c = s[0].lower()
-             if c in allowed_chars:
-                 break
-             if error_prompt:
-                 p = '%c: %s\n%s' % (c, error_prompt, prompt)
-     return c
-
-
- def extract_by_key(d, keys):
-     if isinstance(keys, string_types):
-         keys = keys.split()
-     result = {}
-     for key in keys:
-         if key in d:
-             result[key] = d[key]
-     return result
-
- def read_exports(stream):
-     if sys.version_info[0] >= 3:
-         # needs to be a text stream
-         stream = codecs.getreader('utf-8')(stream)
-     # Try to load as JSON, falling back on legacy format
-     data = stream.read()
-     stream = StringIO(data)
-     try:
-         jdata = json.load(stream)
-         result = jdata['extensions']['python.exports']['exports']
-         for group, entries in result.items():
-             for k, v in entries.items():
-                 s = '%s = %s' % (k, v)
-                 entry = get_export_entry(s)
-                 assert entry is not None
-                 entries[k] = entry
-         return result
-     except Exception:
-         stream.seek(0, 0)
-
-         def read_stream(cp, stream):
-             if hasattr(cp, 'read_file'):
-                 cp.read_file(stream)
-             else:
-                 cp.readfp(stream)
-
-         cp = configparser.ConfigParser()
-         try:
-             read_stream(cp, stream)
-         except configparser.MissingSectionHeaderError:
-             stream.close()
-             data = textwrap.dedent(data)
-             stream = StringIO(data)
-             read_stream(cp, stream)
-
-         result = {}
-         for key in cp.sections():
-             result[key] = entries = {}
-             for name, value in cp.items(key):
-                 s = '%s = %s' % (name, value)
-                 entry = get_export_entry(s)
-                 assert entry is not None
-                 #entry.dist = self
-                 entries[name] = entry
-         return result
-
-
- def write_exports(exports, stream):
-     if sys.version_info[0] >= 3:
-         # needs to be a text stream
-         stream = codecs.getwriter('utf-8')(stream)
-     cp = configparser.ConfigParser()
-     for k, v in exports.items():
-         # TODO check k, v for valid values
-         cp.add_section(k)
-         for entry in v.values():
-             if entry.suffix is None:
-                 s = entry.prefix
-             else:
-                 s = '%s:%s' % (entry.prefix, entry.suffix)
-             if entry.flags:
-                 s = '%s [%s]' % (s, ', '.join(entry.flags))
-             cp.set(k, entry.name, s)
-     cp.write(stream)
-
-
- @contextlib.contextmanager
- def tempdir():
-     td = tempfile.mkdtemp()
-     try:
-         yield td
-     finally:
-         shutil.rmtree(td)
-
- @contextlib.contextmanager
- def chdir(d):
-     cwd = os.getcwd()
-     try:
-         os.chdir(d)
-         yield
-     finally:
-         os.chdir(cwd)
-
-
- @contextlib.contextmanager
- def socket_timeout(seconds=15):
-     cto = socket.getdefaulttimeout()
-     try:
-         socket.setdefaulttimeout(seconds)
-         yield
-     finally:
-         socket.setdefaulttimeout(cto)
-
-
- class cached_property(object):
-     def __init__(self, func):
-         self.func = func
-         #for attr in ('__name__', '__module__', '__doc__'):
-         #    setattr(self, attr, getattr(func, attr, None))
-
-     def __get__(self, obj, cls=None):
-         if obj is None:
-             return self
-         value = self.func(obj)
-         object.__setattr__(obj, self.func.__name__, value)
-         #obj.__dict__[self.func.__name__] = value = self.func(obj)
-         return value
-
- def convert_path(pathname):
-     """Return 'pathname' as a name that will work on the native filesystem.
-
-     The path is split on '/' and put back together again using the current
-     directory separator. Needed because filenames in the setup script are
-     always supplied in Unix style, and have to be converted to the local
-     convention before we can actually use them in the filesystem. Raises
-     ValueError on non-Unix-ish systems if 'pathname' either starts or
-     ends with a slash.
-     """
-     if os.sep == '/':
-         return pathname
-     if not pathname:
-         return pathname
-     if pathname[0] == '/':
-         raise ValueError("path '%s' cannot be absolute" % pathname)
-     if pathname[-1] == '/':
-         raise ValueError("path '%s' cannot end with '/'" % pathname)
-
-     paths = pathname.split('/')
-     while os.curdir in paths:
-         paths.remove(os.curdir)
-     if not paths:
-         return os.curdir
-     return os.path.join(*paths)
-
-
- class FileOperator(object):
-     def __init__(self, dry_run=False):
-         self.dry_run = dry_run
-         self.ensured = set()
-         self._init_record()
-
-     def _init_record(self):
-         self.record = False
-         self.files_written = set()
-         self.dirs_created = set()
-
-     def record_as_written(self, path):
-         if self.record:
-             self.files_written.add(path)
-
-     def newer(self, source, target):
-         """Tell if the target is newer than the source.
-
-         Returns true if 'source' exists and is more recently modified than
-         'target', or if 'source' exists and 'target' doesn't.
-
-         Returns false if both exist and 'target' is the same age or younger
-         than 'source'. Raise PackagingFileError if 'source' does not exist.
-
-         Note that this test is not very accurate: files created in the same
-         second will have the same "age".
-         """
-         if not os.path.exists(source):
-             raise DistlibException("file '%r' does not exist" %
-                                    os.path.abspath(source))
-         if not os.path.exists(target):
-             return True
-
-         return os.stat(source).st_mtime > os.stat(target).st_mtime
-
-     def copy_file(self, infile, outfile, check=True):
-         """Copy a file respecting dry-run and force flags.
-         """
-         self.ensure_dir(os.path.dirname(outfile))
-         logger.info('Copying %s to %s', infile, outfile)
-         if not self.dry_run:
-             msg = None
-             if check:
-                 if os.path.islink(outfile):
-                     msg = '%s is a symlink' % outfile
-                 elif os.path.exists(outfile) and not os.path.isfile(outfile):
-                     msg = '%s is a non-regular file' % outfile
-             if msg:
-                 raise ValueError(msg + ' which would be overwritten')
-             shutil.copyfile(infile, outfile)
-         self.record_as_written(outfile)
-
-     def copy_stream(self, instream, outfile, encoding=None):
-         assert not os.path.isdir(outfile)
-         self.ensure_dir(os.path.dirname(outfile))
-         logger.info('Copying stream %s to %s', instream, outfile)
-         if not self.dry_run:
-             if encoding is None:
-                 outstream = open(outfile, 'wb')
-             else:
-                 outstream = codecs.open(outfile, 'w', encoding=encoding)
-             try:
-                 shutil.copyfileobj(instream, outstream)
-             finally:
-                 outstream.close()
-         self.record_as_written(outfile)
-
-     def write_binary_file(self, path, data):
-         self.ensure_dir(os.path.dirname(path))
-         if not self.dry_run:
-             if os.path.exists(path):
-                 os.remove(path)
-             with open(path, 'wb') as f:
-                 f.write(data)
-         self.record_as_written(path)
-
-     def write_text_file(self, path, data, encoding):
-         self.write_binary_file(path, data.encode(encoding))
-
-     def set_mode(self, bits, mask, files):
-         if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
-             # Set the executable bits (owner, group, and world) on
-             # all the files specified.
-             for f in files:
-                 if self.dry_run:
-                     logger.info("changing mode of %s", f)
-                 else:
-                     mode = (os.stat(f).st_mode | bits) & mask
-                     logger.info("changing mode of %s to %o", f, mode)
-                     os.chmod(f, mode)
-
-     set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
-
-     def ensure_dir(self, path):
-         path = os.path.abspath(path)
-         if path not in self.ensured and not os.path.exists(path):
-             self.ensured.add(path)
-             d, f = os.path.split(path)
-             self.ensure_dir(d)
-             logger.info('Creating %s' % path)
-             if not self.dry_run:
-                 os.mkdir(path)
-             if self.record:
-                 self.dirs_created.add(path)
-
-     def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
-         dpath = cache_from_source(path, not optimize)
-         logger.info('Byte-compiling %s to %s', path, dpath)
-         if not self.dry_run:
-             if force or self.newer(path, dpath):
-                 if not prefix:
-                     diagpath = None
-                 else:
-                     assert path.startswith(prefix)
-                     diagpath = path[len(prefix):]
-             compile_kwargs = {}
-             if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
-                 compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
-             py_compile.compile(path, dpath, diagpath, True, **compile_kwargs)  # raise error
-         self.record_as_written(dpath)
-         return dpath
-
-     def ensure_removed(self, path):
-         if os.path.exists(path):
-             if os.path.isdir(path) and not os.path.islink(path):
-                 logger.debug('Removing directory tree at %s', path)
-                 if not self.dry_run:
-                     shutil.rmtree(path)
-                 if self.record:
-                     if path in self.dirs_created:
-                         self.dirs_created.remove(path)
-             else:
-                 if os.path.islink(path):
-                     s = 'link'
-                 else:
-                     s = 'file'
-                 logger.debug('Removing %s %s', s, path)
-                 if not self.dry_run:
-                     os.remove(path)
-                 if self.record:
-                     if path in self.files_written:
-                         self.files_written.remove(path)
-
-     def is_writable(self, path):
-         result = False
-         while not result:
-             if os.path.exists(path):
-                 result = os.access(path, os.W_OK)
-                 break
-             parent = os.path.dirname(path)
-             if parent == path:
-                 break
-             path = parent
-         return result
-
-     def commit(self):
-         """
-         Commit recorded changes, turn off recording, return
-         changes.
-         """
-         assert self.record
-         result = self.files_written, self.dirs_created
-         self._init_record()
-         return result
-
-     def rollback(self):
-         if not self.dry_run:
-             for f in list(self.files_written):
-                 if os.path.exists(f):
-                     os.remove(f)
-             # dirs should all be empty now, except perhaps for
-             # __pycache__ subdirs
-             # reverse so that subdirs appear before their parents
-             dirs = sorted(self.dirs_created, reverse=True)
-             for d in dirs:
-                 flist = os.listdir(d)
-                 if flist:
-                     assert flist == ['__pycache__']
-                     sd = os.path.join(d, flist[0])
-                     os.rmdir(sd)
-                 os.rmdir(d)  # should fail if non-empty
-         self._init_record()
-
- def resolve(module_name, dotted_path):
-     if module_name in sys.modules:
-         mod = sys.modules[module_name]
-     else:
-         mod = __import__(module_name)
-     if dotted_path is None:
-         result = mod
-     else:
-         parts = dotted_path.split('.')
-         result = getattr(mod, parts.pop(0))
-         for p in parts:
-             result = getattr(result, p)
-     return result
-
-
- class ExportEntry(object):
-     def __init__(self, name, prefix, suffix, flags):
-         self.name = name
-         self.prefix = prefix
-         self.suffix = suffix
-         self.flags = flags
-
-     @cached_property
-     def value(self):
-         return resolve(self.prefix, self.suffix)
-
-     def __repr__(self):  # pragma: no cover
-         return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
-                                                 self.suffix, self.flags)
-
-     def __eq__(self, other):
-         if not isinstance(other, ExportEntry):
-             result = False
-         else:
-             result = (self.name == other.name and
-                       self.prefix == other.prefix and
-                       self.suffix == other.suffix and
-                       self.flags == other.flags)
-         return result
-
-     __hash__ = object.__hash__
-
-
- ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
-                       \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
-                       \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
-                       ''', re.VERBOSE)
-
- def get_export_entry(specification):
-     m = ENTRY_RE.search(specification)
-     if not m:
-         result = None
-         if '[' in specification or ']' in specification:
-             raise DistlibException("Invalid specification "
-                                    "'%s'" % specification)
-     else:
-         d = m.groupdict()
-         name = d['name']
-         path = d['callable']
-         colons = path.count(':')
-         if colons == 0:
-             prefix, suffix = path, None
-         else:
-             if colons != 1:
-                 raise DistlibException("Invalid specification "
-                                        "'%s'" % specification)
-             prefix, suffix = path.split(':')
-         flags = d['flags']
-         if flags is None:
-             if '[' in specification or ']' in specification:
-                 raise DistlibException("Invalid specification "
-                                        "'%s'" % specification)
739
- flags = []
740
- else:
741
- flags = [f.strip() for f in flags.split(',')]
742
- result = ExportEntry(name, prefix, suffix, flags)
743
- return result
744
-
745
-
746
- def get_cache_base(suffix=None):
747
- """
748
- Return the default base location for distlib caches. If the directory does
749
- not exist, it is created. Use the suffix provided for the base directory,
750
- and default to '.distlib' if it isn't provided.
751
-
752
- On Windows, if LOCALAPPDATA is defined in the environment, then it is
753
- assumed to be a directory, and will be the parent directory of the result.
754
- On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
755
- directory - using os.expanduser('~') - will be the parent directory of
756
- the result.
757
-
758
- The result is just the directory '.distlib' in the parent directory as
759
- determined above, or with the name specified with ``suffix``.
760
- """
761
- if suffix is None:
762
- suffix = '.distlib'
763
- if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
764
- result = os.path.expandvars('$localappdata')
765
- else:
766
- # Assume posix, or old Windows
767
- result = os.path.expanduser('~')
768
- # we use 'isdir' instead of 'exists', because we want to
769
- # fail if there's a file with that name
770
- if os.path.isdir(result):
771
- usable = os.access(result, os.W_OK)
772
- if not usable:
773
- logger.warning('Directory exists but is not writable: %s', result)
774
- else:
775
- try:
776
- os.makedirs(result)
777
- usable = True
778
- except OSError:
779
- logger.warning('Unable to create %s', result, exc_info=True)
780
- usable = False
781
- if not usable:
782
- result = tempfile.mkdtemp()
783
- logger.warning('Default location unusable, using %s', result)
784
- return os.path.join(result, suffix)
785
-
786
-
787
- def path_to_cache_dir(path):
788
- """
789
- Convert an absolute path to a directory name for use in a cache.
790
-
791
- The algorithm used is:
792
-
793
- #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
794
- #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
795
- #. ``'.cache'`` is appended.
796
- """
797
- d, p = os.path.splitdrive(os.path.abspath(path))
798
- if d:
799
- d = d.replace(':', '---')
800
- p = p.replace(os.sep, '--')
801
- return d + p + '.cache'
802
-
803
-
804
- def ensure_slash(s):
805
- if not s.endswith('/'):
806
- return s + '/'
807
- return s
808
-
809
-
810
- def parse_credentials(netloc):
811
- username = password = None
812
- if '@' in netloc:
813
- prefix, netloc = netloc.rsplit('@', 1)
814
- if ':' not in prefix:
815
- username = prefix
816
- else:
817
- username, password = prefix.split(':', 1)
818
- if username:
819
- username = unquote(username)
820
- if password:
821
- password = unquote(password)
822
- return username, password, netloc
823
-
824
-
825
- def get_process_umask():
826
- result = os.umask(0o22)
827
- os.umask(result)
828
- return result
829
-
830
- def is_string_sequence(seq):
831
- result = True
832
- i = None
833
- for i, s in enumerate(seq):
834
- if not isinstance(s, string_types):
835
- result = False
836
- break
837
- assert i is not None
838
- return result
839
-
840
- PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
841
- '([a-z0-9_.+-]+)', re.I)
842
- PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
843
-
844
-
845
- def split_filename(filename, project_name=None):
846
- """
847
- Extract name, version, python version from a filename (no extension)
848
-
849
- Return name, version, pyver or None
850
- """
851
- result = None
852
- pyver = None
853
- filename = unquote(filename).replace(' ', '-')
854
- m = PYTHON_VERSION.search(filename)
855
- if m:
856
- pyver = m.group(1)
857
- filename = filename[:m.start()]
858
- if project_name and len(filename) > len(project_name) + 1:
859
- m = re.match(re.escape(project_name) + r'\b', filename)
860
- if m:
861
- n = m.end()
862
- result = filename[:n], filename[n + 1:], pyver
863
- if result is None:
864
- m = PROJECT_NAME_AND_VERSION.match(filename)
865
- if m:
866
- result = m.group(1), m.group(3), pyver
867
- return result
868
-
869
- # Allow spaces in name because of legacy dists like "Twisted Core"
870
- NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
871
- r'\(\s*(?P<ver>[^\s)]+)\)$')
872
-
873
- def parse_name_and_version(p):
874
- """
875
- A utility method used to get name and version from a string.
876
-
877
- From e.g. a Provides-Dist value.
878
-
879
- :param p: A value in a form 'foo (1.0)'
880
- :return: The name and version as a tuple.
881
- """
882
- m = NAME_VERSION_RE.match(p)
883
- if not m:
884
- raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
885
- d = m.groupdict()
886
- return d['name'].strip().lower(), d['ver']
887
-
888
- def get_extras(requested, available):
889
- result = set()
890
- requested = set(requested or [])
891
- available = set(available or [])
892
- if '*' in requested:
893
- requested.remove('*')
894
- result |= available
895
- for r in requested:
896
- if r == '-':
897
- result.add(r)
898
- elif r.startswith('-'):
899
- unwanted = r[1:]
900
- if unwanted not in available:
901
- logger.warning('undeclared extra: %s' % unwanted)
902
- if unwanted in result:
903
- result.remove(unwanted)
904
- else:
905
- if r not in available:
906
- logger.warning('undeclared extra: %s' % r)
907
- result.add(r)
908
- return result
909
- #
910
- # Extended metadata functionality
911
- #
912
-
913
- def _get_external_data(url):
914
- result = {}
915
- try:
916
- # urlopen might fail if it runs into redirections,
917
- # because of Python issue #13696. Fixed in locators
918
- # using a custom redirect handler.
919
- resp = urlopen(url)
920
- headers = resp.info()
921
- ct = headers.get('Content-Type')
922
- if not ct.startswith('application/json'):
923
- logger.debug('Unexpected response for JSON request: %s', ct)
924
- else:
925
- reader = codecs.getreader('utf-8')(resp)
926
- #data = reader.read().decode('utf-8')
927
- #result = json.loads(data)
928
- result = json.load(reader)
929
- except Exception as e:
930
- logger.exception('Failed to get external data for %s: %s', url, e)
931
- return result
932
-
933
- _external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
934
-
935
- def get_project_data(name):
936
- url = '%s/%s/project.json' % (name[0].upper(), name)
937
- url = urljoin(_external_data_base_url, url)
938
- result = _get_external_data(url)
939
- return result
940
-
941
- def get_package_data(name, version):
942
- url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
943
- url = urljoin(_external_data_base_url, url)
944
- return _get_external_data(url)
945
-
946
-
947
- class Cache(object):
948
- """
949
- A class implementing a cache for resources that need to live in the file system
950
- e.g. shared libraries. This class was moved from resources to here because it
951
- could be used by other modules, e.g. the wheel module.
952
- """
953
-
954
- def __init__(self, base):
955
- """
956
- Initialise an instance.
957
-
958
- :param base: The base directory where the cache should be located.
959
- """
960
- # we use 'isdir' instead of 'exists', because we want to
961
- # fail if there's a file with that name
962
- if not os.path.isdir(base): # pragma: no cover
963
- os.makedirs(base)
964
- if (os.stat(base).st_mode & 0o77) != 0:
965
- logger.warning('Directory \'%s\' is not private', base)
966
- self.base = os.path.abspath(os.path.normpath(base))
967
-
968
- def prefix_to_dir(self, prefix):
969
- """
970
- Converts a resource prefix to a directory name in the cache.
971
- """
972
- return path_to_cache_dir(prefix)
973
-
974
- def clear(self):
975
- """
976
- Clear the cache.
977
- """
978
- not_removed = []
979
- for fn in os.listdir(self.base):
980
- fn = os.path.join(self.base, fn)
981
- try:
982
- if os.path.islink(fn) or os.path.isfile(fn):
983
- os.remove(fn)
984
- elif os.path.isdir(fn):
985
- shutil.rmtree(fn)
986
- except Exception:
987
- not_removed.append(fn)
988
- return not_removed
989
-
990
-
991
- class EventMixin(object):
992
- """
993
- A very simple publish/subscribe system.
994
- """
995
- def __init__(self):
996
- self._subscribers = {}
997
-
998
- def add(self, event, subscriber, append=True):
999
- """
1000
- Add a subscriber for an event.
1001
-
1002
- :param event: The name of an event.
1003
- :param subscriber: The subscriber to be added (and called when the
1004
- event is published).
1005
- :param append: Whether to append or prepend the subscriber to an
1006
- existing subscriber list for the event.
1007
- """
1008
- subs = self._subscribers
1009
- if event not in subs:
1010
- subs[event] = deque([subscriber])
1011
- else:
1012
- sq = subs[event]
1013
- if append:
1014
- sq.append(subscriber)
1015
- else:
1016
- sq.appendleft(subscriber)
1017
-
1018
- def remove(self, event, subscriber):
1019
- """
1020
- Remove a subscriber for an event.
1021
-
1022
- :param event: The name of an event.
1023
- :param subscriber: The subscriber to be removed.
1024
- """
1025
- subs = self._subscribers
1026
- if event not in subs:
1027
- raise ValueError('No subscribers: %r' % event)
1028
- subs[event].remove(subscriber)
1029
-
1030
- def get_subscribers(self, event):
1031
- """
1032
- Return an iterator for the subscribers for an event.
1033
- :param event: The event to return subscribers for.
1034
- """
1035
- return iter(self._subscribers.get(event, ()))
1036
-
1037
- def publish(self, event, *args, **kwargs):
1038
- """
1039
- Publish a event and return a list of values returned by its
1040
- subscribers.
1041
-
1042
- :param event: The event to publish.
1043
- :param args: The positional arguments to pass to the event's
1044
- subscribers.
1045
- :param kwargs: The keyword arguments to pass to the event's
1046
- subscribers.
1047
- """
1048
- result = []
1049
- for subscriber in self.get_subscribers(event):
1050
- try:
1051
- value = subscriber(event, *args, **kwargs)
1052
- except Exception:
1053
- logger.exception('Exception during event publication')
1054
- value = None
1055
- result.append(value)
1056
- logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
1057
- event, args, kwargs, result)
1058
- return result
1059
-
1060
- #
1061
- # Simple sequencing
1062
- #
1063
- class Sequencer(object):
1064
- def __init__(self):
1065
- self._preds = {}
1066
- self._succs = {}
1067
- self._nodes = set() # nodes with no preds/succs
1068
-
1069
- def add_node(self, node):
1070
- self._nodes.add(node)
1071
-
1072
- def remove_node(self, node, edges=False):
1073
- if node in self._nodes:
1074
- self._nodes.remove(node)
1075
- if edges:
1076
- for p in set(self._preds.get(node, ())):
1077
- self.remove(p, node)
1078
- for s in set(self._succs.get(node, ())):
1079
- self.remove(node, s)
1080
- # Remove empties
1081
- for k, v in list(self._preds.items()):
1082
- if not v:
1083
- del self._preds[k]
1084
- for k, v in list(self._succs.items()):
1085
- if not v:
1086
- del self._succs[k]
1087
-
1088
- def add(self, pred, succ):
1089
- assert pred != succ
1090
- self._preds.setdefault(succ, set()).add(pred)
1091
- self._succs.setdefault(pred, set()).add(succ)
1092
-
1093
- def remove(self, pred, succ):
1094
- assert pred != succ
1095
- try:
1096
- preds = self._preds[succ]
1097
- succs = self._succs[pred]
1098
- except KeyError: # pragma: no cover
1099
- raise ValueError('%r not a successor of anything' % succ)
1100
- try:
1101
- preds.remove(pred)
1102
- succs.remove(succ)
1103
- except KeyError: # pragma: no cover
1104
- raise ValueError('%r not a successor of %r' % (succ, pred))
1105
-
1106
- def is_step(self, step):
1107
- return (step in self._preds or step in self._succs or
1108
- step in self._nodes)
1109
-
1110
- def get_steps(self, final):
1111
- if not self.is_step(final):
1112
- raise ValueError('Unknown: %r' % final)
1113
- result = []
1114
- todo = []
1115
- seen = set()
1116
- todo.append(final)
1117
- while todo:
1118
- step = todo.pop(0)
1119
- if step in seen:
1120
- # if a step was already seen,
1121
- # move it to the end (so it will appear earlier
1122
- # when reversed on return) ... but not for the
1123
- # final step, as that would be confusing for
1124
- # users
1125
- if step != final:
1126
- result.remove(step)
1127
- result.append(step)
1128
- else:
1129
- seen.add(step)
1130
- result.append(step)
1131
- preds = self._preds.get(step, ())
1132
- todo.extend(preds)
1133
- return reversed(result)
1134
-
1135
- @property
1136
- def strong_connections(self):
1137
- #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
1138
- index_counter = [0]
1139
- stack = []
1140
- lowlinks = {}
1141
- index = {}
1142
- result = []
1143
-
1144
- graph = self._succs
1145
-
1146
- def strongconnect(node):
1147
- # set the depth index for this node to the smallest unused index
1148
- index[node] = index_counter[0]
1149
- lowlinks[node] = index_counter[0]
1150
- index_counter[0] += 1
1151
- stack.append(node)
1152
-
1153
- # Consider successors
1154
- try:
1155
- successors = graph[node]
1156
- except Exception:
1157
- successors = []
1158
- for successor in successors:
1159
- if successor not in lowlinks:
1160
- # Successor has not yet been visited
1161
- strongconnect(successor)
1162
- lowlinks[node] = min(lowlinks[node],lowlinks[successor])
1163
- elif successor in stack:
1164
- # the successor is in the stack and hence in the current
1165
- # strongly connected component (SCC)
1166
- lowlinks[node] = min(lowlinks[node],index[successor])
1167
-
1168
- # If `node` is a root node, pop the stack and generate an SCC
1169
- if lowlinks[node] == index[node]:
1170
- connected_component = []
1171
-
1172
- while True:
1173
- successor = stack.pop()
1174
- connected_component.append(successor)
1175
- if successor == node: break
1176
- component = tuple(connected_component)
1177
- # storing the result
1178
- result.append(component)
1179
-
1180
- for node in graph:
1181
- if node not in lowlinks:
1182
- strongconnect(node)
1183
-
1184
- return result
1185
-
1186
- @property
1187
- def dot(self):
1188
- result = ['digraph G {']
1189
- for succ in self._preds:
1190
- preds = self._preds[succ]
1191
- for pred in preds:
1192
- result.append(' %s -> %s;' % (pred, succ))
1193
- for node in self._nodes:
1194
- result.append(' %s;' % node)
1195
- result.append('}')
1196
- return '\n'.join(result)
1197
-
1198
- #
1199
- # Unarchiving functionality for zip, tar, tgz, tbz, whl
1200
- #
1201
-
1202
- ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
1203
- '.tgz', '.tbz', '.whl')
1204
-
1205
- def unarchive(archive_filename, dest_dir, format=None, check=True):
1206
-
1207
- def check_path(path):
1208
- if not isinstance(path, text_type):
1209
- path = path.decode('utf-8')
1210
- p = os.path.abspath(os.path.join(dest_dir, path))
1211
- if not p.startswith(dest_dir) or p[plen] != os.sep:
1212
- raise ValueError('path outside destination: %r' % p)
1213
-
1214
- dest_dir = os.path.abspath(dest_dir)
1215
- plen = len(dest_dir)
1216
- archive = None
1217
- if format is None:
1218
- if archive_filename.endswith(('.zip', '.whl')):
1219
- format = 'zip'
1220
- elif archive_filename.endswith(('.tar.gz', '.tgz')):
1221
- format = 'tgz'
1222
- mode = 'r:gz'
1223
- elif archive_filename.endswith(('.tar.bz2', '.tbz')):
1224
- format = 'tbz'
1225
- mode = 'r:bz2'
1226
- elif archive_filename.endswith('.tar'):
1227
- format = 'tar'
1228
- mode = 'r'
1229
- else: # pragma: no cover
1230
- raise ValueError('Unknown format for %r' % archive_filename)
1231
- try:
1232
- if format == 'zip':
1233
- archive = ZipFile(archive_filename, 'r')
1234
- if check:
1235
- names = archive.namelist()
1236
- for name in names:
1237
- check_path(name)
1238
- else:
1239
- archive = tarfile.open(archive_filename, mode)
1240
- if check:
1241
- names = archive.getnames()
1242
- for name in names:
1243
- check_path(name)
1244
- if format != 'zip' and sys.version_info[0] < 3:
1245
- # See Python issue 17153. If the dest path contains Unicode,
1246
- # tarfile extraction fails on Python 2.x if a member path name
1247
- # contains non-ASCII characters - it leads to an implicit
1248
- # bytes -> unicode conversion using ASCII to decode.
1249
- for tarinfo in archive.getmembers():
1250
- if not isinstance(tarinfo.name, text_type):
1251
- tarinfo.name = tarinfo.name.decode('utf-8')
1252
- archive.extractall(dest_dir)
1253
-
1254
- finally:
1255
- if archive:
1256
- archive.close()
1257
-
1258
-
1259
- def zip_dir(directory):
1260
- """zip a directory tree into a BytesIO object"""
1261
- result = io.BytesIO()
1262
- dlen = len(directory)
1263
- with ZipFile(result, "w") as zf:
1264
- for root, dirs, files in os.walk(directory):
1265
- for name in files:
1266
- full = os.path.join(root, name)
1267
- rel = root[dlen:]
1268
- dest = os.path.join(rel, name)
1269
- zf.write(full, dest)
1270
- return result
1271
-
1272
- #
1273
- # Simple progress bar
1274
- #
1275
-
1276
- UNITS = ('', 'K', 'M', 'G','T','P')
1277
-
1278
-
1279
- class Progress(object):
1280
- unknown = 'UNKNOWN'
1281
-
1282
- def __init__(self, minval=0, maxval=100):
1283
- assert maxval is None or maxval >= minval
1284
- self.min = self.cur = minval
1285
- self.max = maxval
1286
- self.started = None
1287
- self.elapsed = 0
1288
- self.done = False
1289
-
1290
- def update(self, curval):
1291
- assert self.min <= curval
1292
- assert self.max is None or curval <= self.max
1293
- self.cur = curval
1294
- now = time.time()
1295
- if self.started is None:
1296
- self.started = now
1297
- else:
1298
- self.elapsed = now - self.started
1299
-
1300
- def increment(self, incr):
1301
- assert incr >= 0
1302
- self.update(self.cur + incr)
1303
-
1304
- def start(self):
1305
- self.update(self.min)
1306
- return self
1307
-
1308
- def stop(self):
1309
- if self.max is not None:
1310
- self.update(self.max)
1311
- self.done = True
1312
-
1313
- @property
1314
- def maximum(self):
1315
- return self.unknown if self.max is None else self.max
1316
-
1317
- @property
1318
- def percentage(self):
1319
- if self.done:
1320
- result = '100 %'
1321
- elif self.max is None:
1322
- result = ' ?? %'
1323
- else:
1324
- v = 100.0 * (self.cur - self.min) / (self.max - self.min)
1325
- result = '%3d %%' % v
1326
- return result
1327
-
1328
- def format_duration(self, duration):
1329
- if (duration <= 0) and self.max is None or self.cur == self.min:
1330
- result = '??:??:??'
1331
- #elif duration < 1:
1332
- # result = '--:--:--'
1333
- else:
1334
- result = time.strftime('%H:%M:%S', time.gmtime(duration))
1335
- return result
1336
-
1337
- @property
1338
- def ETA(self):
1339
- if self.done:
1340
- prefix = 'Done'
1341
- t = self.elapsed
1342
- #import pdb; pdb.set_trace()
1343
- else:
1344
- prefix = 'ETA '
1345
- if self.max is None:
1346
- t = -1
1347
- elif self.elapsed == 0 or (self.cur == self.min):
1348
- t = 0
1349
- else:
1350
- #import pdb; pdb.set_trace()
1351
- t = float(self.max - self.min)
1352
- t /= self.cur - self.min
1353
- t = (t - 1) * self.elapsed
1354
- return '%s: %s' % (prefix, self.format_duration(t))
1355
-
1356
- @property
1357
- def speed(self):
1358
- if self.elapsed == 0:
1359
- result = 0.0
1360
- else:
1361
- result = (self.cur - self.min) / self.elapsed
1362
- for unit in UNITS:
1363
- if result < 1000:
1364
- break
1365
- result /= 1000.0
1366
- return '%d %sB/s' % (result, unit)
1367
-
1368
- #
1369
- # Glob functionality
1370
- #
1371
-
1372
- RICH_GLOB = re.compile(r'\{([^}]*)\}')
1373
- _CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
1374
- _CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
1375
-
1376
-
1377
- def iglob(path_glob):
1378
- """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
1379
- if _CHECK_RECURSIVE_GLOB.search(path_glob):
1380
- msg = """invalid glob %r: recursive glob "**" must be used alone"""
1381
- raise ValueError(msg % path_glob)
1382
- if _CHECK_MISMATCH_SET.search(path_glob):
1383
- msg = """invalid glob %r: mismatching set marker '{' or '}'"""
1384
- raise ValueError(msg % path_glob)
1385
- return _iglob(path_glob)
1386
-
1387
-
1388
- def _iglob(path_glob):
1389
- rich_path_glob = RICH_GLOB.split(path_glob, 1)
1390
- if len(rich_path_glob) > 1:
1391
- assert len(rich_path_glob) == 3, rich_path_glob
1392
- prefix, set, suffix = rich_path_glob
1393
- for item in set.split(','):
1394
- for path in _iglob(''.join((prefix, item, suffix))):
1395
- yield path
1396
- else:
1397
- if '**' not in path_glob:
1398
- for item in std_iglob(path_glob):
1399
- yield item
1400
- else:
1401
- prefix, radical = path_glob.split('**', 1)
1402
- if prefix == '':
1403
- prefix = '.'
1404
- if radical == '':
1405
- radical = '*'
1406
- else:
1407
- # we support both
1408
- radical = radical.lstrip('/')
1409
- radical = radical.lstrip('\\')
1410
- for path, dir, files in os.walk(prefix):
1411
- path = os.path.normpath(path)
1412
- for fn in _iglob(os.path.join(path, radical)):
1413
- yield fn
1414
-
1415
- if ssl:
1416
- from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
1417
- CertificateError)
1418
-
1419
-
1420
- #
1421
- # HTTPSConnection which verifies certificates/matches domains
1422
- #
1423
-
1424
- class HTTPSConnection(httplib.HTTPSConnection):
1425
- ca_certs = None # set this to the path to the certs file (.pem)
1426
- check_domain = True # only used if ca_certs is not None
1427
-
1428
- # noinspection PyPropertyAccess
1429
- def connect(self):
1430
- sock = socket.create_connection((self.host, self.port), self.timeout)
1431
- if getattr(self, '_tunnel_host', False):
1432
- self.sock = sock
1433
- self._tunnel()
1434
-
1435
- context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
1436
- if hasattr(ssl, 'OP_NO_SSLv2'):
1437
- context.options |= ssl.OP_NO_SSLv2
1438
- if self.cert_file:
1439
- context.load_cert_chain(self.cert_file, self.key_file)
1440
- kwargs = {}
1441
- if self.ca_certs:
1442
- context.verify_mode = ssl.CERT_REQUIRED
1443
- context.load_verify_locations(cafile=self.ca_certs)
1444
- if getattr(ssl, 'HAS_SNI', False):
1445
- kwargs['server_hostname'] = self.host
1446
-
1447
- self.sock = context.wrap_socket(sock, **kwargs)
1448
- if self.ca_certs and self.check_domain:
1449
- try:
1450
- match_hostname(self.sock.getpeercert(), self.host)
1451
- logger.debug('Host verified: %s', self.host)
1452
- except CertificateError: # pragma: no cover
1453
- self.sock.shutdown(socket.SHUT_RDWR)
1454
- self.sock.close()
1455
- raise
1456
-
1457
- class HTTPSHandler(BaseHTTPSHandler):
1458
- def __init__(self, ca_certs, check_domain=True):
1459
- BaseHTTPSHandler.__init__(self)
1460
- self.ca_certs = ca_certs
1461
- self.check_domain = check_domain
1462
-
1463
- def _conn_maker(self, *args, **kwargs):
1464
- """
1465
- This is called to create a connection instance. Normally you'd
1466
- pass a connection class to do_open, but it doesn't actually check for
1467
- a class, and just expects a callable. As long as we behave just as a
1468
- constructor would have, we should be OK. If it ever changes so that
1469
- we *must* pass a class, we'll create an UnsafeHTTPSConnection class
1470
- which just sets check_domain to False in the class definition, and
1471
- choose which one to pass to do_open.
1472
- """
1473
- result = HTTPSConnection(*args, **kwargs)
1474
- if self.ca_certs:
1475
- result.ca_certs = self.ca_certs
1476
- result.check_domain = self.check_domain
1477
- return result
1478
-
1479
- def https_open(self, req):
1480
- try:
1481
- return self.do_open(self._conn_maker, req)
1482
- except URLError as e:
1483
- if 'certificate verify failed' in str(e.reason):
1484
- raise CertificateError('Unable to verify server certificate '
1485
- 'for %s' % req.host)
1486
- else:
1487
- raise
1488
-
1489
- #
1490
- # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
1491
- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves
1492
- # HTML containing a http://xyz link when it should be https://xyz),
1493
- # you can use the following handler class, which does not allow HTTP traffic.
1494
- #
1495
- # It works by inheriting from HTTPHandler - so build_opener won't add a
1496
- # handler for HTTP itself.
1497
- #
1498
- class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
1499
- def http_open(self, req):
1500
- raise URLError('Unexpected HTTP request on what should be a secure '
1501
- 'connection: %s' % req)
1502
-
1503
- #
1504
- # XML-RPC with timeouts
1505
- #
1506
- class Transport(xmlrpclib.Transport):
1507
- def __init__(self, timeout, use_datetime=0):
1508
- self.timeout = timeout
1509
- xmlrpclib.Transport.__init__(self, use_datetime)
1510
-
1511
- def make_connection(self, host):
1512
- h, eh, x509 = self.get_host_info(host)
1513
- if not self._connection or host != self._connection[0]:
1514
- self._extra_headers = eh
1515
- self._connection = host, httplib.HTTPConnection(h)
1516
- return self._connection[1]
1517
-
1518
- if ssl:
1519
- class SafeTransport(xmlrpclib.SafeTransport):
1520
- def __init__(self, timeout, use_datetime=0):
1521
- self.timeout = timeout
1522
- xmlrpclib.SafeTransport.__init__(self, use_datetime)
1523
-
1524
- def make_connection(self, host):
1525
- h, eh, kwargs = self.get_host_info(host)
1526
- if not kwargs:
1527
- kwargs = {}
1528
- kwargs['timeout'] = self.timeout
1529
- if not self._connection or host != self._connection[0]:
1530
- self._extra_headers = eh
1531
- self._connection = host, httplib.HTTPSConnection(h, None,
1532
- **kwargs)
1533
- return self._connection[1]
1534
-
1535
-
1536
- class ServerProxy(xmlrpclib.ServerProxy):
1537
- def __init__(self, uri, **kwargs):
1538
- self.timeout = timeout = kwargs.pop('timeout', None)
1539
- # The above classes only come into play if a timeout
1540
- # is specified
1541
- if timeout is not None:
1542
- # scheme = splittype(uri) # deprecated as of Python 3.8
1543
- scheme = urlparse(uri)[0]
1544
- use_datetime = kwargs.get('use_datetime', 0)
1545
- if scheme == 'https':
1546
- tcls = SafeTransport
1547
- else:
1548
- tcls = Transport
1549
- kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
1550
- self.transport = t
1551
- xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
1552
-
1553
- #
1554
- # CSV functionality. This is provided because on 2.x, the csv module can't
1555
- # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
1556
- #
1557
-
1558
- def _csv_open(fn, mode, **kwargs):
1559
- if sys.version_info[0] < 3:
1560
- mode += 'b'
1561
- else:
1562
- kwargs['newline'] = ''
1563
- # Python 3 determines encoding from locale. Force 'utf-8'
1564
- # file encoding to match other forced utf-8 encoding
1565
- kwargs['encoding'] = 'utf-8'
1566
- return open(fn, mode, **kwargs)
1567
-
1568
-
1569
- class CSVBase(object):
1570
- defaults = {
1571
- 'delimiter': str(','), # The strs are used because we need native
1572
- 'quotechar': str('"'), # str in the csv API (2.x won't take
1573
- 'lineterminator': str('\n') # Unicode)
1574
- }
1575
-
1576
- def __enter__(self):
1577
- return self
1578
-
1579
- def __exit__(self, *exc_info):
1580
- self.stream.close()
1581
-
1582
-
1583
- class CSVReader(CSVBase):
1584
- def __init__(self, **kwargs):
1585
- if 'stream' in kwargs:
1586
- stream = kwargs['stream']
1587
- if sys.version_info[0] >= 3:
1588
- # needs to be a text stream
1589
- stream = codecs.getreader('utf-8')(stream)
1590
- self.stream = stream
1591
- else:
1592
- self.stream = _csv_open(kwargs['path'], 'r')
1593
- self.reader = csv.reader(self.stream, **self.defaults)
1594
-
1595
- def __iter__(self):
1596
- return self
1597
-
1598
- def next(self):
1599
- result = next(self.reader)
1600
- if sys.version_info[0] < 3:
1601
- for i, item in enumerate(result):
1602
- if not isinstance(item, text_type):
1603
- result[i] = item.decode('utf-8')
1604
- return result
1605
-
1606
- __next__ = next
1607
-
1608
- class CSVWriter(CSVBase):
1609
- def __init__(self, fn, **kwargs):
1610
- self.stream = _csv_open(fn, 'w')
1611
- self.writer = csv.writer(self.stream, **self.defaults)
1612
-
1613
- def writerow(self, row):
1614
- if sys.version_info[0] < 3:
1615
- r = []
1616
- for item in row:
1617
- if isinstance(item, text_type):
1618
- item = item.encode('utf-8')
1619
- r.append(item)
1620
- row = r
1621
- self.writer.writerow(row)
1622
-
1623
- #
1624
- # Configurator functionality
1625
- #
1626
-
1627
- class Configurator(BaseConfigurator):
1628
-
1629
- value_converters = dict(BaseConfigurator.value_converters)
1630
- value_converters['inc'] = 'inc_convert'
1631
-
1632
- def __init__(self, config, base=None):
1633
- super(Configurator, self).__init__(config)
1634
- self.base = base or os.getcwd()
1635
-
1636
- def configure_custom(self, config):
1637
- def convert(o):
1638
- if isinstance(o, (list, tuple)):
1639
- result = type(o)([convert(i) for i in o])
1640
- elif isinstance(o, dict):
1641
- if '()' in o:
1642
- result = self.configure_custom(o)
1643
- else:
1644
- result = {}
1645
- for k in o:
1646
- result[k] = convert(o[k])
1647
- else:
1648
- result = self.convert(o)
1649
- return result
1650
-
1651
- c = config.pop('()')
1652
- if not callable(c):
1653
- c = self.resolve(c)
1654
- props = config.pop('.', None)
1655
- # Check for valid identifiers
1656
- args = config.pop('[]', ())
1657
- if args:
1658
- args = tuple([convert(o) for o in args])
1659
- items = [(k, convert(config[k])) for k in config if valid_ident(k)]
1660
- kwargs = dict(items)
1661
- result = c(*args, **kwargs)
1662
- if props:
1663
- for n, v in props.items():
1664
- setattr(result, n, convert(v))
1665
- return result
1666
-
1667
- def __getitem__(self, key):
1668
- result = self.config[key]
1669
- if isinstance(result, dict) and '()' in result:
1670
- self.config[key] = result = self.configure_custom(result)
1671
- return result
1672
-
1673
- def inc_convert(self, value):
1674
- """Default converter for the inc:// protocol."""
1675
- if not os.path.isabs(value):
1676
- value = os.path.join(self.base, value)
1677
- with codecs.open(value, 'r', encoding='utf-8') as f:
1678
- result = json.load(f)
1679
- return result
1680
-
1681
-
1682
- class SubprocessMixin(object):
1683
- """
1684
- Mixin for running subprocesses and capturing their output
1685
- """
1686
- def __init__(self, verbose=False, progress=None):
1687
- self.verbose = verbose
1688
- self.progress = progress
1689
-
1690
- def reader(self, stream, context):
1691
- """
1692
- Read lines from a subprocess' output stream and either pass to a progress
1693
- callable (if specified) or write progress information to sys.stderr.
1694
- """
1695
- progress = self.progress
1696
- verbose = self.verbose
1697
- while True:
1698
- s = stream.readline()
1699
- if not s:
1700
- break
1701
- if progress is not None:
1702
- progress(s, context)
1703
- else:
1704
- if not verbose:
1705
- sys.stderr.write('.')
1706
- else:
1707
- sys.stderr.write(s.decode('utf-8'))
1708
- sys.stderr.flush()
1709
- stream.close()
1710
-
1711
- def run_command(self, cmd, **kwargs):
1712
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
1713
- stderr=subprocess.PIPE, **kwargs)
1714
- t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
1715
- t1.start()
1716
- t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
1717
- t2.start()
1718
- p.wait()
1719
- t1.join()
1720
- t2.join()
1721
- if self.progress is not None:
1722
- self.progress('done.', 'main')
1723
- elif self.verbose:
1724
- sys.stderr.write('done.\n')
1725
- return p
1726
-
1727
-
1728
- def normalize_name(name):
1729
- """Normalize a python package name a la PEP 503"""
1730
- # https://www.python.org/dev/peps/pep-0503/#normalized-names
1731
- return re.sub('[-_.]+', '-', name).lower()
1732
-
1733
- # def _get_pypirc_command():
1734
- # """
1735
- # Get the distutils command for interacting with PyPI configurations.
1736
- # :return: the command.
1737
- # """
1738
- # from distutils.core import Distribution
1739
- # from distutils.config import PyPIRCCommand
1740
- # d = Distribution()
1741
- # return PyPIRCCommand(d)
1742
-
1743
- class PyPIRCFile(object):
1744
-
1745
- DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
1746
- DEFAULT_REALM = 'pypi'
1747
-
1748
- def __init__(self, fn=None, url=None):
1749
- if fn is None:
1750
- fn = os.path.join(os.path.expanduser('~'), '.pypirc')
1751
- self.filename = fn
1752
- self.url = url
1753
-
1754
- def read(self):
1755
- result = {}
1756
-
1757
- if os.path.exists(self.filename):
1758
- repository = self.url or self.DEFAULT_REPOSITORY
1759
-
1760
- config = configparser.RawConfigParser()
1761
- config.read(self.filename)
1762
- sections = config.sections()
1763
- if 'distutils' in sections:
1764
- # let's get the list of servers
1765
- index_servers = config.get('distutils', 'index-servers')
1766
- _servers = [server.strip() for server in
1767
- index_servers.split('\n')
1768
- if server.strip() != '']
1769
- if _servers == []:
1770
- # nothing set, let's try to get the default pypi
1771
- if 'pypi' in sections:
1772
- _servers = ['pypi']
1773
- else:
1774
- for server in _servers:
1775
- result = {'server': server}
1776
- result['username'] = config.get(server, 'username')
1777
-
1778
- # optional params
1779
- for key, default in (('repository', self.DEFAULT_REPOSITORY),
1780
- ('realm', self.DEFAULT_REALM),
1781
- ('password', None)):
1782
- if config.has_option(server, key):
1783
- result[key] = config.get(server, key)
1784
- else:
1785
- result[key] = default
1786
-
1787
- # work around people having "repository" for the "pypi"
1788
- # section of their config set to the HTTP (rather than
1789
- # HTTPS) URL
1790
- if (server == 'pypi' and
1791
- repository in (self.DEFAULT_REPOSITORY, 'pypi')):
1792
- result['repository'] = self.DEFAULT_REPOSITORY
1793
- elif (result['server'] != repository and
1794
- result['repository'] != repository):
1795
- result = {}
1796
- elif 'server-login' in sections:
1797
- # old format
1798
- server = 'server-login'
1799
- if config.has_option(server, 'repository'):
1800
- repository = config.get(server, 'repository')
1801
- else:
1802
- repository = self.DEFAULT_REPOSITORY
1803
- result = {
1804
- 'username': config.get(server, 'username'),
1805
- 'password': config.get(server, 'password'),
1806
- 'repository': repository,
1807
- 'server': server,
1808
- 'realm': self.DEFAULT_REALM
1809
- }
1810
- return result
1811
-
1812
- def update(self, username, password):
1813
- # import pdb; pdb.set_trace()
1814
- config = configparser.RawConfigParser()
1815
- fn = self.filename
1816
- config.read(fn)
1817
- if not config.has_section('pypi'):
1818
- config.add_section('pypi')
1819
- config.set('pypi', 'username', username)
1820
- config.set('pypi', 'password', password)
1821
- with open(fn, 'w') as f:
1822
- config.write(f)
1823
-
1824
- def _load_pypirc(index):
1825
- """
1826
- Read the PyPI access configuration as supported by distutils.
1827
- """
1828
- return PyPIRCFile(url=index.url).read()
1829
-
1830
- def _store_pypirc(index):
1831
- PyPIRCFile().update(index.username, index.password)
1832
-
1833
- #
1834
- # get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor
1835
- # tweaks
1836
- #
1837
-
1838
- def get_host_platform():
1839
- """Return a string that identifies the current platform. This is used mainly to
1840
- distinguish platform-specific build directories and platform-specific built
1841
- distributions. Typically includes the OS name and version and the
1842
- architecture (as supplied by 'os.uname()'), although the exact information
1843
- included depends on the OS; eg. on Linux, the kernel version isn't
1844
- particularly important.
1845
-
1846
- Examples of returned values:
1847
- linux-i586
1848
- linux-alpha (?)
1849
- solaris-2.6-sun4u
1850
-
1851
- Windows will return one of:
1852
- win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
1853
- win32 (all others - specifically, sys.platform is returned)
1854
-
1855
- For other non-POSIX platforms, currently just returns 'sys.platform'.
1856
-
1857
- """
1858
- if os.name == 'nt':
1859
- if 'amd64' in sys.version.lower():
1860
- return 'win-amd64'
1861
- if '(arm)' in sys.version.lower():
1862
- return 'win-arm32'
1863
- if '(arm64)' in sys.version.lower():
1864
- return 'win-arm64'
1865
- return sys.platform
1866
-
1867
- # Set for cross builds explicitly
1868
- if "_PYTHON_HOST_PLATFORM" in os.environ:
1869
- return os.environ["_PYTHON_HOST_PLATFORM"]
1870
-
1871
- if os.name != 'posix' or not hasattr(os, 'uname'):
1872
- # XXX what about the architecture? NT is Intel or Alpha,
1873
- # Mac OS is M68k or PPC, etc.
1874
- return sys.platform
1875
-
1876
- # Try to distinguish various flavours of Unix
1877
-
1878
- (osname, host, release, version, machine) = os.uname()
1879
-
1880
- # Convert the OS name to lowercase, remove '/' characters, and translate
1881
- # spaces (for "Power Macintosh")
1882
- osname = osname.lower().replace('/', '')
1883
- machine = machine.replace(' ', '_').replace('/', '-')
1884
-
1885
- if osname[:5] == 'linux':
1886
- # At least on Linux/Intel, 'machine' is the processor --
1887
- # i386, etc.
1888
- # XXX what about Alpha, SPARC, etc?
1889
- return "%s-%s" % (osname, machine)
1890
-
1891
- elif osname[:5] == 'sunos':
1892
- if release[0] >= '5': # SunOS 5 == Solaris 2
1893
- osname = 'solaris'
1894
- release = '%d.%s' % (int(release[0]) - 3, release[2:])
1895
- # We can't use 'platform.architecture()[0]' because a
1896
- # bootstrap problem. We use a dict to get an error
1897
- # if some suspicious happens.
1898
- bitness = {2147483647:'32bit', 9223372036854775807:'64bit'}
1899
- machine += '.%s' % bitness[sys.maxsize]
1900
- # fall through to standard osname-release-machine representation
1901
- elif osname[:3] == 'aix':
1902
- from _aix_support import aix_platform
1903
- return aix_platform()
1904
- elif osname[:6] == 'cygwin':
1905
- osname = 'cygwin'
1906
- rel_re = re.compile (r'[\d.]+', re.ASCII)
1907
- m = rel_re.match(release)
1908
- if m:
1909
- release = m.group()
1910
- elif osname[:6] == 'darwin':
1911
- import _osx_support, distutils.sysconfig
1912
- osname, release, machine = _osx_support.get_platform_osx(
1913
- distutils.sysconfig.get_config_vars(),
1914
- osname, release, machine)
1915
-
1916
- return '%s-%s-%s' % (osname, release, machine)
1917
-
1918
-
1919
- _TARGET_TO_PLAT = {
1920
- 'x86' : 'win32',
1921
- 'x64' : 'win-amd64',
1922
- 'arm' : 'win-arm32',
1923
- }
1924
-
1925
-
1926
- def get_platform():
1927
- if os.name != 'nt':
1928
- return get_host_platform()
1929
- cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH')
1930
- if cross_compilation_target not in _TARGET_TO_PLAT:
1931
- return get_host_platform()
1932
- return _TARGET_TO_PLAT[cross_compilation_target]
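For reference, the deleted util.py parses entry-point-style specifications of the form `name = prefix:suffix [flags]` with the ENTRY_RE pattern shown above, and get_export_entry wraps the match in an ExportEntry. A minimal, self-contained sketch of that parse (the 'console = mypkg.cli:main' specification below is a made-up example, not taken from the commit):

import re

# Same pattern as the deleted ENTRY_RE, reproduced here for illustration only.
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)

m = ENTRY_RE.search('console = mypkg.cli:main [extra-feature]')
d = m.groupdict()
print(d['name'])      # console
print(d['callable'])  # mypkg.cli:main  (get_export_entry splits this on ':' into prefix/suffix)
print(d['flags'])     # extra-feature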
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/_musllinux.py DELETED
@@ -1,136 +0,0 @@
- """PEP 656 support.
-
- This module implements logic to detect if the currently running Python is
- linked against musl, and what musl version is used.
- """
-
- import contextlib
- import functools
- import operator
- import os
- import re
- import struct
- import subprocess
- import sys
- from typing import IO, Iterator, NamedTuple, Optional, Tuple
-
-
- def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
-     return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
-
-
- def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
-     """Detect musl libc location by parsing the Python executable.
-
-     Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
-     ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
-     """
-     f.seek(0)
-     try:
-         ident = _read_unpacked(f, "16B")
-     except struct.error:
-         return None
-     if ident[:4] != tuple(b"\x7fELF"):  # Invalid magic, not ELF.
-         return None
-     f.seek(struct.calcsize("HHI"), 1)  # Skip file type, machine, and version.
-
-     try:
-         # e_fmt: Format for program header.
-         # p_fmt: Format for section header.
-         # p_idx: Indexes to find p_type, p_offset, and p_filesz.
-         e_fmt, p_fmt, p_idx = {
-             1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)),  # 32-bit.
-             2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)),  # 64-bit.
-         }[ident[4]]
-     except KeyError:
-         return None
-     else:
-         p_get = operator.itemgetter(*p_idx)
-
-     # Find the interpreter section and return its content.
-     try:
-         _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
-     except struct.error:
-         return None
-     for i in range(e_phnum + 1):
-         f.seek(e_phoff + e_phentsize * i)
-         try:
-             p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
-         except struct.error:
-             return None
-         if p_type != 3:  # Not PT_INTERP.
-             continue
-         f.seek(p_offset)
-         interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
-         if "musl" not in interpreter:
-             return None
-         return interpreter
-     return None
-
-
- class _MuslVersion(NamedTuple):
-     major: int
-     minor: int
-
-
- def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
-     lines = [n for n in (n.strip() for n in output.splitlines()) if n]
-     if len(lines) < 2 or lines[0][:4] != "musl":
-         return None
-     m = re.match(r"Version (\d+)\.(\d+)", lines[1])
-     if not m:
-         return None
-     return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
-
-
- @functools.lru_cache()
- def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
-     """Detect currently-running musl runtime version.
-
-     This is done by checking the specified executable's dynamic linking
-     information, and invoking the loader to parse its output for a version
-     string. If the loader is musl, the output would be something like::
-
-         musl libc (x86_64)
-         Version 1.2.2
-         Dynamic Program Loader
-     """
-     with contextlib.ExitStack() as stack:
-         try:
-             f = stack.enter_context(open(executable, "rb"))
-         except OSError:
-             return None
-         ld = _parse_ld_musl_from_elf(f)
-     if not ld:
-         return None
-     proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
-     return _parse_musl_version(proc.stderr)
-
-
- def platform_tags(arch: str) -> Iterator[str]:
-     """Generate musllinux tags compatible to the current platform.
-
-     :param arch: Should be the part of platform tag after the ``linux_``
-         prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
-         prerequisite for the current platform to be musllinux-compatible.
-
-     :returns: An iterator of compatible musllinux tags.
-     """
-     sys_musl = _get_musl_version(sys.executable)
-     if sys_musl is None:  # Python not dynamically linked against musl.
-         return
-     for minor in range(sys_musl.minor, -1, -1):
-         yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
-
-
- if __name__ == "__main__":  # pragma: no cover
-     import sysconfig
-
-     plat = sysconfig.get_platform()
-     assert plat.startswith("linux-"), "not linux"
-
-     print("plat:", plat)
-     print("musl:", _get_musl_version(sys.executable))
-     print("tags:", end=" ")
-     for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
-         print(t, end="\n      ")
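For context, _get_musl_version above derives the version by invoking the musl loader and parsing its stderr. The docstring's sample loader output parses like this; a self-contained sketch that mirrors _parse_musl_version, using only the sample text quoted in the docstring:

import re

sample = """musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader"""

# Mirrors _parse_musl_version: first non-empty line must start with "musl",
# second line carries "Version <major>.<minor>".
lines = [n for n in (n.strip() for n in sample.splitlines()) if n]
assert lines[0][:4] == "musl"
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
print(int(m.group(1)), int(m.group(2)))  # 1 2, so platform_tags yields musllinux_1_2 down to musllinux_1_0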
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/abc.py DELETED
@@ -1,33 +0,0 @@
- from abc import ABC
-
-
- class RichRenderable(ABC):
-     """An abstract base class for Rich renderables.
-
-     Note that there is no need to extend this class, the intended use is to check if an
-     object supports the Rich renderable protocol. For example::
-
-         if isinstance(my_object, RichRenderable):
-             console.print(my_object)
-
-     """
-
-     @classmethod
-     def __subclasshook__(cls, other: type) -> bool:
-         """Check if this class supports the rich render protocol."""
-         return hasattr(other, "__rich_console__") or hasattr(other, "__rich__")
-
-
- if __name__ == "__main__":  # pragma: no cover
-     from pip._vendor.rich.text import Text
-
-     t = Text()
-     print(isinstance(Text, RichRenderable))
-     print(isinstance(t, RichRenderable))
-
-     class Foo:
-         pass
-
-     f = Foo()
-     print(isinstance(f, RichRenderable))
-     print(isinstance("", RichRenderable))
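The __subclasshook__ above makes RichRenderable a duck-typed protocol check rather than a conventional base class: any object exposing __rich__ or __rich_console__ passes isinstance without subclassing. A small sketch against the public rich package (since the vendored copy is being deleted here); the Greeting class is a made-up example:

from rich.abc import RichRenderable

class Greeting:
    def __rich__(self) -> str:  # defining __rich__ is enough to satisfy the hook
        return "[bold]hello[/bold]"

print(isinstance(Greeting(), RichRenderable))  # True, with no inheritance involved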
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py DELETED
@@ -1,216 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- This module contains provisional support for SOCKS proxies from within
4
- urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
5
- SOCKS5. To enable its functionality, either install PySocks or install this
6
- module with the ``socks`` extra.
7
-
8
- The SOCKS implementation supports the full range of urllib3 features. It also
9
- supports the following SOCKS features:
10
-
11
- - SOCKS4A (``proxy_url='socks4a://...``)
12
- - SOCKS4 (``proxy_url='socks4://...``)
13
- - SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
14
- - SOCKS5 with local DNS (``proxy_url='socks5://...``)
15
- - Usernames and passwords for the SOCKS proxy
16
-
17
- .. note::
18
- It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
19
- your ``proxy_url`` to ensure that DNS resolution is done from the remote
20
- server instead of client-side when connecting to a domain name.
21
-
22
- SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
- supports IPv4, IPv6, and domain names.
-
- When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
- will be sent as the ``userid`` section of the SOCKS request:
-
- .. code-block:: python
-
-     proxy_url="socks4a://<userid>@proxy-host"
-
- When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
- of the ``proxy_url`` will be sent as the username/password to authenticate
- with the proxy:
-
- .. code-block:: python
-
-     proxy_url="socks5h://<username>:<password>@proxy-host"
-
- """
- from __future__ import absolute_import
-
- try:
-     import socks
- except ImportError:
-     import warnings
-
-     from ..exceptions import DependencyWarning
-
-     warnings.warn(
-         (
-             "SOCKS support in urllib3 requires the installation of optional "
-             "dependencies: specifically, PySocks. For more information, see "
-             "https://urllib3.readthedocs.io/en/1.26.x/contrib.html#socks-proxies"
-         ),
-         DependencyWarning,
-     )
-     raise
-
- from socket import error as SocketError
- from socket import timeout as SocketTimeout
-
- from ..connection import HTTPConnection, HTTPSConnection
- from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
- from ..exceptions import ConnectTimeoutError, NewConnectionError
- from ..poolmanager import PoolManager
- from ..util.url import parse_url
-
- try:
-     import ssl
- except ImportError:
-     ssl = None
-
-
- class SOCKSConnection(HTTPConnection):
-     """
-     A plain-text HTTP connection that connects via a SOCKS proxy.
-     """
-
-     def __init__(self, *args, **kwargs):
-         self._socks_options = kwargs.pop("_socks_options")
-         super(SOCKSConnection, self).__init__(*args, **kwargs)
-
-     def _new_conn(self):
-         """
-         Establish a new connection via the SOCKS proxy.
-         """
-         extra_kw = {}
-         if self.source_address:
-             extra_kw["source_address"] = self.source_address
-
-         if self.socket_options:
-             extra_kw["socket_options"] = self.socket_options
-
-         try:
-             conn = socks.create_connection(
-                 (self.host, self.port),
-                 proxy_type=self._socks_options["socks_version"],
-                 proxy_addr=self._socks_options["proxy_host"],
-                 proxy_port=self._socks_options["proxy_port"],
-                 proxy_username=self._socks_options["username"],
-                 proxy_password=self._socks_options["password"],
-                 proxy_rdns=self._socks_options["rdns"],
-                 timeout=self.timeout,
-                 **extra_kw
-             )
-
-         except SocketTimeout:
-             raise ConnectTimeoutError(
-                 self,
-                 "Connection to %s timed out. (connect timeout=%s)"
-                 % (self.host, self.timeout),
-             )
-
-         except socks.ProxyError as e:
-             # This is fragile as hell, but it seems to be the only way to raise
-             # useful errors here.
-             if e.socket_err:
-                 error = e.socket_err
-                 if isinstance(error, SocketTimeout):
-                     raise ConnectTimeoutError(
-                         self,
-                         "Connection to %s timed out. (connect timeout=%s)"
-                         % (self.host, self.timeout),
-                     )
-                 else:
-                     raise NewConnectionError(
-                         self, "Failed to establish a new connection: %s" % error
-                     )
-             else:
-                 raise NewConnectionError(
-                     self, "Failed to establish a new connection: %s" % e
-                 )
-
-         except SocketError as e:  # Defensive: PySocks should catch all these.
-             raise NewConnectionError(
-                 self, "Failed to establish a new connection: %s" % e
-             )
-
-         return conn
-
-
- # We don't need to duplicate the Verified/Unverified distinction from
- # urllib3/connection.py here because the HTTPSConnection will already have been
- # correctly set to either the Verified or Unverified form by that module. This
- # means the SOCKSHTTPSConnection will automatically be the correct type.
- class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
-     pass
-
-
- class SOCKSHTTPConnectionPool(HTTPConnectionPool):
-     ConnectionCls = SOCKSConnection
-
-
- class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
-     ConnectionCls = SOCKSHTTPSConnection
-
-
- class SOCKSProxyManager(PoolManager):
-     """
-     A version of the urllib3 ProxyManager that routes connections via the
-     defined SOCKS proxy.
-     """
-
-     pool_classes_by_scheme = {
-         "http": SOCKSHTTPConnectionPool,
-         "https": SOCKSHTTPSConnectionPool,
-     }
-
-     def __init__(
-         self,
-         proxy_url,
-         username=None,
-         password=None,
-         num_pools=10,
-         headers=None,
-         **connection_pool_kw
-     ):
-         parsed = parse_url(proxy_url)
-
-         if username is None and password is None and parsed.auth is not None:
-             split = parsed.auth.split(":")
-             if len(split) == 2:
-                 username, password = split
-         if parsed.scheme == "socks5":
-             socks_version = socks.PROXY_TYPE_SOCKS5
-             rdns = False
-         elif parsed.scheme == "socks5h":
-             socks_version = socks.PROXY_TYPE_SOCKS5
-             rdns = True
-         elif parsed.scheme == "socks4":
-             socks_version = socks.PROXY_TYPE_SOCKS4
-             rdns = False
-         elif parsed.scheme == "socks4a":
-             socks_version = socks.PROXY_TYPE_SOCKS4
-             rdns = True
-         else:
-             raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
-
-         self.proxy_url = proxy_url
-
-         socks_options = {
-             "socks_version": socks_version,
-             "proxy_host": parsed.host,
-             "proxy_port": parsed.port,
-             "username": username,
-             "password": password,
-             "rdns": rdns,
-         }
-         connection_pool_kw["_socks_options"] = socks_options
-
-         super(SOCKSProxyManager, self).__init__(
-             num_pools, headers, **connection_pool_kw
-         )
-
-         self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
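
For orientation, a minimal usage sketch of the `SOCKSProxyManager` defined above; the proxy address and credentials are placeholders:

```python
# Minimal sketch: route plain urllib3 requests through a SOCKS5 proxy.
# "socks5h" makes the proxy resolve hostnames (rdns=True); "socks5" resolves locally.
from urllib3.contrib.socks import SOCKSProxyManager

proxy = SOCKSProxyManager("socks5h://username:password@proxy-host:1080")
response = proxy.request("GET", "http://example.com/")
print(response.status)
```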
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/evaluation/cityscapes_evaluation.py DELETED
@@ -1,112 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import glob
- import logging
- import os
- import tempfile
- from collections import OrderedDict
- import torch
- from fvcore.common.file_io import PathManager
- from PIL import Image
-
- from detectron2.data import MetadataCatalog
- from detectron2.utils import comm
-
- from .evaluator import DatasetEvaluator
-
-
- class CityscapesEvaluator(DatasetEvaluator):
-     """
-     Evaluate instance segmentation results using cityscapes API.
-
-     Note:
-     * It does not work in multi-machine distributed training.
-     * It contains a synchronization, therefore has to be used on all ranks.
-     * Only the main process runs evaluation.
-     """
-
-     def __init__(self, dataset_name):
-         """
-         Args:
-             dataset_name (str): the name of the dataset.
-                 It must have the following metadata associated with it:
-                 "thing_classes", "gt_dir".
-         """
-         self._metadata = MetadataCatalog.get(dataset_name)
-         self._cpu_device = torch.device("cpu")
-         self._logger = logging.getLogger(__name__)
-
-     def reset(self):
-         self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
-         self._temp_dir = self._working_dir.name
-         # All workers will write to the same results directory
-         # TODO this does not work in distributed training
-         self._temp_dir = comm.all_gather(self._temp_dir)[0]
-         if self._temp_dir != self._working_dir.name:
-             self._working_dir.cleanup()
-         self._logger.info(
-             "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
-         )
-
-     def process(self, inputs, outputs):
-         from cityscapesscripts.helpers.labels import name2label
-
-         for input, output in zip(inputs, outputs):
-             file_name = input["file_name"]
-             basename = os.path.splitext(os.path.basename(file_name))[0]
-             pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt")
-
-             output = output["instances"].to(self._cpu_device)
-             num_instances = len(output)
-             with open(pred_txt, "w") as fout:
-                 for i in range(num_instances):
-                     pred_class = output.pred_classes[i]
-                     classes = self._metadata.thing_classes[pred_class]
-                     class_id = name2label[classes].id
-                     score = output.scores[i]
-                     mask = output.pred_masks[i].numpy().astype("uint8")
-                     png_filename = os.path.join(
-                         self._temp_dir, basename + "_{}_{}.png".format(i, classes)
-                     )
-
-                     Image.fromarray(mask * 255).save(png_filename)
-                     fout.write("{} {} {}\n".format(os.path.basename(png_filename), class_id, score))
-
-     def evaluate(self):
-         """
-         Returns:
-             dict: has a key "segm", whose value is a dict of "AP" and "AP50".
-         """
-         comm.synchronize()
-         if comm.get_rank() > 0:
-             return
-         import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
-
-         self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
-
-         # set some global states in cityscapes evaluation API, before evaluating
-         cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
-         cityscapes_eval.args.predictionWalk = None
-         cityscapes_eval.args.JSONOutput = False
-         cityscapes_eval.args.colorized = False
-         cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")
-
-         # These lines are adopted from
-         # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
-         gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
-         groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png"))
-         assert len(
-             groundTruthImgList
-         ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
-             cityscapes_eval.args.groundTruthSearch
-         )
-         predictionImgList = []
-         for gt in groundTruthImgList:
-             predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
-         results = cityscapes_eval.evaluateImgLists(
-             predictionImgList, groundTruthImgList, cityscapes_eval.args
-         )["averages"]
-
-         ret = OrderedDict()
-         ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
-         self._working_dir.cleanup()
-         return ret
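
As a sketch of how this evaluator is driven (the standard `DatasetEvaluator` reset/process/evaluate cycle); the dataset name, data loader, and model are placeholders:

```python
# Hedged sketch of the evaluator life cycle; inputs/outputs follow detectron2's
# convention of lists of dicts with "file_name" and "instances" fields.
evaluator = CityscapesEvaluator("cityscapes_fine_instance_seg_val")
evaluator.reset()
for inputs in data_loader:           # each element: dict with "file_name"
    outputs = model(inputs)          # each element: dict with "instances"
    evaluator.process(inputs, outputs)
results = evaluator.evaluate()       # {"segm": {"AP": ..., "AP50": ...}} on rank 0
```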
 
spaces/CVPR/LIVE/thrust/thrust/allocate_unique.h DELETED
@@ -1,444 +0,0 @@
- // Copyright (c) 2018 NVIDIA Corporation
- // Author: Bryce Adelstein Lelbach <[email protected]>
- //
- // Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/cpp11_required.h>
-
- #if THRUST_CPP_DIALECT >= 2011
-
- #include <thrust/detail/raw_pointer_cast.h>
- #include <thrust/detail/type_deduction.h>
- #include <thrust/detail/memory_algorithms.h>
- #include <thrust/detail/allocator/allocator_traits.h>
-
- #include <utility>
- #include <thrust/detail/memory_wrapper.h>
-
- namespace thrust
- {
-
- // wg21.link/p0316r0
-
- ///////////////////////////////////////////////////////////////////////////////
-
- namespace detail
- {
-
- template <typename Allocator, typename Pointer>
- void allocator_delete_impl(
-   Allocator const& alloc, Pointer p, std::false_type
- )
- {
-   using traits = typename detail::allocator_traits<
-     typename std::remove_cv<
-       typename std::remove_reference<Allocator>::type
-     >::type
-   >;
-
-   typename traits::allocator_type alloc_T(alloc);
-
-   if (nullptr != pointer_traits<Pointer>::get(p))
-   {
-     traits::destroy(alloc_T, thrust::raw_pointer_cast(p));
-     traits::deallocate(alloc_T, p, 1);
-   }
- }
-
- template <typename Allocator, typename Pointer>
- void allocator_delete_impl(
-   Allocator const& alloc, Pointer p, std::true_type
- )
- {
-   using traits = typename detail::allocator_traits<
-     typename std::remove_cv<
-       typename std::remove_reference<Allocator>::type
-     >::type
-   >;
-
-   typename traits::allocator_type alloc_T(alloc);
-
-   if (nullptr != pointer_traits<Pointer>::get(p))
-   {
-     traits::deallocate(alloc_T, p, 1);
-   }
- }
-
- } // namespace detail
-
- template <typename T, typename Allocator, bool Uninitialized = false>
- struct allocator_delete final
- {
-   using allocator_type
-     = typename std::remove_cv<
-         typename std::remove_reference<Allocator>::type
-       >::type::template rebind<T>::other;
-   using pointer = typename detail::allocator_traits<allocator_type>::pointer;
-
-   template <typename UAllocator>
-   allocator_delete(UAllocator&& other) noexcept
-     : alloc_(THRUST_FWD(other))
-   {}
-
-   template <typename U, typename UAllocator>
-   allocator_delete(
-     allocator_delete<U, UAllocator> const& other
-   ) noexcept
-     : alloc_(other.get_allocator())
-   {}
-   template <typename U, typename UAllocator>
-   allocator_delete(
-     allocator_delete<U, UAllocator>&& other
-   ) noexcept
-     : alloc_(std::move(other.get_allocator()))
-   {}
-
-   template <typename U, typename UAllocator>
-   allocator_delete& operator=(
-     allocator_delete<U, UAllocator> const& other
-   ) noexcept
-   {
-     alloc_ = other.get_allocator();
-     return *this;
-   }
-   template <typename U, typename UAllocator>
-   allocator_delete& operator=(
-     allocator_delete<U, UAllocator>&& other
-   ) noexcept
-   {
-     alloc_ = std::move(other.get_allocator());
-     return *this;
-   }
-
-   void operator()(pointer p)
-   {
-     std::integral_constant<bool, Uninitialized> ic;
-
-     detail::allocator_delete_impl(get_allocator(), p, ic);
-   }
-
-   allocator_type& get_allocator() noexcept { return alloc_; }
-   allocator_type const& get_allocator() const noexcept { return alloc_; }
-
-   void swap(allocator_delete& other) noexcept
-   {
-     using std::swap;
-     swap(alloc_, other.alloc_);
-   }
-
- private:
-   allocator_type alloc_;
- };
-
- template <typename T, typename Allocator>
- using uninitialized_allocator_delete = allocator_delete<T, Allocator, true>;
-
- namespace detail {
-
- template <typename Allocator, typename Pointer, typename Size>
- void array_allocator_delete_impl(
-   Allocator const& alloc, Pointer p, Size count, std::false_type
- )
- {
-   using traits = typename detail::allocator_traits<
-     typename std::remove_cv<
-       typename std::remove_reference<Allocator>::type
-     >::type
-   >;
-
-   typename traits::allocator_type alloc_T(alloc);
-
-   if (nullptr != pointer_traits<Pointer>::get(p))
-   {
-     destroy_n(alloc_T, p, count);
-     traits::deallocate(alloc_T, p, count);
-   }
- }
-
- template <typename Allocator, typename Pointer, typename Size>
- void array_allocator_delete_impl(
-   Allocator const& alloc, Pointer p, Size count, std::true_type
- )
- {
-   using traits = typename detail::allocator_traits<
-     typename std::remove_cv<
-       typename std::remove_reference<Allocator>::type
-     >::type
-   >;
-
-   typename traits::allocator_type alloc_T(alloc);
-
-   if (nullptr != pointer_traits<Pointer>::get(p))
-   {
-     traits::deallocate(alloc_T, p, count);
-   }
- }
-
- } // namespace detail
-
- template <typename T, typename Allocator, bool Uninitialized = false>
- struct array_allocator_delete final
- {
-   using allocator_type
-     = typename std::remove_cv<
-         typename std::remove_reference<Allocator>::type
-       >::type::template rebind<T>::other;
-   using pointer = typename detail::allocator_traits<allocator_type>::pointer;
-
-   template <typename UAllocator>
-   array_allocator_delete(UAllocator&& other, std::size_t n) noexcept
-     : alloc_(THRUST_FWD(other)), count_(n)
-   {}
-
-   template <typename U, typename UAllocator>
-   array_allocator_delete(
-     array_allocator_delete<U, UAllocator> const& other
-   ) noexcept
-     : alloc_(other.get_allocator()), count_(other.count_)
-   {}
-   template <typename U, typename UAllocator>
-   array_allocator_delete(
-     array_allocator_delete<U, UAllocator>&& other
-   ) noexcept
-     : alloc_(std::move(other.get_allocator())), count_(other.count_)
-   {}
-
-   template <typename U, typename UAllocator>
-   array_allocator_delete& operator=(
-     array_allocator_delete<U, UAllocator> const& other
-   ) noexcept
-   {
-     alloc_ = other.get_allocator();
-     count_ = other.count_;
-     return *this;
-   }
-   template <typename U, typename UAllocator>
-   array_allocator_delete& operator=(
-     array_allocator_delete<U, UAllocator>&& other
-   ) noexcept
-   {
-     alloc_ = std::move(other.get_allocator());
-     count_ = other.count_;
-     return *this;
-   }
-
-   void operator()(pointer p)
-   {
-     std::integral_constant<bool, Uninitialized> ic;
-
-     detail::array_allocator_delete_impl(get_allocator(), p, count_, ic);
-   }
-
-   allocator_type& get_allocator() noexcept { return alloc_; }
-   allocator_type const& get_allocator() const noexcept { return alloc_; }
-
-   void swap(array_allocator_delete& other) noexcept
-   {
-     using std::swap;
-     swap(alloc_, other.alloc_);
-     swap(count_, other.count_);
-   }
-
- private:
-   allocator_type alloc_;
-   std::size_t count_;
- };
-
- template <typename T, typename Allocator>
- using uninitialized_array_allocator_delete
-   = array_allocator_delete<T, Allocator, true>;
-
- ///////////////////////////////////////////////////////////////////////////////
-
- template <typename Pointer, typename Lambda>
- struct tagged_deleter : Lambda
- {
-   __host__ __device__
-   tagged_deleter(Lambda&& l) : Lambda(THRUST_FWD(l)) {}
-
-   using pointer = Pointer;
- };
-
- template <typename Pointer, typename Lambda>
- __host__ __device__
- tagged_deleter<Pointer, Lambda>
- make_tagged_deleter(Lambda&& l)
- {
-   return tagged_deleter<Pointer, Lambda>(THRUST_FWD(l));
- }
-
- ///////////////////////////////////////////////////////////////////////////////
-
- template <typename T, typename Allocator, typename... Args>
- __host__
- std::unique_ptr<
-   T,
-   allocator_delete<
-     T
-   , typename detail::allocator_traits<
-       typename std::remove_cv<
-         typename std::remove_reference<Allocator>::type
-       >::type
-     >::template rebind_traits<T>::allocator_type
-   >
- >
- allocate_unique(
-   Allocator const& alloc, Args&&... args
- )
- {
-   using traits = typename detail::allocator_traits<
-     typename std::remove_cv<
-       typename std::remove_reference<Allocator>::type
-     >::type
-   >::template rebind_traits<T>;
-
-   typename traits::allocator_type alloc_T(alloc);
-
-   auto hold_deleter = make_tagged_deleter<typename traits::pointer>(
-     [&alloc_T] (typename traits::pointer p) {
-       traits::deallocate(alloc_T, p, 1);
-     }
-   );
-   using hold_t = std::unique_ptr<T, decltype(hold_deleter)>;
-   auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter);
-
-   traits::construct(
-     alloc_T, thrust::raw_pointer_cast(hold.get()), THRUST_FWD(args)...
-   );
-   auto deleter = allocator_delete<T, typename traits::allocator_type>(alloc);
-   return std::unique_ptr<T, decltype(deleter)>
-     (hold.release(), std::move(deleter));
- }
-
- template <typename T, typename Allocator>
- __host__
- std::unique_ptr<
-   T,
-   uninitialized_allocator_delete<
-     T
-   , typename detail::allocator_traits<
-       typename std::remove_cv<
-         typename std::remove_reference<Allocator>::type
-       >::type
-     >::template rebind_traits<T>::allocator_type
-   >
- >
- uninitialized_allocate_unique(
-   Allocator const& alloc
- )
- {
-   using traits = typename detail::allocator_traits<
-     typename std::remove_cv<
-       typename std::remove_reference<Allocator>::type
-     >::type
-   >::template rebind_traits<T>;
-
-   typename traits::allocator_type alloc_T(alloc);
-
-   auto hold_deleter = make_tagged_deleter<typename traits::pointer>(
-     [&alloc_T] (typename traits::pointer p) {
-       traits::deallocate(alloc_T, p, 1);
-     }
-   );
-   using hold_t = std::unique_ptr<T, decltype(hold_deleter)>;
-   auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter);
-
-   auto deleter = uninitialized_allocator_delete<
-     T, typename traits::allocator_type
-   >(alloc_T);
-   return std::unique_ptr<T, decltype(deleter)>
-     (hold.release(), std::move(deleter));
- }
-
- template <typename T, typename Allocator, typename Size, typename... Args>
- __host__
- std::unique_ptr<
-   T[],
-   array_allocator_delete<
-     T
-   , typename detail::allocator_traits<
-       typename std::remove_cv<
-         typename std::remove_reference<Allocator>::type
-       >::type
-     >::template rebind_traits<T>::allocator_type
-   >
- >
- allocate_unique_n(
-   Allocator const& alloc, Size n, Args&&... args
- )
- {
-   using traits = typename detail::allocator_traits<
-     typename std::remove_cv<
-       typename std::remove_reference<Allocator>::type
-     >::type
-   >::template rebind_traits<T>;
-
-   typename traits::allocator_type alloc_T(alloc);
-
-   auto hold_deleter = make_tagged_deleter<typename traits::pointer>(
-     [n, &alloc_T] (typename traits::pointer p) {
-       traits::deallocate(alloc_T, p, n);
-     }
-   );
-   using hold_t = std::unique_ptr<T[], decltype(hold_deleter)>;
-   auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter);
-
-   uninitialized_construct_n_with_allocator(
-     alloc_T, hold.get(), n, THRUST_FWD(args)...
-   );
-   auto deleter = array_allocator_delete<
-     T, typename traits::allocator_type
-   >(alloc_T, n);
-   return std::unique_ptr<T[], decltype(deleter)>
-     (hold.release(), std::move(deleter));
- }
-
- template <typename T, typename Allocator, typename Size>
- __host__
- std::unique_ptr<
-   T[],
-   uninitialized_array_allocator_delete<
-     T
-   , typename detail::allocator_traits<
-       typename std::remove_cv<
-         typename std::remove_reference<Allocator>::type
-       >::type
-     >::template rebind_traits<T>::allocator_type
-   >
- >
- uninitialized_allocate_unique_n(
-   Allocator const& alloc, Size n
- )
- {
-   using traits = typename detail::allocator_traits<
-     typename std::remove_cv<
-       typename std::remove_reference<Allocator>::type
-     >::type
-   >::template rebind_traits<T>;
-
-   typename traits::allocator_type alloc_T(alloc);
-
-   auto hold_deleter = make_tagged_deleter<typename traits::pointer>(
-     [n, &alloc_T] (typename traits::pointer p) {
-       traits::deallocate(alloc_T, p, n);
-     }
-   );
-   using hold_t = std::unique_ptr<T[], decltype(hold_deleter)>;
-   auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter);
-
-   auto deleter = uninitialized_array_allocator_delete<
-     T, typename traits::allocator_type
-   >(alloc_T, n);
-   return std::unique_ptr<T[], decltype(deleter)>
-     (hold.release(), std::move(deleter));
- }
-
- ///////////////////////////////////////////////////////////////////////////////
-
- } // end namespace thrust
-
- #endif // THRUST_CPP_DIALECT >= 2011
-
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/reverse.h DELETED
@@ -1,44 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  * http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // the purpose of this header is to #include the reverse.h header
- // of the sequential, host, and device systems. It should be #included in any
- // code which uses adl to dispatch reverse
-
- #include <thrust/system/detail/sequential/reverse.h>
-
- // SCons can't see through the #defines below to figure out what this header
- // includes, so we fake it out by specifying all possible files we might end up
- // including inside an #if 0.
- #if 0
- #include <thrust/system/cpp/detail/reverse.h>
- #include <thrust/system/cuda/detail/reverse.h>
- #include <thrust/system/omp/detail/reverse.h>
- #include <thrust/system/tbb/detail/reverse.h>
- #endif
-
- #define __THRUST_HOST_SYSTEM_REVERSE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/reverse.h>
- #include __THRUST_HOST_SYSTEM_REVERSE_HEADER
- #undef __THRUST_HOST_SYSTEM_REVERSE_HEADER
-
- #define __THRUST_DEVICE_SYSTEM_REVERSE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/reverse.h>
- #include __THRUST_DEVICE_SYSTEM_REVERSE_HEADER
- #undef __THRUST_DEVICE_SYSTEM_REVERSE_HEADER
-
 
spaces/ChillyFaze/runwayml-stable-diffusion-v1-5/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch()
 
 
 
 
spaces/CrabApple/prompthero-openjourney-v2/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/prompthero/openjourney-v2").launch()
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/middleware/wsgi.py DELETED
@@ -1 +0,0 @@
- from starlette.middleware.wsgi import WSGIMiddleware as WSGIMiddleware  # noqa
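
This one-line module simply re-exports Starlette's `WSGIMiddleware`. A sketch of what it is for, mounting a WSGI app (Flask here, as an illustrative choice) under a FastAPI application:

```python
# Sketch: serve a Flask (WSGI) app under /legacy inside a FastAPI (ASGI) app.
from fastapi import FastAPI
from fastapi.middleware.wsgi import WSGIMiddleware
from flask import Flask

flask_app = Flask(__name__)

@flask_app.route("/")
def index():
    return "Hello from Flask"

app = FastAPI()
app.mount("/legacy", WSGIMiddleware(flask_app))
```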
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/filterPen.py DELETED
@@ -1,164 +0,0 @@
- from fontTools.pens.basePen import AbstractPen
- from fontTools.pens.pointPen import AbstractPointPen
- from fontTools.pens.recordingPen import RecordingPen
-
-
- class _PassThruComponentsMixin(object):
-     def addComponent(self, glyphName, transformation, **kwargs):
-         self._outPen.addComponent(glyphName, transformation, **kwargs)
-
-
- class FilterPen(_PassThruComponentsMixin, AbstractPen):
-
-     """Base class for pens that apply some transformation to the coordinates
-     they receive and pass them to another pen.
-
-     You can override any of its methods. The default implementation does
-     nothing, but passes the commands unmodified to the other pen.
-
-     >>> from fontTools.pens.recordingPen import RecordingPen
-     >>> rec = RecordingPen()
-     >>> pen = FilterPen(rec)
-     >>> v = iter(rec.value)
-
-     >>> pen.moveTo((0, 0))
-     >>> next(v)
-     ('moveTo', ((0, 0),))
-
-     >>> pen.lineTo((1, 1))
-     >>> next(v)
-     ('lineTo', ((1, 1),))
-
-     >>> pen.curveTo((2, 2), (3, 3), (4, 4))
-     >>> next(v)
-     ('curveTo', ((2, 2), (3, 3), (4, 4)))
-
-     >>> pen.qCurveTo((5, 5), (6, 6), (7, 7), (8, 8))
-     >>> next(v)
-     ('qCurveTo', ((5, 5), (6, 6), (7, 7), (8, 8)))
-
-     >>> pen.closePath()
-     >>> next(v)
-     ('closePath', ())
-
-     >>> pen.moveTo((9, 9))
-     >>> next(v)
-     ('moveTo', ((9, 9),))
-
-     >>> pen.endPath()
-     >>> next(v)
-     ('endPath', ())
-
-     >>> pen.addComponent('foo', (1, 0, 0, 1, 0, 0))
-     >>> next(v)
-     ('addComponent', ('foo', (1, 0, 0, 1, 0, 0)))
-     """
-
-     def __init__(self, outPen):
-         self._outPen = outPen
-         self.current_pt = None
-
-     def moveTo(self, pt):
-         self._outPen.moveTo(pt)
-         self.current_pt = pt
-
-     def lineTo(self, pt):
-         self._outPen.lineTo(pt)
-         self.current_pt = pt
-
-     def curveTo(self, *points):
-         self._outPen.curveTo(*points)
-         self.current_pt = points[-1]
-
-     def qCurveTo(self, *points):
-         self._outPen.qCurveTo(*points)
-         self.current_pt = points[-1]
-
-     def closePath(self):
-         self._outPen.closePath()
-         self.current_pt = None
-
-     def endPath(self):
-         self._outPen.endPath()
-         self.current_pt = None
-
-
- class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
-     """A "buffered" filter pen that accumulates contour data, passes
-     it through a ``filterContour`` method when the contour is closed or ended,
-     and finally draws the result with the output pen.
-
-     Components are passed through unchanged.
-     """
-
-     def __init__(self, outPen):
-         super(ContourFilterPen, self).__init__()
-         self._outPen = outPen
-
-     def closePath(self):
-         super(ContourFilterPen, self).closePath()
-         self._flushContour()
-
-     def endPath(self):
-         super(ContourFilterPen, self).endPath()
-         self._flushContour()
-
-     def _flushContour(self):
-         result = self.filterContour(self.value)
-         if result is not None:
-             self.value = result
-         self.replay(self._outPen)
-         self.value = []
-
-     def filterContour(self, contour):
-         """Subclasses must override this to perform the filtering.
-
-         The contour is a list of pen (operator, operands) tuples.
-         Operators are strings corresponding to the AbstractPen methods:
-         "moveTo", "lineTo", "curveTo", "qCurveTo", "closePath" and
-         "endPath". The operands are the positional arguments that are
-         passed to each method.
-
-         If the method doesn't return a value (i.e. returns None), it's
-         assumed that the argument was modified in-place.
-         Otherwise, the return value is drawn with the output pen.
-         """
-         return  # or return contour
-
-
- class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen):
-     """Base class for point pens that apply some transformation to the
-     coordinates they receive and pass them to another point pen.
-
-     You can override any of its methods. The default implementation does
-     nothing, but passes the commands unmodified to the other pen.
-
-     >>> from fontTools.pens.recordingPen import RecordingPointPen
-     >>> rec = RecordingPointPen()
-     >>> pen = FilterPointPen(rec)
-     >>> v = iter(rec.value)
-     >>> pen.beginPath(identifier="abc")
-     >>> next(v)
-     ('beginPath', (), {'identifier': 'abc'})
-     >>> pen.addPoint((1, 2), "line", False)
-     >>> next(v)
-     ('addPoint', ((1, 2), 'line', False, None), {})
-     >>> pen.addComponent("a", (2, 0, 0, 2, 10, -10), identifier="0001")
-     >>> next(v)
-     ('addComponent', ('a', (2, 0, 0, 2, 10, -10)), {'identifier': '0001'})
-     >>> pen.endPath()
-     >>> next(v)
-     ('endPath', (), {})
-     """
-
-     def __init__(self, outPointPen):
-         self._outPen = outPointPen
-
-     def beginPath(self, **kwargs):
-         self._outPen.beginPath(**kwargs)
-
-     def endPath(self):
-         self._outPen.endPath()
-
-     def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
-         self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)
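
Since `ContourFilterPen.filterContour` is the intended override point, here is a small illustrative subclass (not part of the deleted file) that drops degenerate contours and keeps everything else:

```python
# Illustrative ContourFilterPen subclass: drop contours with fewer than
# three pen operations, pass the rest through unchanged.
from fontTools.pens.filterPen import ContourFilterPen
from fontTools.pens.recordingPen import RecordingPen


class DropTinyContoursPen(ContourFilterPen):
    def filterContour(self, contour):
        # `contour` is a list of (operator, operands) tuples.
        if len(contour) < 3:
            return []  # replay nothing: the contour is dropped
        return None    # None: keep the contour unchanged


rec = RecordingPen()
pen = DropTinyContoursPen(rec)
pen.moveTo((0, 0))
pen.closePath()           # two operations -> dropped
pen.moveTo((0, 0))
pen.lineTo((10, 0))
pen.lineTo((10, 10))
pen.closePath()           # four operations -> kept
assert rec.value[0] == ('moveTo', ((0, 0),))
```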
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-22108117.js DELETED
The diff for this file is too large to render. See raw diff
 
spaces/DaleChen/AutoGPT/autogpt/chat.py DELETED
@@ -1,175 +0,0 @@
- import time
-
- from openai.error import RateLimitError
-
- from autogpt import token_counter
- from autogpt.config import Config
- from autogpt.llm_utils import create_chat_completion
- from autogpt.logs import logger
-
- cfg = Config()
-
-
- def create_chat_message(role, content):
-     """
-     Create a chat message with the given role and content.
-
-     Args:
-         role (str): The role of the message sender, e.g., "system", "user", or "assistant".
-         content (str): The content of the message.
-
-     Returns:
-         dict: A dictionary containing the role and content of the message.
-     """
-     return {"role": role, "content": content}
-
-
- def generate_context(prompt, relevant_memory, full_message_history, model):
-     current_context = [
-         create_chat_message("system", prompt),
-         create_chat_message(
-             "system", f"The current time and date is {time.strftime('%c')}"
-         ),
-         create_chat_message(
-             "system",
-             f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
-         ),
-     ]
-
-     # Add messages from the full message history until we reach the token limit
-     next_message_to_add_index = len(full_message_history) - 1
-     insertion_index = len(current_context)
-     # Count the currently used tokens
-     current_tokens_used = token_counter.count_message_tokens(current_context, model)
-     return (
-         next_message_to_add_index,
-         current_tokens_used,
-         insertion_index,
-         current_context,
-     )
-
-
- # TODO: Change debug from hardcode to argument
- def chat_with_ai(
-     prompt, user_input, full_message_history, permanent_memory, token_limit
- ):
-     """Interact with the OpenAI API, sending the prompt, user input, message history,
-     and permanent memory."""
-     while True:
-         try:
-             """
-             Interact with the OpenAI API, sending the prompt, user input,
-             message history, and permanent memory.
-
-             Args:
-                 prompt (str): The prompt explaining the rules to the AI.
-                 user_input (str): The input from the user.
-                 full_message_history (list): The list of all messages sent between the
-                     user and the AI.
-                 permanent_memory (Obj): The memory object containing the permanent
-                     memory.
-                 token_limit (int): The maximum number of tokens allowed in the API call.
-
-             Returns:
-                 str: The AI's response.
-             """
-             model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
-             # Reserve 1000 tokens for the response
-
-             logger.debug(f"Token limit: {token_limit}")
-             send_token_limit = token_limit - 1000
-
-             relevant_memory = (
-                 ""
-                 if len(full_message_history) == 0
-                 else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
-             )
-
-             logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
-
-             (
-                 next_message_to_add_index,
-                 current_tokens_used,
-                 insertion_index,
-                 current_context,
-             ) = generate_context(prompt, relevant_memory, full_message_history, model)
-
-             while current_tokens_used > 2500:
-                 # remove memories until we are under 2500 tokens
-                 relevant_memory = relevant_memory[:-1]
-                 (
-                     next_message_to_add_index,
-                     current_tokens_used,
-                     insertion_index,
-                     current_context,
-                 ) = generate_context(
-                     prompt, relevant_memory, full_message_history, model
-                 )
-
-             current_tokens_used += token_counter.count_message_tokens(
-                 [create_chat_message("user", user_input)], model
-             )  # Account for user input (appended later)
-
-             while next_message_to_add_index >= 0:
-                 # print (f"CURRENT TOKENS USED: {current_tokens_used}")
-                 message_to_add = full_message_history[next_message_to_add_index]
-
-                 tokens_to_add = token_counter.count_message_tokens(
-                     [message_to_add], model
-                 )
-                 if current_tokens_used + tokens_to_add > send_token_limit:
-                     break
-
-                 # Add the most recent message to the start of the current context,
-                 # after the two system prompts.
-                 current_context.insert(
-                     insertion_index, full_message_history[next_message_to_add_index]
-                 )
-
-                 # Count the currently used tokens
-                 current_tokens_used += tokens_to_add
-
-                 # Move to the next most recent message in the full message history
-                 next_message_to_add_index -= 1
-
-             # Append user input, the length of this is accounted for above
-             current_context.extend([create_chat_message("user", user_input)])
-
-             # Calculate remaining tokens
-             tokens_remaining = token_limit - current_tokens_used
-             # assert tokens_remaining >= 0, "Tokens remaining is negative.
-             # This should never happen, please submit a bug report at
-             # https://www.github.com/Torantulino/Auto-GPT"
-
-             # Debug print the current context
-             logger.debug(f"Token limit: {token_limit}")
-             logger.debug(f"Send Token Count: {current_tokens_used}")
-             logger.debug(f"Tokens remaining for response: {tokens_remaining}")
-             logger.debug("------------ CONTEXT SENT TO AI ---------------")
-             for message in current_context:
-                 # Skip printing the prompt
-                 if message["role"] == "system" and message["content"] == prompt:
-                     continue
-                 logger.debug(f"{message['role'].capitalize()}: {message['content']}")
-                 logger.debug("")
-             logger.debug("----------- END OF CONTEXT ----------------")
-
-             # TODO: use a model defined elsewhere, so that model can contain
-             # temperature and other settings we care about
-             assistant_reply = create_chat_completion(
-                 model=model,
-                 messages=current_context,
-                 max_tokens=tokens_remaining,
-             )
-
-             # Update full message history
-             full_message_history.append(create_chat_message("user", user_input))
-             full_message_history.append(
-                 create_chat_message("assistant", assistant_reply)
-             )
-
-             return assistant_reply
-         except RateLimitError:
-             # TODO: When we switch to langchain, this is built in
-             print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
-             time.sleep(10)
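
A hedged sketch of calling `chat_with_ai`; `StubMemory` and the prompt strings are illustrative stand-ins for Auto-GPT's real memory backends, and a configured OpenAI key is assumed:

```python
# Illustrative driver for chat_with_ai with a do-nothing memory backend.
class StubMemory:
    def get_relevant(self, text, num):
        return []

    def get_stats(self):
        return {}


history = []
reply = chat_with_ai(
    prompt="You are a helpful autonomous agent.",
    user_input="Determine which next command to use.",
    full_message_history=history,
    permanent_memory=StubMemory(),
    token_limit=4000,
)
print(reply)
```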
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/postprocessing/post_process.py DELETED
@@ -1,34 +0,0 @@
- """
- @Date: 2021/10/08
- @description:
- """
- import numpy as np
- import cv2
-
- from postprocessing.dula.layout import fit_layout
- from postprocessing.dula.layout_old import fit_layout_old
- from utils.conversion import depth2xyz, xyz2depth
-
-
- def post_process(b_depth, type_name='manhattan', need_cube=False):
-     plan_y = 1
-     b_xyz = depth2xyz(b_depth, plan_y)
-
-     b_processed_xyz = []
-     for xyz in b_xyz:
-         if type_name == 'manhattan':
-             processed_xz = fit_layout(floor_xz=xyz[..., ::2], need_cube=need_cube, show=False)
-         elif type_name == 'manhattan_old':
-             processed_xz = fit_layout_old(floor_xz=xyz[..., ::2], need_cube=need_cube, show=False)
-         elif type_name == 'atalanta':
-             processed_xz = cv2.approxPolyDP(xyz[..., ::2].astype(np.float32), 0.1, False)[:, 0, :]
-         else:
-             raise NotImplementedError("Unknown post-processing type")
-
-         if need_cube:
-             assert len(processed_xz) == 4
-
-         processed_xyz = np.insert(processed_xz, 1, plan_y, axis=1)
-         b_processed_xyz.append(processed_xyz)
-
-     return np.array(b_processed_xyz)
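
A hedged usage sketch: `b_depth` is assumed to be a batch of per-column horizon depths (shape `[batch, columns]`), which is how callers in this repository feed the function; the values and output shape are illustrative only:

```python
# Illustrative call; a constant depth map stands in for real network output.
import numpy as np

b_depth = np.ones((1, 256))
corners_xyz = post_process(b_depth, type_name='manhattan', need_cube=True)
print(corners_xyz.shape)  # e.g. (1, 4, 3): four corners on the plan_y plane
```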
 
spaces/Denevan/BingAI/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: BingAI
- emoji: 📉
- colorFrom: yellow
- colorTo: red
- sdk: docker
- pinned: false
- license: mit
- app_port: 8080
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DragGan/DragGan/torch_utils/training_stats.py DELETED
@@ -1,268 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- """Facilities for reporting and collecting training statistics across
- multiple processes and devices. The interface is designed to minimize
- synchronization overhead as well as the amount of boilerplate in user
- code."""
-
- import re
- import numpy as np
- import torch
- import dnnlib
-
- from . import misc
-
- #----------------------------------------------------------------------------
-
- _num_moments    = 3              # [num_scalars, sum_of_scalars, sum_of_squares]
- _reduce_dtype   = torch.float32  # Data type to use for initial per-tensor reduction.
- _counter_dtype  = torch.float64  # Data type to use for the internal counters.
- _rank           = 0              # Rank of the current process.
- _sync_device    = None           # Device to use for multiprocess communication. None = single-process.
- _sync_called    = False          # Has _sync() been called yet?
- _counters       = dict()         # Running counters on each device, updated by report(): name => device => torch.Tensor
- _cumulative     = dict()         # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
-
- #----------------------------------------------------------------------------
-
- def init_multiprocessing(rank, sync_device):
-     r"""Initializes `torch_utils.training_stats` for collecting statistics
-     across multiple processes.
-
-     This function must be called after
-     `torch.distributed.init_process_group()` and before `Collector.update()`.
-     The call is not necessary if multi-process collection is not needed.
-
-     Args:
-         rank:           Rank of the current process.
-         sync_device:    PyTorch device to use for inter-process
-                         communication, or None to disable multi-process
-                         collection. Typically `torch.device('cuda', rank)`.
-     """
-     global _rank, _sync_device
-     assert not _sync_called
-     _rank = rank
-     _sync_device = sync_device
-
- #----------------------------------------------------------------------------
-
- @misc.profiled_function
- def report(name, value):
-     r"""Broadcasts the given set of scalars to all interested instances of
-     `Collector`, across device and process boundaries.
-
-     This function is expected to be extremely cheap and can be safely
-     called from anywhere in the training loop, loss function, or inside a
-     `torch.nn.Module`.
-
-     Warning: The current implementation expects the set of unique names to
-     be consistent across processes. Please make sure that `report()` is
-     called at least once for each unique name by each process, and in the
-     same order. If a given process has no scalars to broadcast, it can do
-     `report(name, [])` (empty list).
-
-     Args:
-         name:   Arbitrary string specifying the name of the statistic.
-                 Averages are accumulated separately for each unique name.
-         value:  Arbitrary set of scalars. Can be a list, tuple,
-                 NumPy array, PyTorch tensor, or Python scalar.
-
-     Returns:
-         The same `value` that was passed in.
-     """
-     if name not in _counters:
-         _counters[name] = dict()
-
-     elems = torch.as_tensor(value)
-     if elems.numel() == 0:
-         return value
-
-     elems = elems.detach().flatten().to(_reduce_dtype)
-     moments = torch.stack([
-         torch.ones_like(elems).sum(),
-         elems.sum(),
-         elems.square().sum(),
-     ])
-     assert moments.ndim == 1 and moments.shape[0] == _num_moments
-     moments = moments.to(_counter_dtype)
-
-     device = moments.device
-     if device not in _counters[name]:
-         _counters[name][device] = torch.zeros_like(moments)
-     _counters[name][device].add_(moments)
-     return value
-
- #----------------------------------------------------------------------------
-
- def report0(name, value):
-     r"""Broadcasts the given set of scalars by the first process (`rank = 0`),
-     but ignores any scalars provided by the other processes.
-     See `report()` for further details.
-     """
-     report(name, value if _rank == 0 else [])
-     return value
-
- #----------------------------------------------------------------------------
-
- class Collector:
-     r"""Collects the scalars broadcasted by `report()` and `report0()` and
-     computes their long-term averages (mean and standard deviation) over
-     user-defined periods of time.
-
-     The averages are first collected into internal counters that are not
-     directly visible to the user. They are then copied to the user-visible
-     state as a result of calling `update()` and can then be queried using
-     `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
-     internal counters for the next round, so that the user-visible state
-     effectively reflects averages collected between the last two calls to
-     `update()`.
-
-     Args:
-         regex:          Regular expression defining which statistics to
-                         collect. The default is to collect everything.
-         keep_previous:  Whether to retain the previous averages if no
-                         scalars were collected on a given round
-                         (default: True).
-     """
-     def __init__(self, regex='.*', keep_previous=True):
-         self._regex = re.compile(regex)
-         self._keep_previous = keep_previous
-         self._cumulative = dict()
-         self._moments = dict()
-         self.update()
-         self._moments.clear()
-
-     def names(self):
-         r"""Returns the names of all statistics broadcasted so far that
-         match the regular expression specified at construction time.
-         """
-         return [name for name in _counters if self._regex.fullmatch(name)]
-
-     def update(self):
-         r"""Copies current values of the internal counters to the
-         user-visible state and resets them for the next round.
-
-         If `keep_previous=True` was specified at construction time, the
-         operation is skipped for statistics that have received no scalars
-         since the last update, retaining their previous averages.
-
-         This method performs a number of GPU-to-CPU transfers and one
-         `torch.distributed.all_reduce()`. It is intended to be called
-         periodically in the main training loop, typically once every
-         N training steps.
-         """
-         if not self._keep_previous:
-             self._moments.clear()
-         for name, cumulative in _sync(self.names()):
-             if name not in self._cumulative:
-                 self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
-             delta = cumulative - self._cumulative[name]
-             self._cumulative[name].copy_(cumulative)
-             if float(delta[0]) != 0:
-                 self._moments[name] = delta
-
-     def _get_delta(self, name):
-         r"""Returns the raw moments that were accumulated for the given
-         statistic between the last two calls to `update()`, or zero if
-         no scalars were collected.
-         """
-         assert self._regex.fullmatch(name)
-         if name not in self._moments:
-             self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
-         return self._moments[name]
-
-     def num(self, name):
-         r"""Returns the number of scalars that were accumulated for the given
-         statistic between the last two calls to `update()`, or zero if
-         no scalars were collected.
-         """
-         delta = self._get_delta(name)
-         return int(delta[0])
-
-     def mean(self, name):
-         r"""Returns the mean of the scalars that were accumulated for the
-         given statistic between the last two calls to `update()`, or NaN if
-         no scalars were collected.
-         """
-         delta = self._get_delta(name)
-         if int(delta[0]) == 0:
-             return float('nan')
-         return float(delta[1] / delta[0])
-
-     def std(self, name):
-         r"""Returns the standard deviation of the scalars that were
-         accumulated for the given statistic between the last two calls to
-         `update()`, or NaN if no scalars were collected.
-         """
-         delta = self._get_delta(name)
-         if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
-             return float('nan')
-         if int(delta[0]) == 1:
-             return float(0)
-         mean = float(delta[1] / delta[0])
-         raw_var = float(delta[2] / delta[0])
-         return np.sqrt(max(raw_var - np.square(mean), 0))
-
-     def as_dict(self):
-         r"""Returns the averages accumulated between the last two calls to
-         `update()` as a `dnnlib.EasyDict`. The contents are as follows:
-
-             dnnlib.EasyDict(
-                 NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
-                 ...
-             )
-         """
-         stats = dnnlib.EasyDict()
-         for name in self.names():
-             stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
-         return stats
-
-     def __getitem__(self, name):
-         r"""Convenience getter.
-         `collector[name]` is a synonym for `collector.mean(name)`.
-         """
-         return self.mean(name)
-
- #----------------------------------------------------------------------------
-
- def _sync(names):
-     r"""Synchronize the global cumulative counters across devices and
-     processes. Called internally by `Collector.update()`.
-     """
-     if len(names) == 0:
-         return []
-     global _sync_called
-     _sync_called = True
-
-     # Collect deltas within current rank.
-     deltas = []
-     device = _sync_device if _sync_device is not None else torch.device('cpu')
-     for name in names:
-         delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
-         for counter in _counters[name].values():
-             delta.add_(counter.to(device))
-             counter.copy_(torch.zeros_like(counter))
-         deltas.append(delta)
-     deltas = torch.stack(deltas)
-
-     # Sum deltas across ranks.
-     if _sync_device is not None:
-         torch.distributed.all_reduce(deltas)
-
-     # Update cumulative values.
-     deltas = deltas.cpu()
-     for idx, name in enumerate(names):
-         if name not in _cumulative:
-             _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
-         _cumulative[name].add_(deltas[idx])
-
-     # Return name-value pairs.
-     return [(name, _cumulative[name]) for name in names]
-
- #----------------------------------------------------------------------------
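
A minimal single-process usage sketch for the facilities above; the statistic name and loop are chosen for illustration:

```python
# Report a scalar from anywhere in the loop, then periodically collect averages.
import torch

collector = Collector(regex='Loss/.*')
for step in range(100):
    fake_loss = torch.rand([8])         # stand-in for a real training loss
    report('Loss/total', fake_loss)     # cheap; safe to call anywhere
    if (step + 1) % 10 == 0:
        collector.update()              # copy counters to the visible state
        print(step, collector.mean('Loss/total'), collector.std('Loss/total'))
```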
 
spaces/Duskfallcrew/Duskfallcrew-Osenayan_Mix/app.py DELETED
@@ -1,19 +0,0 @@
- import gradio as gr
-
- css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
- """
-
- # Render the description header and the model demo inside one Blocks app,
- # then launch it once at the end.
- io = gr.Interface.load("models/Duskfallcrew/Osenayan_Mix")
-
- with gr.Blocks(css=css) as demo:
-     gr.HTML(
-         """
-         <div class="main-div">
-           <div>
-             <h1>Osenayan Mix</h1>
-           </div>
-           <p>
-           Demo for <a href="https://huggingface.co/Duskfallcrew/Osenayan_Mix">Osenayan Mix</a> Stable Diffusion model. We stream a lot of our testing on <a href="https://www.twitch.tv/duskfallcrew"> Twitch </a>. Any chance you can spare a coffee or three? <a href="https://ko-fi.com/DUSKFALLcrew">Ko-Fi Anyone?</a>. Request image gens via our <a href="https://www.pixiv.net/en/users/70748346"> Pixiv</a>. Hang with us on discord: <a href="https://discord.gg/Da7s8d3KJ7"> Earth & Dusk Discord </a>. No tokens are required. <br>
-           </p>
-         </div>
-         """
-     )
-     io.render()
-
- demo.launch()
 
spaces/ECCV2022/PSG/OpenPSG/configs/_base_/schedules/schedule_1x.py DELETED
@@ -1,10 +0,0 @@
- # optimizer
- optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
- optimizer_config = dict(grad_clip=None)
- # learning policy
- lr_config = dict(policy='step',
-                  warmup='linear',
-                  warmup_iters=500,
-                  warmup_ratio=0.001,
-                  step=[8, 11])
- runner = dict(type='EpochBasedRunner', max_epochs=12)
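
For intuition, the warmup block above ramps the learning rate linearly from `lr * warmup_ratio` to `lr` over the first 500 iterations, then steps it down after epochs 8 and 11. A sketch of the warmup formula, assumed from mmcv's linear-warmup convention (verify against your mmcv version):

```python
# Linear warmup implied by lr_config: lr(0) = 2e-05, lr(500) = 0.02.
base_lr, warmup_iters, warmup_ratio = 0.02, 500, 0.001

def warmup_lr(cur_iter):
    k = (1 - cur_iter / warmup_iters) * (1 - warmup_ratio)
    return base_lr * (1 - k)

print(warmup_lr(0), warmup_lr(250), warmup_lr(500))
```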