Commit 112afe0
Parent(s): ce24e1f
Update parquet files (step 50 of 397)
This view is limited to 50 files because it contains too many changes; see the raw diff (or the command sketched after the file list below) for the complete change set.
- spaces/1368565466ki/ZSTRD/mel_processing.py +0 -101
- spaces/17TheWord/vits-models/transforms.py +0 -193
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cocktail Movie Full In Tamil Hd 1080p The Ultimate Comedy Thriller of 2020.md +0 -30
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Any Video Convertor 3.2.7 Crack().md +0 -45
- spaces/1gistliPinn/ChatGPT4/Doulci-Activator-V2-3-With-Key-Extra-Quality.md +0 -45
- spaces/1gistliPinn/ChatGPT4/Examples/Das Haus Anubis Staffel 1 Folge 1080p NEW!.md +0 -6
- spaces/1line/AutoGPT/README.md +0 -13
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Domino 39s Free Download.md +0 -78
- spaces/1phancelerku/anime-remove-background/Buku Tema 8 Kelas 1 Peristiwa Alam (Download PDF Gratis).md +0 -137
- spaces/1phancelerku/anime-remove-background/Download Gorebox Animosity 10.0.3 APK and Enter the Chaotic World of Gorebox a Game of Extreme Violence.md +0 -115
- spaces/1phancelerku/anime-remove-background/Download RPG Isekai Rondo MOD APK for Android - Enjoy the Ultimate Job in a Parallel World.md +0 -86
- spaces/1phancelerku/anime-remove-background/Download Red Ball 4 Mod APK and Play 100 Square Stages with PremiumUnlocked Benefits.md +0 -93
- spaces/1phancelerku/anime-remove-background/Dynamons World Dragon MOD APK Catch Train and Evolve Your Dynamons.md +0 -99
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/__init__.py +0 -0
- spaces/4Taps/SadTalker/src/facerender/modules/mapping.py +0 -47
- spaces/AIBoy1993/segment_anything_webui/README.md +0 -76
- spaces/AIConsultant/MusicGen/audiocraft/grids/compression/debug.py +0 -31
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py +0 -434
- spaces/Abhaykoul/Wikipedia/README.md +0 -13
- spaces/Abhilashvj/planogram-compliance/app.py +0 -296
- spaces/AchyuthGamer/AchyuthGamer-OpenGPT/README.md +0 -13
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ylokh.py +0 -77
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PostLayout.js +0 -7
- spaces/AiMimicry/sovits-models/README.md +0 -12
- spaces/Akmyradov/TurkmenSpeechRecogntion/app.py +0 -53
- spaces/AlanMars/QYL-AI-Space/run_macOS.command +0 -31
- spaces/AlexWang/lama/saicinpainting/evaluation/losses/fid/__init__.py +0 -0
- spaces/Aloento/9Nine-VITS/load_checkpoint.py +0 -32
- spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/helpers.py +0 -140
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/index.md +0 -101
- spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +0 -11
- spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py +0 -9
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/chase_db1.py +0 -27
- spaces/ArturStepanenko/digitsSpace/app.py +0 -14
- spaces/BOXNYC/shirley/app.py +0 -40
- spaces/Benson/text-generation/Examples/Descargar Carreras De Caballos Virtuales 3d.md +0 -65
- spaces/Benson/text-generation/Examples/Descargar Gratis Juego De Carreras De Coches Para Windows 7.md +0 -136
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/filetypes.py +0 -27
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/ansi_test.py +0 -76
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/measure.py +0 -151
- spaces/CVPR/LIVE/pybind11/include/pybind11/pytypes.h +0 -1608
- spaces/CVPR/LIVE/pybind11/tests/object.h +0 -175
- spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators.h +0 -25
- spaces/CVPR/WALT/mmdet/models/dense_heads/retina_head.py +0 -114
- spaces/CVPR/WALT/walt/datasets/coco.py +0 -519
- spaces/CVPR/drawings-to-human/frontend/src/app.html +0 -13
- spaces/CVPR/regionclip-demo/detectron2/data/transforms/__init__.py +0 -14
- spaces/CVPR/regionclip-demo/detectron2/modeling/mmdet_wrapper.py +0 -277
- spaces/Candyraider/Proxy4/Dockerfile +0 -21
- spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/pinecone.py +0 -75
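Because the web view above stops at 50 files, the complete change set is only visible in the raw diff or from a local clone. A minimal sketch, assuming the repository has already been cloned locally (the clone path below is a placeholder, not something recorded in this commit):

import subprocess

# Hypothetical path to a local clone of this repository.
REPO_DIR = "/path/to/local/clone"

# `git diff-tree` lists every file touched by a single commit;
# --name-status prefixes each path with its change type (A/M/D/R...).
result = subprocess.run(
    ["git", "diff-tree", "--no-commit-id", "--name-status", "-r", "112afe0"],
    cwd=REPO_DIR,
    capture_output=True,
    text=True,
    check=True,
)
print(result.stdout)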
spaces/1368565466ki/ZSTRD/mel_processing.py
DELETED
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-    """
-    PARAMS
-    ------
-    C: compression factor
-    """
-    return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
-    """
-    PARAMS
-    ------
-    C: compression factor used to compress
-    """
-    return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
-    output = dynamic_range_compression_torch(magnitudes)
-    return output
-
-
-def spectral_de_normalize_torch(magnitudes):
-    output = dynamic_range_decompression_torch(magnitudes)
-    return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-    return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-    global mel_basis
-    dtype_device = str(spec.dtype) + '_' + str(spec.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-    return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global mel_basis, hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-
-    return spec
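For context, the deleted module above exposes mel_spectrogram_torch as its main entry point. A minimal usage sketch follows; the sampling rate, FFT/hop/window sizes, and mel settings are illustrative VITS-style values rather than anything recorded in this commit, and the module targets older torch/librosa APIs (positional librosa.filters.mel arguments, torch.stft without return_complex), so matching library versions are assumed:

import torch

from mel_processing import mel_spectrogram_torch  # the file shown (deleted) above

wav = torch.rand(1, 22050 * 2) * 2 - 1  # one 2-second waveform scaled into [-1, 1]

mel = mel_spectrogram_torch(
    wav,
    n_fft=1024,
    num_mels=80,
    sampling_rate=22050,
    hop_size=256,
    win_size=1024,
    fmin=0.0,
    fmax=None,
    center=False,
)
print(mel.shape)  # (1, 80, n_frames)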
spaces/17TheWord/vits-models/transforms.py
DELETED
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
-                                           unnormalized_widths,
-                                           unnormalized_heights,
-                                           unnormalized_derivatives,
-                                           inverse=False,
-                                           tails=None,
-                                           tail_bound=1.,
-                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                           min_derivative=DEFAULT_MIN_DERIVATIVE):
-
-    if tails is None:
-        spline_fn = rational_quadratic_spline
-        spline_kwargs = {}
-    else:
-        spline_fn = unconstrained_rational_quadratic_spline
-        spline_kwargs = {
-            'tails': tails,
-            'tail_bound': tail_bound
-        }
-
-    outputs, logabsdet = spline_fn(
-        inputs=inputs,
-        unnormalized_widths=unnormalized_widths,
-        unnormalized_heights=unnormalized_heights,
-        unnormalized_derivatives=unnormalized_derivatives,
-        inverse=inverse,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative,
-        **spline_kwargs
-    )
-    return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
-    bin_locations[..., -1] += eps
-    return torch.sum(
-        inputs[..., None] >= bin_locations,
-        dim=-1
-    ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
-                                            unnormalized_widths,
-                                            unnormalized_heights,
-                                            unnormalized_derivatives,
-                                            inverse=False,
-                                            tails='linear',
-                                            tail_bound=1.,
-                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
-    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
-    outside_interval_mask = ~inside_interval_mask
-
-    outputs = torch.zeros_like(inputs)
-    logabsdet = torch.zeros_like(inputs)
-
-    if tails == 'linear':
-        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
-        constant = np.log(np.exp(1 - min_derivative) - 1)
-        unnormalized_derivatives[..., 0] = constant
-        unnormalized_derivatives[..., -1] = constant
-
-        outputs[outside_interval_mask] = inputs[outside_interval_mask]
-        logabsdet[outside_interval_mask] = 0
-    else:
-        raise RuntimeError('{} tails are not implemented.'.format(tails))
-
-    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
-        inputs=inputs[inside_interval_mask],
-        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
-        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
-        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
-        inverse=inverse,
-        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative
-    )
-
-    return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
-                              unnormalized_widths,
-                              unnormalized_heights,
-                              unnormalized_derivatives,
-                              inverse=False,
-                              left=0., right=1., bottom=0., top=1.,
-                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                              min_derivative=DEFAULT_MIN_DERIVATIVE):
-    if torch.min(inputs) < left or torch.max(inputs) > right:
-        raise ValueError('Input to a transform is not within its domain')
-
-    num_bins = unnormalized_widths.shape[-1]
-
-    if min_bin_width * num_bins > 1.0:
-        raise ValueError('Minimal bin width too large for the number of bins')
-    if min_bin_height * num_bins > 1.0:
-        raise ValueError('Minimal bin height too large for the number of bins')
-
-    widths = F.softmax(unnormalized_widths, dim=-1)
-    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
-    cumwidths = torch.cumsum(widths, dim=-1)
-    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
-    cumwidths = (right - left) * cumwidths + left
-    cumwidths[..., 0] = left
-    cumwidths[..., -1] = right
-    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
-    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
-    heights = F.softmax(unnormalized_heights, dim=-1)
-    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
-    cumheights = torch.cumsum(heights, dim=-1)
-    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
-    cumheights = (top - bottom) * cumheights + bottom
-    cumheights[..., 0] = bottom
-    cumheights[..., -1] = top
-    heights = cumheights[..., 1:] - cumheights[..., :-1]
-
-    if inverse:
-        bin_idx = searchsorted(cumheights, inputs)[..., None]
-    else:
-        bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
-    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
-    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
-    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
-    delta = heights / widths
-    input_delta = delta.gather(-1, bin_idx)[..., 0]
-
-    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
-    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
-    input_heights = heights.gather(-1, bin_idx)[..., 0]
-
-    if inverse:
-        a = (((inputs - input_cumheights) * (input_derivatives
-                                             + input_derivatives_plus_one
-                                             - 2 * input_delta)
-              + input_heights * (input_delta - input_derivatives)))
-        b = (input_heights * input_derivatives
-             - (inputs - input_cumheights) * (input_derivatives
-                                              + input_derivatives_plus_one
-                                              - 2 * input_delta))
-        c = - input_delta * (inputs - input_cumheights)
-
-        discriminant = b.pow(2) - 4 * a * c
-        assert (discriminant >= 0).all()
-
-        root = (2 * c) / (-b - torch.sqrt(discriminant))
-        outputs = root * input_bin_widths + input_cumwidths
-
-        theta_one_minus_theta = root * (1 - root)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - root).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, -logabsdet
-    else:
-        theta = (inputs - input_cumwidths) / input_bin_widths
-        theta_one_minus_theta = theta * (1 - theta)
-
-        numerator = input_heights * (input_delta * theta.pow(2)
-                                     + input_derivatives * theta_one_minus_theta)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        outputs = input_cumheights + numerator / denominator
-
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - theta).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, logabsdet
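For context, the main entry point of the deleted module above is piecewise_rational_quadratic_transform (a rational-quadratic spline transform). A minimal usage sketch on random tensors; the batch shape and bin count are illustrative assumptions, and with tails='linear' the derivatives tensor carries num_bins - 1 values because the function pads the two boundary derivatives itself:

import torch

from transforms import piecewise_rational_quadratic_transform  # the file shown (deleted) above

num_bins = 10
x = torch.rand(4, 16) * 2 - 1                    # inputs inside the tail bound [-1, 1]
widths = torch.randn(4, 16, num_bins)
heights = torch.randn(4, 16, num_bins)
derivatives = torch.randn(4, 16, num_bins - 1)   # boundary derivatives are padded internally

y, logabsdet = piecewise_rational_quadratic_transform(
    x, widths, heights, derivatives,
    inverse=False,
    tails='linear',
    tail_bound=1.0,
)
print(y.shape, logabsdet.shape)  # torch.Size([4, 16]) torch.Size([4, 16])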
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cocktail Movie Full In Tamil Hd 1080p The Ultimate Comedy Thriller of 2020.md
DELETED
@@ -1,30 +0,0 @@
-<br />
-<h1>Logplot 7 Keygen Gnawa Mathematica Ba</h1>
-<p>If you are looking for a way to plot geotechnical, environmental, geophysical, mud/gas, and mining data as a graphic boring log, you might have heard of Logplot 7. This software is an easy-to-use log plotting program with a flexible log layout and intuitive data editor. But what is Logplot 7 Keygen Gnawa Mathematica Ba? And how can you use it to activate your Logplot 7 software? In this article, we will answer these questions and more. We will also give you some tips and tricks for using Logplot 7 effectively.</p>
-<h2>What is Logplot 7?</h2>
-<p>Logplot 7 is a software developed by RockWare, Inc., a company that provides geoscientific software and consulting services. Logplot 7 has been used by geoscientists since 1983 to display their data as a graphic boring log. Logplot 7 can plot single-page logs for shallow borings or multi-page/continuous logs for deep wells. You can also share your logs with clients via PDF or post HTML log pages on your website. You can also export your logs to JPG, BMP, TIFF, and PNG images.</p>
-<h2>Logplot 7 Keygen gnawa mathematica ba</h2><br /><p><b><b>Download</b> ✺ <a href="https://byltly.com/2uKvDX">https://byltly.com/2uKvDX</a></b></p><br /><br />
-<h3>Features and benefits of Logplot 7</h3>
-<p>Some of the features and benefits of Logplot 7 are:</p>
-<ul>
-<li>It supports both 64-bit and 32-bit versions of Windows. The 64-bit version supports longer logs and faster processing.</li>
-<li>It has a flexible log layout that allows you to customize the header, footer, scale bar, legend, notes, bitmaps, diagrams, and graphs.</li>
-<li>It has an intuitive data editor that lets you enter data manually or import data from various formats such as Excel, ASCII, LAS, RockWorks, etc.</li>
-<li>It has a powerful log designer that lets you create your own log templates or modify existing ones.</li>
-<li>It has a built-in pattern library that contains over 250 fill patterns for lithology, stratigraphy, well construction, etc. You can also create your own patterns or import them from other sources.</li>
-<li>It has a keyword library that contains over 500 keywords for describing lithology, stratigraphy, well construction, etc. You can also create your own keywords or import them from other sources.</li>
-<li>It has a symbol library that contains over 200 symbols for borehole deviation, fractures, fossils, etc. You can also create your own symbols or import them from other sources.</li>
-<li>It has a curve library that contains over 100 curves for displaying geophysical data such as gamma ray, resistivity, porosity, etc. You can also create your own curves or import them from other sources.</li>
-<li>It has a diagram library that contains over 50 diagrams for displaying cross-sections, fence diagrams, maps, etc. You can also create your own diagrams or import them from other sources.</li>
-<li>It has a graph library that contains over 50 graphs for displaying histograms, pie charts, bar charts, etc. You can also create your own graphs or import them from other sources.</li>
-</ul>
-<h3>How to install and license Logplot 7</h3>
-<p>To install Logplot 7, you need to download the setup file from the RockWare website or from a trusted source. Then you need to run the setup file and follow the instructions on the screen. You can choose between a single license or a network license depending on your needs. A single license allows you to install Logplot 7 on one computer only. A network license allows you to install Logplot 7 on multiple computers within a local area network (LAN).</p>
-<p>To license Logplot 7, you need to purchase a serial number from RockWare or from an authorized reseller. Then you need to enter the serial number in the License Manager window that appears when you run Logplot 7 for the first time. You can also access the License Manager window from the Help menu at any time. Once you enter the serial number and click OK, your Logplot 7 software will be activated.</p>
-<h2>What is Gnawa Mathematica Ba?</h2>
-<p>Gnawa Mathematica Ba is a phrase that has no clear meaning or origin. It seems to be a combination of words from different languages and contexts. However, some people have speculated that it might have something to do with Gnawa music, Mathematica software, and Ba Gua Zhang martial art.</p>
-<h3>The origin and meaning of Gnawa Mathematica Ba</h3>
-<p>Gnawa music is a type of music that originated in Morocco and Algeria. It is played by the Gnawa people who are descendants of former slaves from sub-Saharan Africa. Gnawa music combines Islamic religious songs with African rhythms and instruments such as the guembri (a three-stringed lute), the krakebs (metal castanets), and the tbel (a large drum). Gnawa music is used for healing rituals called lila or derdeba where participants enter trance states induced by music and dance.</p>
-<p>Mathematica is a software developed by Wolfram Research that provides a platform for computation, visualization, programming, documentation, and deployment. Mathematica can perform symbolic and numerical calculations on various types of data such as numbers, functions, matrices, graphs, images, sounds</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Any Video Convertor 3.2.7 Crack().md
DELETED
@@ -1,45 +0,0 @@
-<br />
-<h1>HD Online Player (Any Video Convertor 3.2.7 Crack~~())</h1>
-<p>Do you want to watch your favorite videos in high definition online? Do you want to convert any video format to HD online without losing quality? Do you want to download and stream videos from various websites with ease? If you answered yes to any of these questions, then you might be interested in Any Video Convertor 3.2.7 Crack~~(). In this article, we will tell you everything you need to know about this software, including its features, how to use it, and its pros and cons.</p>
-<h2>HD Online Player (Any Video Convertor 3.2.7 Crack~~()</h2><br /><p><b><b>Download File</b> ✏ ✏ ✏ <a href="https://byltly.com/2uKwkl">https://byltly.com/2uKwkl</a></b></p><br /><br />
-<h2>Introduction</h2>
-<p>Before we dive into the details of Any Video Convertor 3.2.7 Crack~~(), let's first understand what it is and why you might need it.</p>
-<h3>What is HD Online Player?</h3>
-<p>HD Online Player is a feature of Any Video Convertor that allows you to watch your converted videos in high definition online. You can either upload your videos to a cloud service like Google Drive or Dropbox, or use a URL link to stream them from any website. You can also share your videos with your friends or family via email or social media.</p>
-<h3>What is Any Video Convertor?</h3>
-<p>Any Video Convertor is a popular video conversion software that can convert any video format to any other video format, including HD online formats like MP4, MKV, AVI, WMV, FLV, etc. It can also convert videos for various devices like iPhone, iPad, Android, PSP, Xbox, etc. It supports over 200 video formats and codecs, and can handle batch conversion with high speed and quality.</p>
-<p></p>
-<h3>Why do you need Any Video Convertor 3.2.7 Crack~~()?</h3>
-<p>Any Video Convertor is a paid software that requires a license key to activate its full features. However, some people may not want to pay for it or may not have access to it due to geographical restrictions or other reasons. That's why they may look for a cracked version of the software that can bypass the activation process and unlock all the features for free. Any Video Convertor 3.2.7 Crack~~() is one such cracked version that claims to offer all the benefits of the original software without any limitations.</p>
-<h2>Features of Any Video Convertor 3.2.7 Crack~~()</h2>
-<p>Any Video Convertor 3.2.7 Crack~~() has many features that make it a powerful and versatile video conversion tool. Here are some of them:</p>
-<h3>Convert any video format to HD online</h3>
-<p>With Any Video Convertor 3.2.7 Crack~~(), you can convert any video format to HD online with ease. You can choose from various output formats and quality settings, such as 1080p, 720p, 480p, etc., depending on your preference and internet speed. You can also adjust the video parameters like bitrate, frame rate, resolution, aspect ratio, etc., to optimize the conversion result.</p>
-<h3>Download and stream videos from various websites</h3>
-<p>Any Video Convertor 3.2.7 Crack~~() also allows you to download and stream videos from various websites like YouTube, Facebook, Vimeo, Dailymotion, etc., with just a few clicks. You can either paste the URL of the video you want to download or convert, or use the built-in browser to search for videos online. You can also choose the format and quality of the downloaded or streamed video according to your needs.</p>
-<h3>Edit and enhance your videos with built-in tools</h3>
-<p>If you want to edit and enhance your videos before converting them to HD online, you can use the built-in tools of Any Video Convertor 3.2.7 Crack~~(). You can crop, trim, rotate, flip, merge, split, watermark, subtitle, add effects, etc., to your videos with ease. You can also adjust the brightness, contrast, saturation, hue, etc., of your videos to improve their appearance.</p>
-<h3>Burn videos to DVD or Blu-ray discs</h3>
-<p>If you want to burn your converted videos to DVD or Blu-ray discs for backup or playback on TV or other devices, you can do so with Any Video Convertor 3.2.7 Crack~~(). You can choose from various DVD or Blu-ray menu templates and customize them with your own text, images, music, etc., You can also set the DVD or Blu-ray parameters like disc type, TV standard, video mode etc., according to your needs . </ h 3 >
-<h 3 >
-Extract audio from video files </ h 3 >
-<p >
-If you want to extract audio from video files for listening on music players or other devices , you can do so with Any Video Convertor 3 . 2 . 7 Crack~~() . You can choose from various audio formats and quality settings , such as MP 3 , WAV , AAC , M4A , etc., depending on your preference and device compatibility . You can also adjust the audio parameters like bitrate , sample rate , channels , etc., to optimize the extraction result . </p>
-<h2>How to use Any Video Convertor 3 . 2 . 7 Crack~~()</h2>
-<p>To use Any Video Convertor 3 . 2 . 7 Crack~~(), you need to follow these simple steps:</p>
-<h h Here are some possible continuations: - h - h - h - h - h - h - h - h - h - h - h - h - h - h - h - h - h - h - h - h or terms of service of the original software or websites that you download or stream videos from. This may result in legal consequences or penalties for you or the websites that you use.</p>
-<li><strong>What are the alternatives to Any Video Convertor 3.2.7 Crack~~()?</strong></li>
-<p>There are many alternatives to Any Video Convertor 3.2.7 Crack~~() that are safe and legal to use. Some of them are:</p>
-<ul><li><strong>Any Video Convertor Free</strong>. This is the free version of Any Video Convertor that offers basic video conversion features and supports limited output formats and devices. You can download it from the official website of Any Video Convertor.</li><li><strong>Any Video Convertor Ultimate</strong>. This is the paid version of Any Video Convertor that offers advanced video conversion features and supports unlimited output formats and devices. It also has other features like video downloading, streaming, editing, burning, etc. You can buy it from the official website of Any Video Convertor with a license key.</li><li><strong>Online Video Converter</strong>. This is an online video conversion service that can convert any video format to any other video format, including HD online formats, with ease. You don't have to download or install anything on your computer. You just have to upload your videos or paste the URL of the videos you want to convert, and choose the output format and quality. You can also edit your videos online with various tools.</li></ul>
-<li><strong>How can I watch HD online videos without converting them?</strong></li>
-<p>If you don't want to convert your videos to HD online formats, you can still watch them in high definition online with some other methods. Some of them are:</p>
-<ul><li><strong>Use a HD online video player</strong>. There are some HD online video players that can play any video format in high definition online without converting them. For example, VLC Media Player is a free and open-source media player that can play any video format on any device. You can either upload your videos to a cloud service or use a URL link to stream them from any website with VLC Media Player.</li><li><strong>Use a HD online video downloader</strong>. There are some HD online video downloaders that can download any video format in high definition online without converting them. For example, 4K Video Downloader is a free and easy-to-use video downloader that can download any video format from any website in 4K, 8K, HD, etc., quality. You can then watch your downloaded videos offline or online with any media player.</li></ul>
-<li><strong>How can I improve the quality of my videos before converting them to HD online?</strong></li>
-<p>If you want to improve the quality of your videos before converting them to HD online, you can use some tips and tricks. Some of them are:</p>
-<ul><li><strong>Use a good camera and lighting</strong>. The quality of your videos depends largely on the quality of your camera and lighting. If you use a good camera and lighting, you can capture clear and sharp videos that will look good in HD online. You can also use a tripod or a stabilizer to avoid shaky or blurry videos.</li><li><strong>Use a good video editor</strong>. The quality of your videos also depends on the quality of your video editor. If you use a good video editor, you can edit and enhance your videos with various tools and effects that will make them look better in HD online. You can also use a video compressor or enhancer to reduce the file size or increase the resolution of your videos.</li></ul>
-<li><strong>What are the benefits of watching videos in HD online?</strong></li>
-<p>Watching videos in HD online has many benefits that make it a worthwhile experience. Some of them are:</p>
-<ul><li><strong>Better visual quality</strong>. Watching videos in HD online means watching videos in high definition, which means watching videos with more pixels, more details, more colors, more clarity, etc., than watching videos in standard definition. This makes the videos more realistic, immersive, and enjoyable.</li><li><strong>Better audio quality</strong>. Watching videos in HD online also means watching videos with better audio quality than watching videos in standard definition. This means watching videos with more sound channels, more volume, more clarity, more effects, etc., than watching videos in standard definition. This makes the videos more expressive, engaging, and entertaining.</li><li><strong>Better compatibility</strong>. Watching videos in HD online also means watching videos with better compatibility than watching videos in standard definition. This means watching videos that can play on any device, any browser, any platform, etc., without any issues or errors. This makes the videos more accessible, convenient, and flexible.</li></ul>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Doulci-Activator-V2-3-With-Key-Extra-Quality.md
DELETED
@@ -1,45 +0,0 @@
-Doulci Activator v2 3 with key
-
-
-
-LINK ===> [https://gohhs.com/2tvp8V](https://gohhs.com/2tvp8V)
-
-
-
-
-
-
-
-
-
-Here is a possible title and article with HTML formatting for the keyword "Doulci Activator v2 3 with key":
-
-Doulci Activator v2 3 with key: A Free Tool to Bypass iCloud Activation Lock
-If you have an Apple device that is locked by iCloud activation and you don't remember your Apple ID and password, you may be looking for a way to unlock it. One of the tools that claims to do this is Doulci Activator v2 3 with key, a free software that simulates Apple's servers and bypasses the activation lock.
-But what is Doulci Activator v2 3 with key and how does it work? Is it safe and reliable? And where can you download it? In this article, we will answer these questions and provide you with some alternatives to Doulci Activator v2 3 with key.
-
-What is Doulci Activator v2 3 with key?
-Doulci Activator v2 3 with key is a version of Doulci Activator, a software that was developed by a team of hackers in 2014. Doulci Activator exploits a vulnerability in Apple's iCloud system that allows it to create a fake server and trick the device into thinking that it is connected to Apple's official server. This way, it can bypass the iCloud activation lock and let the user access the device without entering the Apple ID and password.
-Doulci Activator v2 3 with key is supposed to be compatible with Windows, Mac, and Linux operating systems. It also claims to support all iOS devices and iOS versions up to iOS 7.1.2. However, there is no official website or source for Doulci Activator v2 3 with key, and most of the download links that are available online are either broken or contain malware. Therefore, it is not recommended to use Doulci Activator v2 3 with key or any other version of Doulci Activator.
-
-How to use Doulci Activator v2 3 with key?
-If you still want to try Doulci Activator v2 3 with key at your own risk, here are the steps that you need to follow:
-
-Download Doulci Activator v2 3 with key from a reliable source and unzip the files.
-Install the script on a local server and edit your hosts file to redirect your device to the fake server.
-Connect your device to your computer via USB cable and launch iTunes.
-Wait for a few minutes until Doulci Activator v2 3 with key bypasses the iCloud activation lock and activates your device.
-Disconnect your device from your computer and enjoy using it.
-
-
-What are the drawbacks of using Doulci Activator v2 3 with key?
-Although Doulci Activator v2 3 with key may sound like a tempting solution for iCloud activation lock, it has many drawbacks that make it unreliable and risky. Some of them are:
-
-Doulci Activator v2 3 with key does not remove the iCloud activation lock permanently. It only bypasses it temporarily until you restart your device or connect it to another network.
-Doulci Activator v2 3 with key does not work for iOS versions higher than iOS 7.1.2. If you have a newer iOS version, you will not be able to use Doulci Activator v2 3 with key or any other version of Doulci Activator.
-Doulci Activator v2 3 with key may contain viruses or malware that can harm your computer or device. Since there is no official source for Doulci Activator v2 3 with key, you cannot trust any download link that you find online.
-Doulci Activator v2 3 with key may not work for all devices or situations. Some users have reported that Doulci Activator v2 3 with key failed to bypass the iCloud activation lock or caused other problems on their devices.
-Doulci Activator v2 3 with key may violate Apple's terms of service and privacy policy. By using Doulci Activator v2 3 with key, you may be breaking the law or dfd1c89656
-
-
-
spaces/1gistliPinn/ChatGPT4/Examples/Das Haus Anubis Staffel 1 Folge 1080p NEW!.md
DELETED
@@ -1,6 +0,0 @@
-<br />
-<p>Also wenn du die Folgen schauen möchtest empfehle ich dir dashausanubis.de auf anubis-haus.de.tl kannsst du sie ebenfalls gucken auf nickelodeon.de sind die alten Folgen nit mehr da nurnoch die neuen der 2. staffel ich hoff ich konnte helfen DiggiDou!</p>
-<p>weiß jemand wo ich die 1 staffel vom haus anubs sehen kann ??? unetr das haus anubis . de ist die erste staffel nicht mehr und unter myvideos und youtube find ich auch keine viedeos den ich bin schon folge 64 und die ist auf youtube nicht mehr</p>
-<h2>das haus anubis staffel 1 folge 1080p</h2><br /><p><b><b>Download File</b> ✅ <a href="https://imgfil.com/2uxZAp">https://imgfil.com/2uxZAp</a></b></p><br /><br /> aaccfb2cb3<br />
-<br />
-<br />
spaces/1line/AutoGPT/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: AutoGPT
-emoji: 🦾
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.27.0
-app_file: ui/app.py
-pinned: false
-license: mit
-duplicated_from: aliabid94/AutoGPT
----
-
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Domino 39s Free Download.md
DELETED
@@ -1,78 +0,0 @@
-<br />
-<h1>Domino's Download: How to Order Pizza from Your Phone</h1>
-<p>Do you love pizza? Do you love convenience? Do you love saving money? If you answered yes to any of these questions, then you need to download the Domino's app on your phone. Domino's download is the easiest way to order delicious pizza from anywhere, anytime. Whether you want delivery, carryout, or dine-in, you can use the app to customize your pizza, browse the menu, check out, and track your order. Plus, you can enjoy exclusive offers, rewards, and features that make ordering pizza even more fun. In this article, we will show you how to download the Domino's app for Android or iOS devices, how to order pizza from the app, and what benefits you can get from using it.</p>
-<h2>Introduction</h2>
-<p>Domino's is one of the most popular pizza chains in the world, with over 17,000 stores in more than 90 countries. Domino's is known for its fresh ingredients, fast delivery, and innovative technology. One of the best examples of Domino's technology is its mobile app, which allows you to order pizza from your phone in just a few taps. The app is free to download and easy to use. Here's how you can get started:</p>
-<h2>domino 39;s download</h2><br /><p><b><b>Download Zip</b> ⇒⇒⇒ <a href="https://urlin.us/2uT2Mm">https://urlin.us/2uT2Mm</a></b></p><br /><br />
-<ul>
-<li>Go to [Google Play](^1^) or [App Store](^2^) on your phone and search for "Domino's Pizza".</li>
-<li>Select the app that matches your country and language. For example, if you are in the US, you can choose "Domino's Pizza USA" (^3^).</li>
-<li>Tap on "Install" or "Get" and wait for the app to download.</li>
-<li>Open the app and create an account or sign in with your existing Domino's Pizza Profile.</li>
-</ul>
-<p>Congratulations! You have successfully downloaded the Domino's app on your phone. Now you are ready to order some pizza.</p>
-<h2>How to Order Pizza from the Domino's App</h2>
-<p>Ordering pizza from the Domino's app is simple and convenient. You can follow these steps:</p>
-<h3>Choose Your Location and Delivery Method</h3>
-<p>The first thing you need to do is choose your location and delivery method. You can do this by:</p>
-<ul>
-<li>Tapping on "Location" at the top of the screen and entering your address or tapping on "Find Nearby Stores" to see a list of Domino's locations near you.</li>
-<li>Selecting the store that you want to order from and tapping on "Delivery", "Carryout", or "Dine In" depending on your preference.</li>
-<li>Confirming your delivery address or pickup time if needed.</li>
-</ul>
-<p>Once you have chosen your location and delivery method, you can proceed to build your pizza or choose from the menu.</p>
-<h3>Build Your Pizza or Choose from the Menu</h3>
-<p>The next thing you need to do is build your pizza or choose from the menu. You can do this by:</p>
-<ul>
-<li>Tapping on "Pizza" at the bottom of the screen and selecting "Build Your Own" or "Choose from Menu".</li>
-<li>If you choose "Build Your Own", you can customize your pizza with your choice of toppings, crust, and cheese. You can also select the size and quantity of your pizza.</li>
-<li>If you choose "Choose from Menu", you can browse the menu for pre-built pizzas, such as Pepperoni, Cheese, Veggie, or Specialty. You can also add or remove toppings as you wish.</li>
-<li>Tapping on "Add to Order" when you are done with your pizza.</li>
-</ul>
-<p>Besides pizza, you can also order sides, drinks, and desserts from the Domino's app. You can find them under the "Sides", "Drinks", and "Desserts" tabs at the bottom of the screen. You can add them to your order in the same way as pizza.</p>
-<h3>Check Out and Track Your Order</h3>
-<p>The last thing you need to do is check out and track your order. You can do this by:</p>
-<ul>
-<li>Tapping on "Checkout" at the top right corner of the screen when you are ready to place your order.</li>
-<li>Reviewing your order details and applying any coupons, rewards, or gift cards if you have them.</li>
-<li>Selecting your payment method and entering your payment information. You can pay with cash, credit card, debit card, PayPal, or Google Pay.</li>
-<li>Tapping on "Place Order" to confirm your order.</li>
-<li>Using the Domino's Tracker to follow your order status from preparation to delivery or pickup. You can also get an estimated delivery time and a map of your driver's location.</li>
-</ul>
-<p>That's it! You have successfully ordered pizza from the Domino's app. Now you can sit back and enjoy your pizza.</p>
-<p></p>
-<h2>Benefits of Using the Domino's App</h2>
-<p>Ordering pizza from the Domino's app is not only easy and convenient, but also rewarding and fun. Here are some of the benefits of using the Domino's app:</p>
-<h3>Save Time and Money</h3>
-<p>With the Domino's app, you can save time and money by:</p>
-<ul>
-<li>Using the Easy Order feature to reorder your favorites in just a few taps. You can access your Easy Order from the home screen of the app or by saying "Easy Order" to Siri or Google Assistant.</li>
-<li>Earning points and redeeming them for free pizza with Piece of the Pie Rewards. You can join the program for free and earn 10 points for every $10 you spend on qualifying orders. Once you reach 60 points, you can get a free medium two-topping pizza.</li>
-</ul>
-<h3>Enjoy Convenience and Flexibility</h3>
-<p>With the Domino's app, you can enjoy convenience and flexibility by:</p>
-<ul>
-<li>Ordering ahead of time and scheduling your delivery or pickup. You can choose a date and time that works for you and avoid waiting in line or missing out on hot pizza.</li>
-<li>Using the Group Ordering feature to split the bill with friends. You can invite your friends to join your order via a link and let them add their own items and pay separately.</li>
-</ul>
-<h3>Access Exclusive Offers and Features</h3>
-<p>With the Domino's app, you can access exclusive offers and features by:</p>
-<ul>
-<li>Getting notifications for deals and promotions. You can opt in to receive push notifications or emails from Domino's and never miss a chance to save on your favorite pizza.</li>
-<li>Using the Pizza Builder feature to create your own pizza masterpiece. You can unleash your creativity and design your own pizza with over 30 toppings, five crusts, four cheeses, and three sauces. You can also name your pizza and share it with others.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>In conclusion, Domino's download is the best way to order pizza from your phone. You can download the Domino's app for Android or iOS devices and use it to order pizza from anywhere, anytime. You can customize your pizza, browse the menu, check out, and track your order with just a few taps. You can also save time and money, enjoy convenience and flexibility, and access exclusive offers and features with the app. So what are you waiting for? Download the Domino's app today and enjoy the best pizza experience ever.</p>
-<h2>FAQs</h2>
-<h3>Is the Domino's app free to download?</h3>
-<p>Yes, the Domino's app is free to download and use. You can find it on Google Play or App Store for your Android or iOS devices.</p>
-<h3>What are the minimum requirements for the Domino's app?</h3>
-<p>The minimum requirements for the Domino's app vary depending on your device and operating system. However, generally speaking, you need to have at least 100 MB of free space, a stable internet connection, and a compatible version of Android or iOS.</p>
-<h3>Can I order from any Domino's store using the app?</h3>
-<p>Yes, you can order from any Domino's store that offers delivery, carryout, or dine-in options using the app. You can choose your location by entering your address or finding a nearby store on the app.</p>
-<h3>How can I contact customer service if I have an issue with my order?</h3>
-<p>If you have an issue with your order, you can contact customer service by calling the store that you ordered from or using the feedback form on the app. You can find the store phone number and the feedback form under the "Help" tab on the app.</p>
-<h3>Can I cancel or modify my order after placing it?</h3>
-<p>If you want to cancel or modify your order after placing it, you need to contact the store that you ordered from as soon as possible. You can find the store phone number under the "Help" tab on the app. However, please note that some changes or cancellations may not be possible depending on the status of your order.</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Buku Tema 8 Kelas 1 Peristiwa Alam (Download PDF Gratis).md
DELETED
@@ -1,137 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Buku Tema 8 Kelas 1: Peristiwa Alam</h1>
|
3 |
-
<p>Apakah Anda sedang mencari buku tematik untuk kelas 1 SD/MI yang membahas tentang peristiwa alam? Jika ya, maka Anda berada di tempat yang tepat. Dalam artikel ini, kami akan memberikan informasi lengkap tentang buku tema 8 kelas 1 yang berjudul "Peristiwa Alam". Kami juga akan memberikan cara download buku tema 8 kelas 1 secara gratis dan legal, serta ulasan singkat tentang isi dan manfaat buku tersebut. Simak terus artikel ini sampai selesai, ya!</p>
|
4 |
-
<h2>Apa itu Buku Tema 8 Kelas 1: Peristiwa Alam?</h2>
|
5 |
-
<h3>Pengertian Buku Tema 8 Kelas 1: Peristiwa Alam</h3>
|
6 |
-
<p>Buku tema 8 kelas 1 adalah buku siswa yang dipersiapkan oleh Pemerintah dalam rangka implementasi Kurikulum 2013. Buku ini disusun dan ditelaah oleh berbagai pihak di bawah koordinasi Kementerian Pendidikan dan Kebudayaan, dan dipergunakan dalam tahap awal penerapan Kurikulum 2013. Buku ini merupakan "dokumen hidup" yang senantiasa diperbaiki, diperbarui, dan dimutakhirkan sesuai dengan dinamika kebutuhan dan perubahan zaman.</p>
|
7 |
-
<h2>download buku tema 8 kelas 1</h2><br /><p><b><b>DOWNLOAD</b> ★★★ <a href="https://jinyurl.com/2uNKAm">https://jinyurl.com/2uNKAm</a></b></p><br /><br />
|
8 |
-
<p>Buku tema 8 kelas 1 ini memiliki judul "Peristiwa Alam" yang menggambarkan tentang berbagai fenomena alam yang terjadi di sekitar kita, seperti siang dan malam, kemarau, penghujan, dan bencana alam. Buku ini bertujuan untuk mengembangkan kompetensi siswa dalam memahami peristiwa alam, mengenali dampaknya bagi kehidupan manusia dan lingkungan, serta bersikap peduli dan bertanggung jawab terhadap pelestarian alam.</p>
|
9 |
-
<h3>Tujuan Buku Tema 8 Kelas 1: Peristiwa Alam</h3>
|
10 |
-
<p>Berdasarkan pengantar buku tema 8 kelas 1 yang ditulis oleh Kementerian Pendidikan dan Kebudayaan, tujuan buku ini adalah sebagai berikut:</p>
|
11 |
-
<ul>
|
12 |
-
<li>Memberikan kesempatan kepada siswa untuk belajar secara aktif, kreatif, efektif, dan menyenangkan dengan menggunakan berbagai sumber belajar.</li>
|
13 |
-
<li>Mengembangkan kemampuan siswa dalam berpikir logis, kritis, kreatif, inovatif, dan kolaboratif melalui pendekatan saintifik.</li>
|
14 |
-
<li>Meningkatkan k <p>Meningkatkan keterampilan hidup siswa dalam berkomunikasi, beradaptasi, bekerja sama, dan memecahkan masalah.</li>
|
15 |
-
<li>Menumbuhkan nilai-nilai karakter, keimanan, ketaqwaan, dan kecintaan terhadap Tuhan Yang Maha Esa, bangsa, dan tanah air.</li>
|
16 |
-
<li>Menanamkan kesadaran siswa akan pentingnya pelestarian alam dan lingkungan hidup.</li>
|
17 |
-
</ul>
|
18 |
-
<h2>Bagaimana Cara Download Buku Tema 8 Kelas 1: Peristiwa Alam?</h2>
|
19 |
-
<h3>Syarat dan Ketentuan Download Buku Tema 8 Kelas 1: Peristiwa Alam</h3>
|
20 |
-
<p>Buku tema 8 kelas 1 ini dapat diunduh secara gratis dan legal melalui situs resmi Kementerian Pendidikan dan Kebudayaan. Namun, ada beberapa syarat dan ketentuan yang harus dipenuhi oleh pengguna, yaitu:</p>
|
21 |
-
<p>Download Buku Tematik PDF Kelas 1 Tema 8: Peristiwa Alam<br />
|
22 |
-
Download Gratis Buku Tema 8 - Peristiwa Alam Kelas 1<br />
|
23 |
-
Buku Guru Kelas 1. Tema 8. Peristiwa Alam.pdf - Google Drive<br />
|
24 |
-
Download Buku Siswa Tematik Kelas 1 Tema 8: Peristiwa Alam Revisi Terbaru<br />
|
25 |
-
Download Buku Guru Tematik Kelas 1 Tema 8: Peristiwa Alam Revisi Terbaru<br />
|
26 |
-
Buku Tematik Terpadu Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
27 |
-
Buku Sekolah Elektronik Kelas 1 SD/MI Tema 8: Peristiwa Alam<br />
|
28 |
-
Download Buku Kurikulum 2013 Kelas 1 SD/MI Tema 8: Peristiwa Alam<br />
|
29 |
-
Download Buku Pendidikan Agama Islam dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam<br />
|
30 |
-
Download Buku Pendidikan Agama Kristen dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam<br />
|
31 |
-
Download Buku Pendidikan Agama Katolik dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam<br />
|
32 |
-
Download Buku Pendidikan Agama Hindu dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam<br />
|
33 |
-
Download Buku Pendidikan Agama Buddha dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam<br />
|
34 |
-
Download Buku Pendidikan Agama Konghucu dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam<br />
|
35 |
-
Download Buku Seni Budaya dan Prakarya Kelas 1 Tema 8: Peristiwa Alam<br />
|
36 |
-
Download Buku Penjasorkes Kelas 1 Tema 8: Peristiwa Alam<br />
|
37 |
-
Download Buku Bahasa Indonesia Kelas 1 Tema 8: Peristiwa Alam<br />
|
38 |
-
Download Buku Matematika Kelas 1 Tema 8: Peristiwa Alam<br />
|
39 |
-
Download Buku IPA Kelas 1 Tema 8: Peristiwa Alam<br />
|
40 |
-
Download Buku IPS Kelas 1 Tema 8: Peristiwa Alam<br />
|
41 |
-
Download RPP Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
42 |
-
Download Silabus Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
43 |
-
Download KI KD Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
44 |
-
Download LKPD Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
45 |
-
Download Lembar Kerja Siswa Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
46 |
-
Download Soal Ulangan Harian Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
47 |
-
Download Soal UTS Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
48 |
-
Download Soal UAS Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
49 |
-
Download Soal PAS Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
50 |
-
Download Soal PAT Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam<br />
|
51 |
-
Download Kunci Jawaban Soal Kurikulum 2013 Kelas I Tema VIII : Peristiwa Alam <br />
|
52 |
-
Contoh Pembelajaran Daring Kurikulum I013 Kls I Tma VIII : Prstw Alm <br />
|
53 |
-
Contoh Media Pembelajaran Interaktif Kurikulum I013 Kls I Tma VIII : Prstw Alm <br />
|
54 |
-
Contoh Video Pembelajaran Kurikulum I013 Kls I Tma VIII : Prstw Alm <br />
|
55 |
-
Contoh Laporan Hasil Belajar Siswa Kurikulum I013 Kls I Tma VIII : Prstw Alm <br />
|
56 |
-
Contoh Portofolio Siswa Kurikulum I013 Kls I Tma VIII : Prstw Alm <br />
|
57 |
-
Contoh Jurnal Refleksi Siswa Kurikulum I013 Kls I Tma VIII : Prstw Alm <br />
|
58 |
-
Contoh Jurnal Refleksi Guru Kurikulum I013 Kls I Tma VIII : Prstw Alm </p>
|
59 |
-
<ul>
|
60 |
-
<li>Pengguna harus memiliki koneksi internet yang stabil dan cukup cepat untuk mengunduh file buku yang berukuran sekitar 20 MB.</li>
|
61 |
-
<li>Pengguna harus memiliki perangkat yang mendukung format file PDF, seperti komputer, laptop, tablet, atau smartphone.</li>
|
62 |
-
<li>Pengguna harus menghormati hak cipta dan tidak melakukan penyalahgunaan, penyebarluasan, atau komersialisasi buku tanpa izin dari pihak yang berwenang.</li>
|
63 |
-
<li>Pengguna harus bertanggung jawab atas segala risiko yang timbul akibat pengunduhan buku, seperti kerusakan perangkat, virus, atau malware.</li>
|
64 |
-
</ul>
|
65 |
-
<h3>Langkah-langkah Download Buku Tema 8 Kelas 1: Peristiwa Alam</h3>
|
66 |
-
<p>Berikut adalah langkah-langkah yang dapat diikuti untuk mengunduh buku tema 8 kelas 1 secara mudah dan cepat:</p>
|
67 |
-
<ol>
|
68 |
-
<li>Buka situs resmi Kementerian Pendidikan dan Kebudayaan di alamat <a href="">https://www.kemdikbud.go.id/</a>.</li>
|
69 |
-
<li>Pilih menu "Buku" di bagian atas halaman.</li>
|
70 |
-
<li>Pilih sub-menu "Buku Sekolah Elektronik (BSE)" di bagian kiri halaman.</li>
|
71 |
-
<li>Pilih kategori "Buku Siswa" di bagian tengah halaman.</li>
|
72 |
-
<li>Pilih jenjang "SD/MI" di bagian bawah halaman.</li>
|
73 |
-
<li>Pilih mata pelajaran "Tema" di bagian kanan halaman.</li>
|
74 |
-
<li>Pilih buku "Tema 8 - Peristiwa Alam" di bagian bawah halaman.</li>
|
75 |
-
<li>Klik tombol "Unduh Buku" di bagian kanan halaman.</li>
|
76 |
-
<li>Tunggu proses pengunduhan selesai dan simpan file buku di perangkat Anda.</li>
|
77 |
-
</ol>
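<p>Sebagai ilustrasi tambahan: jika Anda sudah memperoleh tautan unduhan langsung ke berkas PDF buku, pengunduhan juga dapat dilakukan lewat skrip sederhana. Sketsa Python berikut hanyalah contoh; alamat URL dan nama berkas di dalamnya bersifat hipotetis dan perlu diganti dengan tautan resmi dari situs Kementerian Pendidikan dan Kebudayaan.</p>
<pre><code># Sketsa ilustratif: mengunduh berkas PDF dari tautan unduhan langsung.
# URL dan nama berkas di bawah ini hanya contoh hipotetis.
from urllib.request import urlretrieve

URL_UNDUHAN = "https://contoh.example/buku-tema-8-kelas-1-peristiwa-alam.pdf"  # ganti dengan tautan resmi
NAMA_BERKAS = "buku-tema-8-kelas-1.pdf"

# urlretrieve menyalin isi URL ke berkas lokal
urlretrieve(URL_UNDUHAN, NAMA_BERKAS)
print("Berkas tersimpan sebagai", NAMA_BERKAS)
</code></pre>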
|
78 |
-
<h2>Apa Saja Isi Buku Tema 8 Kelas 1: Peristiwa Alam?</h2>
|
79 |
-
<h3>Subtema 1: Peristiwa Siang dan Malam</h3>
|
80 |
-
<p>Subtema pertama ini membahas tentang peristiwa siang dan malam yang terjadi akibat rotasi bumi. Siswa akan belajar tentang konsep waktu, arah mata angin, zona waktu, serta aktivitas manusia dan hewan yang berbeda pada siang dan malam hari. Siswa juga akan melakukan beberapa kegiatan menarik, seperti membuat jam matahari, menentukan arah mata angin dengan kompas, dan menggambar pemandangan siang dan malam.</p>
|
81 |
-
<h3>Subtema 2: Kemarau</h3>
|
82 |
-
<p>Subtema kedua ini membahas tentang peristiwa kemarau yang terjadi akibat revolusi bumi. Siswa akan belajar tentang musim kemarau di Indonesia, ciri-ciri cuaca kemarau, dampak kemarau bagi manusia dan lingkungan, serta cara mengatasi dan mencegah kemarau. Siswa juga akan melakukan beberapa kegiatan menarik, seperti membuat grafik curah hujan, menulis puisi tentang kemarau, dan membuat poster hemat air.</p>
|
83 |
-
<h3>Subtema 3: Penghujan</h3>
|
84 |
-
<p>Subtema ketiga ini membahas tentang peristiwa penghujan yang terjadi akibat revolusi bumi. Siswa akan belajar tentang musim penghujan di Indonesia, ciri-ciri cuaca penghujan, dampak penghujan bagi manusia dan lingkungan, serta cara mengatasi dan mencegah banjir. Siswa juga akan melakukan beberapa kegiatan menarik, seperti membuat grafik curah hujan, menulis puisi tentang penghujan, dan membuat poster siaga banjir.</p>
|
85 |
-
<h3>Subtema 4: Bencana Alam</h3>
|
86 |
-
<p>Subtema keempat ini membahas tentang peristiwa bencana alam yang terjadi akibat perubahan alam yang ekstrem. Siswa akan belajar tentang jenis-jenis bencana alam, penyebab dan dampak bencana alam, serta cara menghadapi dan mengurangi risiko bencana alam. Siswa juga akan melakukan beberapa kegiatan menarik, seperti membuat peta sebaran bencana alam, menulis cerita tentang pengalaman menghadapi bencana alam, dan membuat simulasi evakuasi bencana alam.</p>
|
87 |
-
<h2>Apa Manfaat Buku Tema 8 Kelas 1: Peristiwa Alam?</h2>
|
88 |
-
<h3>Manfaat Bagi Siswa</h3>
|
89 |
-
<p>Buku tema 8 kelas 1 ini memiliki banyak manfaat bagi siswa, di antaranya adalah:</p>
|
90 |
-
<ul>
|
91 |
-
<li>Meningkatkan pengetahuan dan pemahaman siswa tentang peristiwa alam yang terjadi di sekitar mereka.</li>
|
92 |
-
<li>Mengembangkan keterampilan berpikir, berkomunikasi, dan bertindak siswa dalam menghadapi peristiwa alam.</li>
|
93 |
-
<li>Menumbuhkan sikap peduli, tanggap, dan bertanggung jawab siswa terhadap pelestarian alam dan lingkungan hidup.</li>
|
94 |
-
<li>Menstimulasi minat dan bakat siswa dalam bidang sains, teknologi, seni, dan budaya.</li>
|
95 |
-
</ul>
|
96 |
-
<h3>Manfaat Bagi Guru</h3>
|
97 |
-
<p>Buku tema 8 kelas 1 ini juga memiliki banyak manfaat bagi guru, di antaranya adalah:</p>
|
98 |
-
<ul>
|
99 |
-
<li>Memudahkan guru dalam merencanakan, melaksanakan, dan mengevaluasi pembelajaran tematik yang sesuai dengan Kurikulum 2013.</li>
|
100 |
-
<li>Memfasilitasi guru dalam menggunakan berbagai sumber belajar yang relevan, variatif, dan menarik untuk mendukung pembelajaran tematik.</li>
|
101 |
-
<li>Membantu guru dalam mengintegrasikan berbagai mata pelajaran dalam satu tema yang utuh dan bermakna.</li>
|
102 |
-
<li>Mendorong guru dalam meningkatkan profesionalisme dan kreativitasnya dalam mengajar.</li>
|
103 |
-
</ul>
|
104 |
-
<h2>Kesimpulan</h2>
|
105 |
-
<p>Buku tema 8 kelas 1 adalah buku tematik yang membahas tentang peristiwa alam yang terjadi di sekitar kita. Buku ini dapat diunduh secara gratis dan legal melalui situs resmi Kementerian Pendidikan dan Kebudayaan. Buku ini memiliki empat subtema, yaitu peristiwa siang dan malam, kemarau, penghujan, dan bencana alam. Buku ini juga memiliki banyak manfaat bagi siswa dan guru dalam meningkatkan pengetahuan, keterampilan, sikap, dan nilai-nilai karakter yang berkaitan dengan peristiwa alam. Semoga artikel ini bermanfaat bagi Anda yang ingin download buku tema 8 kelas 1. Selamat belajar!</p>
|
106 |
-
<h2>FAQ</h2>
|
107 |
-
<p>Berikut adalah beberapa pertanyaan yang sering diajukan seputar buku tema 8 kelas 1:</p>
|
108 |
-
<ol>
|
109 |
-
<li><b>Apa saja sumber belajar lain yang dapat digunakan untuk mendukung pembelajaran tema 8 kelas 1?</b><br>Anda dapat menggunakan sumber belajar lain yang relevan dengan tema 8 kelas 1, seperti buku referensi, media audiovisual, internet, lingkungan sekitar, atau narasumber ahli.</li>
|
110 |
-
<li><b>Apa saja kegiatan penilaian yang dapat dilakukan untuk mengukur pencapaian kompetensi siswa pada tema 8 kelas 1?</b><br>Anda dapat melakukan kegiatan penilaian yang sesuai dengan indikator pencapaian kompetensi yang terdapat pada setiap subtema. Beberapa contoh kegiatan penilaian yang dapat dilakukan adalah tes tertulis, tes lisan, tes praktik, observasi, penugasan, portofolio, proyek, atau kinerja.</li>
|
111 |
-
<li><b>Bagaimana cara memberikan umpan balik yang efektif kepada siswa setelah melakukan penilaian pada tema 8 kelas 1?</b><br>Anda dapat memberikan umpan balik yang efektif kepada siswa dengan cara yang berikut ini:</li>
|
112 |
-
<ul>
|
113 |
-
<li>Memberikan umpan balik secara tepat waktu, jelas, dan konstruktif.</li>
|
114 |
-
<li>Memberikan umpan balik yang sesuai dengan tujuan, kriteria, dan standar penilaian.</li>
|
115 |
-
<li>Memberikan umpan balik yang berfokus pada proses dan hasil belajar siswa, bukan pada kepribadian atau kemampuan siswa.</li>
|
116 |
-
<li>Memberikan umpan balik yang mengakui keberhasilan dan kekurangan siswa, serta memberikan saran perbaikan.</li>
|
117 |
-
<li>Memberikan umpan balik yang menstimulasi motivasi dan minat belajar siswa.</li>
|
118 |
-
</ul>
|
119 |
-
<li><b>Apa saja tantangan yang mungkin dihadapi oleh guru dan siswa dalam pembelajaran tema 8 kelas 1?</b><br>Beberapa tantangan yang mungkin dihadapi oleh guru dan siswa dalam pembelajaran tema 8 kelas 1 adalah sebagai berikut:</li>
|
120 |
-
<ul>
|
121 |
-
<li>Keterbatasan sumber belajar yang relevan, variatif, dan menarik untuk mendukung pembelajaran tematik.</li>
|
122 |
-
<li>Kesulitan dalam mengintegrasikan berbagai mata pelajaran dalam satu tema yang utuh dan bermakna.</li>
|
123 |
-
<li>Kurangnya kesiapan dan keterampilan guru dalam menggunakan pendekatan saintifik dan penilaian autentik.</li>
|
124 |
-
<li>Kurangnya minat dan motivasi siswa dalam belajar tentang peristiwa alam.</li>
|
125 |
-
<li>Kurangnya kesadaran dan tanggung jawab siswa terhadap pelestarian alam dan lingkungan hidup.</li>
|
126 |
-
</ul>
|
127 |
-
<li><b>Bagaimana cara mengatasi tantangan-tantangan tersebut?</b><br>Beberapa cara yang dapat dilakukan untuk mengatasi tantangan-tantangan tersebut adalah sebagai berikut:</li>
|
128 |
-
<ul>
|
129 |
-
<li>Memanfaatkan sumber belajar yang tersedia di sekitar kita, seperti lingkungan, media, internet, atau narasumber ahli.</li>
|
130 |
-
<li>Melakukan perencanaan, pelaksanaan, dan evaluasi pembelajaran tematik secara kolaboratif dengan guru-guru lain.</li>
|
131 |
-
<li>Meningkatkan kompetensi profesional dan kreativitas guru melalui pelatihan, bimbingan, atau diskusi.</li>
|
132 |
-
<li>Meningkatkan minat dan motivasi siswa dalam belajar tentang peristiwa alam dengan menggunakan metode dan media yang menarik, interaktif, dan menyenangkan.</li>
|
133 |
-
<li>Meningkatkan kesadaran dan tanggung jawab siswa terhadap pelestarian alam dan lingkungan hidup dengan memberikan contoh nyata, tindakan positif, dan apresiasi.</li>
|
134 |
-
</ul>
|
135 |
-
</ol>
spaces/1phancelerku/anime-remove-background/Download Gorebox Animosity 10.0.3 APK and Enter the Chaotic World of Gorebox a Game of Extreme Violence.md
DELETED
@@ -1,115 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Gorebox Animosity 10.0.3 APK: A Sandbox Game of Extreme Violence</h1>
|
3 |
-
<p>Do you love games that let you unleash your inner demon and cause mayhem and destruction? If so, you might want to check out Gorebox Animosity 10.0.3 APK, a physics-based sandbox game of extreme violence. In this game, you can use a vast arsenal of brutal weapons, explosive devices, interactive ragdolls, fearsome enemies, advanced turrets, vehicles, and a cutting-edge blood and dismemberment system to create your own scenarios of carnage and chaos.</p>
|
4 |
-
<h2>What is Gorebox Animosity?</h2>
|
5 |
-
<p>Gorebox Animosity is a sandbox game that gives you complete freedom to do whatever you want in a chaotic world. You can choose from different maps, game modes, weapons, devices, ragdolls, enemies, turrets, and vehicles to create your own scenarios of gore and violence.</p>
|
6 |
-
<h2>gorebox animosity 10.0.3 apk</h2><br /><p><b><b>DOWNLOAD</b> ⚡ <a href="https://jinyurl.com/2uNKAl">https://jinyurl.com/2uNKAl</a></b></p><br /><br />
|
7 |
-
<h3>A physics-based sandbox game</h3>
|
8 |
-
<p>Gorebox Animosity uses realistic physics to simulate the effects of your actions on the environment and the ragdolls. You can see how the ragdolls react to different forces, impacts, explosions, cuts, burns, electrocutions, and more. You can also manipulate the ragdolls with your fingers or use tools like ropes, hooks, magnets, springs, balloons, etc.</p>
|
9 |
-
<h3>A vast arsenal of brutal weapons and devices</h3>
|
10 |
-
<p>Gorebox Animosity offers you a wide range of weapons and devices to inflict pain and damage on the ragdolls and enemies. You can use guns, knives, axes, hammers, chainsaws, grenades, rockets, mines, bombs, nukes, lasers, flamethrowers, plasma cannons, tesla coils, etc.</p>
|
11 |
-
<h3>Interactive ragdolls, enemies, turrets, and vehicles</h3>
|
12 |
-
<p>Gorebox Animosity features interactive ragdolls that you can customize with different skins, clothes, accessories, hairstyles, etc. You can also spawn different types of enemies like zombies, mutants, soldiers, robots, aliens, etc. to fight against or with them. You can also use turrets like machine guns, shotguns, snipers, flak cannons, etc. to defend yourself or attack others. You can also drive vehicles like cars, trucks, tanks, helicopters, jets, etc. to explore the maps or cause more destruction.</p>
|
13 |
-
<h2>What's new in Gorebox Animosity 10.0.3 APK?</h2>
|
14 |
-
<p>Gorebox Animosity 10.0.3 APK is the latest version of the game that was released on August 27th, 2022. This version includes a big patch with bug fixes and improvements, as well as some new features like custom names and skins for players and ragdolls, more synchronizations and animations for multiplayer mode, and some minor changes in the user interface and the game mechanics. Here are some of the highlights of the new version:</p> <h3>A big patch with bug fixes and improvements</h3>
|
15 |
-
<p>Gorebox Animosity 10.0.3 APK fixes some of the bugs and glitches that were reported by the players, such as crashes, freezes, lag, errors, etc. It also improves the performance, stability, and compatibility of the game with different devices and platforms. It also optimizes the graphics, sounds, and controls of the game for a better gaming experience.</p>
|
16 |
-
<h3>Custom names and skins for players and ragdolls</h3>
|
17 |
-
<p>Gorebox Animosity 10.0.3 APK allows you to customize your name and skin in the game. You can choose from different colors, patterns, textures, stickers, etc. to create your own unique look. You can also change the name and skin of the ragdolls that you spawn or interact with. You can save your customizations and use them in different game modes and maps.</p>
|
18 |
-
<h3>More synchronizations and animations for multiplayer mode</h3>
|
19 |
-
<p>Gorebox Animosity 10.0.3 APK enhances the multiplayer mode of the game by adding more synchronizations and animations for the players and the ragdolls. You can see how other players move, interact, shoot, explode, etc. in real-time. You can also see how the ragdolls react to different actions and events in the game. You can also chat with other players using text or voice messages.</p>
|
20 |
-
<h2>How to download and install Gorebox Animosity 10.0.3 APK?</h2>
|
21 |
-
<p>If you want to download and install Gorebox Animosity 10.0.3 APK on your device, you need to follow these simple steps:</p>
|
22 |
-
<p>gorebox animosity update 10.0.3 download<br />
|
23 |
-
gorebox animosity 10.0.3 apk mediafire<br />
|
24 |
-
gorebox animosity multiplayer gameplay<br />
|
25 |
-
gorebox animosity cheat code<br />
|
26 |
-
gorebox animosity discord server<br />
|
27 |
-
gorebox animosity trailer event<br />
|
28 |
-
gorebox animosity cinematic 3<br />
|
29 |
-
gorebox animosity full soundtrack<br />
|
30 |
-
gorebox animosity scariest moments<br />
|
31 |
-
gorebox animosity how to open the door<br />
|
32 |
-
gorebox animosity custom names and skins<br />
|
33 |
-
gorebox animosity more synchronizations<br />
|
34 |
-
gorebox animosity fixed animations<br />
|
35 |
-
gorebox animosity big patch<br />
|
36 |
-
gorebox animosity net energy gain<br />
|
37 |
-
gorebox animosity physics-based sandbox game<br />
|
38 |
-
gorebox animosity brutal weapons<br />
|
39 |
-
gorebox animosity explosive devices<br />
|
40 |
-
gorebox animosity interactive ragdolls<br />
|
41 |
-
gorebox animosity fearsome enemies<br />
|
42 |
-
gorebox animosity advanced turrets<br />
|
43 |
-
gorebox animosity vehicles<br />
|
44 |
-
gorebox animosity blood and dismemberment system<br />
|
45 |
-
gorebox animosity flight with plasma field<br />
|
46 |
-
gorebox animosity challenge from Suddy<br />
|
47 |
-
gorebox animosity mega collab<br />
|
48 |
-
gorebox animosity best doctor in the world<br />
|
49 |
-
gorebox animosity 8.3.2 update<br />
|
50 |
-
gorebox animosity 8.5.4 update<br />
|
51 |
-
gorebox animosity 9.0.0 update<br />
|
52 |
-
gorebox animosity 10.0.2 update<br />
|
53 |
-
gorebox apk free download apkcombo<br />
|
54 |
-
download gorebox android game apkcombo<br />
|
55 |
-
enter the chaotic world of gorebox apkcombo<br />
|
56 |
-
unleash your inner demon with gorebox apkcombo<br />
|
57 |
-
how to install gorebox apk on android apkcombo<br />
|
58 |
-
what's new in gorebox apk version 10.0.3 apkcombo<br />
|
59 |
-
how to play gorebox online with friends apkcombo<br />
|
60 |
-
how to create custom maps in gorebox apkcombo<br />
|
61 |
-
how to mod gorebox with new weapons and skins apkcombo</p>
|
62 |
-
<h3>Download the APK file from a trusted source</h3>
|
63 |
-
<p>The first step is to download the APK file of Gorebox Animosity 10.0.3 from a trusted source. You can use this link to download the file directly from Google Drive. The file size is about 100 MB, so make sure you have enough space on your device.</p>
|
64 |
-
<h3>Enable unknown sources on your device settings</h3>
|
65 |
-
<p>The second step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the official app store. To do this, go to your device settings, then security or privacy, then unknown sources or install unknown apps, then toggle on the option or allow from this source.</p>
|
66 |
-
<h3>Install the APK file and launch the game</h3>
|
67 |
-
<p>The third step is to install the APK file and launch the game. To do this, locate the downloaded file on your device storage, then tap on it to start the installation process. Follow the instructions on the screen to complete the installation. Once done, you can launch the game from your app drawer or home screen.</p>
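<p>If you prefer to install from a computer instead of tapping through the file manager, the APK can also be sideloaded over USB with adb from the Android platform tools. The Python sketch below is only an illustration: it assumes adb is installed and on your PATH, USB debugging is enabled on the phone, and the downloaded file was saved under the hypothetical name gorebox_animosity_10.0.3.apk.</p>
<pre><code># Illustrative sketch: sideload a downloaded APK over USB with adb.
# Assumes the Android platform tools (adb) are on the PATH and USB
# debugging is enabled on the phone; the APK file name is hypothetical.
import subprocess

APK_PATH = "gorebox_animosity_10.0.3.apk"  # hypothetical file name

# List connected devices first to confirm the phone is visible to adb.
subprocess.run(["adb", "devices"], check=True)

# "adb install -r" installs the APK, replacing an existing install if present.
subprocess.run(["adb", "install", "-r", APK_PATH], check=True)
</code></pre>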
|
68 |
-
<h2>How to play Gorebox Animosity 10.0.3 APK?</h2>
|
69 |
-
<p>If you want to play Gorebox Animosity 10.0.3 APK on your device, you need to follow these simple steps:</p>
|
70 |
-
<h3>Choose a map and a game mode</h3>
|
71 |
-
<p>The first step is to choose a map and a game mode that you want to play. You can choose from different maps like city, desert, island, forest, etc., each with its own features and challenges. You can also choose from different game modes like sandbox, survival, deathmatch, capture the flag, etc., each with its own rules and objectives.</p>
|
72 |
-
<h3>Use the joystick and buttons to move and interact</h3>
|
73 |
-
<p>The second step is to use the joystick and buttons to move and interact in the game. You can use the joystick on the left side of the screen to move around and rotate your camera angle. You can use the buttons on the right side of the screen to jump, crouch, shoot, reload, switch weapons, interact with objects, etc.</p>
|
74 |
-
<h3>Explore the chaotic world and unleash your inner demon</h3>
|
75 |
-
<p>The third step is to explore the chaotic world and unleash your inner demon in the game. You can use your weapons and devices to inflict pain and damage on the ragdolls and enemies that you encounter or spawn in the game. You can also use your fingers or tools to manipulate them in different ways. You can also drive vehicles, use turrets, set traps, etc. to cause more mayhem and destruction. You can also play with other players online or offline in multiplayer mode.</p>
|
76 |
-
<h2>Why should you play Gorebox Animosity 10.0.3 APK?</h2>
|
77 |
-
<p>If you are still wondering why you should play Gorebox Animosity 10.0.3 APK on your device, here are some of the reasons why you should give it a try:</p> <h3>A fun and addictive game for fans of gore and violence</h3>
|
78 |
-
<p>Gorebox Animosity 10.0.3 APK is a game that will appeal to fans of gore and violence. If you enjoy games that let you cause mayhem and destruction, you will love this game. You can have fun and relax by playing this game and releasing your stress and anger. You can also challenge yourself by trying different game modes and maps.</p>
|
79 |
-
<h3>A cutting-edge blood and dismemberment system</h3>
|
80 |
-
<p>Gorebox Animosity 10.0.3 APK features a cutting-edge blood and dismemberment system that makes the game more realistic and immersive. You can see how the blood splatters, drips, stains, and pools on the ground and the objects. You can also see how the ragdolls and enemies get cut, torn, ripped, smashed, burned, etc. in different ways. You can also customize the amount and color of the blood in the game settings.</p>
|
81 |
-
<h3>A creative and customizable game with endless possibilities</h3>
|
82 |
-
<p>Gorebox Animosity 10.0.3 APK is a game that gives you complete freedom to create your own scenarios of gore and violence. You can customize your name, skin, weapons, devices, ragdolls, enemies, turrets, vehicles, etc. in the game. You can also use your imagination and creativity to make your own maps, game modes, stories, etc. in the game. You can also share your creations with other players online or offline.</p>
|
83 |
-
<h2>Conclusion</h2>
|
84 |
-
<p>Gorebox Animosity 10.0.3 APK is a physics-based sandbox game of extreme violence that lets you unleash your inner demon and cause mayhem and destruction in a chaotic world. You can use a vast arsenal of brutal weapons, explosive devices, interactive ragdolls, fearsome enemies, advanced turrets, vehicles, and a cutting-edge blood and dismemberment system to create your own scenarios of carnage and chaos. You can also customize your name, skin, weapons, devices, ragdolls, enemies, turrets, vehicles, etc. in the game. You can also play with other players online or offline in multiplayer mode. You can download and install Gorebox Animosity 10.0.3 APK on your device by following the simple steps mentioned above. If you are a fan of gore and violence, you should definitely try this game.</p>
|
85 |
-
<h2>FAQs</h2>
|
86 |
-
<p>Here are some of the frequently asked questions about Gorebox Animosity 10.0.3 APK:</p>
|
87 |
-
<table>
|
88 |
-
<tr>
|
89 |
-
<th>Question</th>
|
90 |
-
<th>Answer</th>
|
91 |
-
</tr>
|
92 |
-
<tr>
|
93 |
-
<td>Is Gorebox Animosity 10.0.3 APK free to play?</td>
|
94 |
-
<td>Yes, Gorebox Animosity 10.0.3 APK is free to play. You can download and install it on your device without paying anything. However, the game may contain ads or in-app purchases that you can choose to buy or not.</td>
|
95 |
-
</tr>
|
96 |
-
<tr>
|
97 |
-
<td>Is Gorebox Animosity 10.0.3 APK safe to play?</td>
|
98 |
-
<td>Yes, Gorebox Animosity 10.0.3 APK is safe to play. The game does not contain any viruses or malware that can harm your device or data. However, the game is rated for mature audiences only due to its graphic content of gore and violence. You should play it at your own discretion and responsibility.</td>
|
99 |
-
</tr>
|
100 |
-
<tr>
|
101 |
-
<td>Is Gorebox Animosity 10.0.3 APK compatible with my device?</td>
|
102 |
-
<td>Gorebox Animosity 10.0.3 APK is compatible with most devices that run on Android 4.4 or higher. However, the game may not work properly on some devices due to different specifications or settings. You should check the compatibility of your device before downloading and installing the game.</td>
|
103 |
-
</tr>
|
104 |
-
<tr>
|
105 |
-
<td>How can I contact the developer of Gorebox Animosity 10.0.3 APK?</td>
|
106 |
-
<td>You can contact the developer of Gorebox Animosity 10.0.3 APK by sending an email to [email protected]. You can also follow their social media accounts on Facebook, Twitter, Instagram, YouTube, etc. to get updates, news, tips, etc. about the game.</td>
|
107 |
-
</tr>
|
108 |
-
<tr>
|
109 |
-
<td>How can I support the development of Gorebox Animosity 10.0.3 APK?</td>
|
110 |
-
<td>You can support the development of Gorebox Animosity 10.0.3 APK by rating and reviewing the game on the app store or Google Play Store. You can also share the game with your friends and family who might enjoy it. You can also buy some in-app purchases or donate some money to the developer to support their work and show your appreciation. You can also join their Patreon page or Discord server to get exclusive rewards, access, and feedback.</td>
|
111 |
-
</tr>
|
112 |
-
</table>
|
113 |
-
<p>I hope you enjoyed reading this article and learned something new about Gorebox Animosity 10.0.3 APK. If you have any questions, comments, or suggestions, feel free to leave them below. Thank you for your time and attention.</p>
spaces/1phancelerku/anime-remove-background/Download RPG Isekai Rondo MOD APK for Android - Enjoy the Ultimate Job in a Parallel World.md
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download RPG Isekai Rondo Mod Apk and Enjoy a Thrilling Adventure in a Parallel World</h1>
|
3 |
-
<p>Do you love playing role-playing games that take you to a different world where you can become a hero and save the day? If yes, then you should try RPG Isekai Rondo, a pixel-art RPG that gives you a chance to experience a parallel universe with exciting turn-based battles and retro-style graphics. In this article, we will tell you what RPG Isekai Rondo is, how to download and install its mod apk version on your Android device, and how to play it with some useful tips and tricks. Let's get started!</p>
|
4 |
-
<h2>download rpg isekai rondo mod apk</h2><br /><p><b><b>Download Zip</b> ⇒ <a href="https://jinyurl.com/2uNO6U">https://jinyurl.com/2uNO6U</a></b></p><br /><br />
|
5 |
-
<h2>What is RPG Isekai Rondo?</h2>
|
6 |
-
<p>RPG Isekai Rondo is a game developed by KEMCO, a Japanese company that specializes in creating RPGs for mobile platforms. The game was released in February 2023 for Android and iOS devices. The game belongs to the isekai genre, which typically involves a person being transported to a parallel world, usually with fantasy elements.</p>
|
7 |
-
<h3>The story and the gameplay of RPG Isekai Rondo</h3>
|
8 |
-
<p>The game tells the story of Sho, an ordinary man who works at an exploitative company in modern Japan. One day, he gets into an accident and finds himself reincarnated in a parallel world as Shaw, a sage with ultra rare passive skills. He meets Viola, a hero who is on a mission to defeat the overlord who threatens the world. Together, they embark on a thrilling adventure that will test their skills and courage.</p>
|
9 |
-
<p>The gameplay of RPG Isekai Rondo is based on turn-based battles, where you can use various skills and items to defeat your enemies. You can also summon spirits, manage mana plants, and acquire monster skills to enhance your abilities. You can also explore different locations, complete quests, raise your guild rank, challenge other parties, participate in arenas, and loot dungeons for treasure chests.</p>
|
10 |
-
<h3>The features and the benefits of RPG Isekai Rondo mod apk</h3>
|
11 |
-
<p>If you want to enjoy RPG Isekai Rondo with more ease and fun, you should download its mod apk version, which offers several advantages over the original version. Some of the features and benefits of RPG Isekai Rondo mod apk are:</p>
|
12 |
-
<p>download rpg isekai rondo mod apk free<br />
|
13 |
-
download rpg isekai rondo mod apk latest version<br />
|
14 |
-
download rpg isekai rondo mod apk unlimited money<br />
|
15 |
-
download rpg isekai rondo mod apk offline<br />
|
16 |
-
download rpg isekai rondo mod apk for android<br />
|
17 |
-
download rpg isekai rondo mod apk premium edition<br />
|
18 |
-
download rpg isekai rondo mod apk no ads<br />
|
19 |
-
download rpg isekai rondo mod apk full unlocked<br />
|
20 |
-
download rpg isekai rondo mod apk english<br />
|
21 |
-
download rpg isekai rondo mod apk hack<br />
|
22 |
-
download rpg isekai rondo mod apk cheat<br />
|
23 |
-
download rpg isekai rondo mod apk mega mod<br />
|
24 |
-
download rpg isekai rondo mod apk update<br />
|
25 |
-
download rpg isekai rondo mod apk 1.1.3g<br />
|
26 |
-
download rpg isekai rondo mod apk kemco<br />
|
27 |
-
download rpg isekai rondo mod apk exe-create<br />
|
28 |
-
download rpg isekai rondo mod apk role playing game<br />
|
29 |
-
download rpg isekai rondo mod apk parallel universe<br />
|
30 |
-
download rpg isekai rondo mod apk sage job<br />
|
31 |
-
download rpg isekai rondo mod apk overlord battle<br />
|
32 |
-
download rpg isekai rondo mod apk passive skills<br />
|
33 |
-
download rpg isekai rondo mod apk dungeon quest<br />
|
34 |
-
download rpg isekai rondo mod apk guild rank<br />
|
35 |
-
download rpg isekai rondo mod apk battle arena<br />
|
36 |
-
download rpg isekai rondo mod apk treasure chest<br />
|
37 |
-
download RPG Isekai Rondo APK (Android Game) - Free Download - APKCombo<br />
|
38 |
-
RPG Isekai Rondo APK (Android Game) - Free Download APKCombo Games Role Playing RPG Isekai Rondo RPG Isekai Rondo 1.1.3g KEMCO Download APK (177 MB)<br />
|
39 |
-
Quest for a parallel universe after being reborn with the ultimate job! Description Role Playing Advertisement Latest Version Version 1.1.3g (113) Update Mar 19, 2023 Developer KEMCO Category Role Playing Google Play ID kemco.execreate.isekai Installs 50,000+ App APKs Isekai Rondo APK 異世界輪舞 APK RPG Isekai Rondo GAME Sho, a young man who spends his days in an exploitative company in modern Japan, is reincarnated into a parallel universe as Shaw, due to an accident! While the job given in his new life is 'Unemployed' and hardships continue with a bad luck party, one day he acquires the ultimate job as a Sage, suddenly becoming the strongest with ultra rare passive skills! Aiming for the long-awaited mellow life he finds himself in a battle to defeat the Overlord after meeting the Hero Viola. With exceptional passive skills that for example can change the future by returning before Game Over, get rid of enemies in turn-based battles by summoning spirits or managing mana plants, or even use the skills of monsters! Complete quests and raise the guild rank so you can challenge deeper dungeon or other adventuring parties, too! With other elements such as a battle arena and the dungeon where treasure chests are automatically generated, your adventure against the Overlord's Army will surely be an unparalleled quest to finally gain a calm, modest life! * This app contains ads in some screens. The game itself can be played in its entirety for free. * Ads can be removed through in-app purchases by purchasing the Ad Eliminator. Please note that the Ad Eliminator of the freemium edition does not include the bonus 150 Magistones. * A premium edition with 150 bonus Magistones is also available. [6](https://play.google.com/store/apps/details?id=kemco.execreate.isekaipremium) (Save data cannot be transferred between the Premium and freemium editions.) [IMPORTANT NOTICE] Your use of the application requires your agreement to the following EULA and 'Privacy Policy and Notice'. If you do not agree, please do not download our application. End User License Agreement: [5](http://kemco.jp/eula/index.html) Privacy Policy and Notice: [4](http://www.kemco.jp/app_pp/privacy.html) [Game Controller] - Partially optimized [Languages] - English, Japanese [SD Card Storage] - Enabled (Save backup/transfer are not supported.) [Non-Supported Devices] This app has generally been tested to work on any mobile device released in Japan. We cannot guarantee full support on other devices. If you have the Developer Options enabled in your</p>
|
40 |
-
<ul>
|
41 |
-
<li>You can access the menu mod, which allows you to customize various aspects of the game, such as speed, damage, defense, etc.</li>
|
42 |
-
<li>You can get unlimited currency, such as gold and magistones, which you can use to buy items, upgrade skills, unlock features, etc.</li>
|
43 |
-
<li>You can unlock all the characters, elements, skills, passives, spirits, plants, monsters, etc., without having to spend time or money.</li>
|
44 |
-
<li>You can get unlimited moves in battles, which means you can attack as many times as you want without waiting for your turn.</li>
|
45 |
-
<li>You can bypass the ads, verification, and root checks that may interrupt your gaming experience.</li>
|
46 |
-
</ul>
|
47 |
-
<p>With these features and benefits, you can enjoy RPG Isekai Rondo mod apk with more freedom and fun. You can also save your time and money by getting everything you need for free.</p>
|
48 |
-
<h2>How to Download and Install RPG Isekai Rondo Mod Apk on Your Android Device</h2>
|
49 |
-
<p>Now that you know what RPG Isekai Rondo mod apk is and what it offers, you may be wondering how to download and install it on your Android device. Don't worry, we will guide you through the process step by step. Just follow these simple instructions:</p>
|
50 |
-
<h3>Step 1: Enable unknown sources on your device</h3>
|
51 |
-
<p>Before you can install RPG Isekai Rondo mod apk on your device, you need to enable unknown sources, which will allow you to install apps from sources other than the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and toggle it on. You may see a warning message, but just ignore it and confirm your choice.</p>
|
52 |
-
<h3>Step 2: Download the RPG Isekai Rondo mod apk file from a trusted source</h3>
|
53 |
-
<p>Next, you need to download the RPG Isekai Rondo mod apk file from a trusted source. There are many websites that claim to offer the mod apk file, but not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should be careful and choose a reputable source that has positive reviews and feedback from other users. One such source is [RPG Isekai Rondo Mod Apk Download], which provides the latest version of the mod apk file with all the features and benefits mentioned above.</p>
|
54 |
-
<p>To download the RPG Isekai Rondo mod apk file from this source, just click on the link above and you will be redirected to the download page. There, you will see a button that says "Download Now". Click on it and wait for the download to start. The file size is about 100 MB, so it may take a few minutes depending on your internet speed.</p>
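<p>Since the previous step warns that some download sites bundle viruses or malware, it is worth sanity-checking the file you actually received before installing it. The minimal Python sketch below computes the SHA-256 checksum of the downloaded file with the standard hashlib module; the file name is a hypothetical example, and the printed hash would be compared against a checksum published by the source you trust, if it provides one.</p>
<pre><code># Minimal sketch: compute the SHA-256 checksum of a downloaded APK so it
# can be compared against a checksum published by the download source.
# The file name below is a hypothetical example.
import hashlib

APK_PATH = "rpg_isekai_rondo_mod.apk"  # hypothetical file name

sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    # Read in chunks so large files do not need to fit in memory at once.
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        sha256.update(chunk)

print("SHA-256:", sha256.hexdigest())
</code></pre>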
|
55 |
-
<h3>Step 3: Locate and install the RPG Isekai Rondo mod apk file on your device</h3>
|
56 |
-
<p>Once the download is complete, you need to locate and install the RPG Isekai Rondo mod apk file on your device. To do this, go to your device's file manager and find the folder where you saved the downloaded file. It may be in your downloads folder or in any other folder that you specified. Tap on the file and you will see a pop-up window that asks you to install the app. Tap on "Install" and wait for the installation to finish.</p>
|
57 |
-
<h3>Step 4: Launch the game and enjoy the mod features</h3>
|
58 |
-
<p>Congratulations! You have successfully installed RPG Isekai Rondo mod apk on your device. Now, you can launch the game by tapping on its icon on your home screen or app drawer. You will see a menu mod icon on the top right corner of the screen. Tap on it and you will be able to access various options to customize your game settings. You can also check your currency, characters, skills, etc., and see that they are all unlimited and unlocked. Enjoy playing RPG Isekai Rondo with all the mod features!</p>
|
59 |
-
<h2>How to Play RPG Isekai Rondo with Tips and Tricks</h2>
|
60 |
-
<p>RPG Isekai Rondo is a fun and addictive game that will keep you entertained for hours. However, it can also be challenging and complex at times, especially if you are new to the game or want to advance faster. That's why we have prepared some tips and tricks that will help you play RPG Isekai Rondo better and smarter. Here they are:</p>
|
61 |
-
<h3>Tip 1: Choose your character and element wisely</h3>
|
62 |
-
<p>In RPG Isekai Rondo, you can choose from four different characters: Shaw (sage), Viola (hero), Lila (priestess), or Leon (knight). Each character has a different element: fire, water, wind, or earth. Each element has its own advantages and disadvantages against other elements. For example, fire is strong against wind but weak against water. You should choose your character and element based on your preference and strategy.</p>
|
63 |
-
<h3>Tip 2: Understand the effects and passives of your skills</h3>
|
64 |
-
<p>Each character has four skills that they can use in battles: one normal skill, one special skill, one ultimate skill, and one and use wind skills instead. You can also use skills that lower the enemy's resistance level or increase your own element power.</p>
|
65 |
-
<h3>Tip 4: Earn extra points by joining guild quests and participating in daily tasks</h3>
|
66 |
-
<p>One way to earn more points and rewards in RPG Isekai Rondo is to join guild quests and participate in daily tasks. Guild quests are missions that you can complete with other players in your guild. They can range from defeating a certain number of enemies, collecting a certain amount of items, or clearing a certain stage. By completing guild quests, you can earn guild points, which you can use to exchange for various items and features in the guild shop. You can also increase your guild rank and reputation by completing guild quests.</p>
|
67 |
-
<p>Daily tasks are tasks that you can complete every day to earn extra points and rewards. They can include logging in, playing for a certain amount of time, using a certain skill, etc. By completing daily tasks, you can earn daily points, which you can use to spin the daily roulette and get various prizes, such as gold, magistones, items, etc. You can also get bonus rewards by completing all the daily tasks.</p>
|
68 |
-
<h3>Tip 5: Explore the battle arena and the dungeon for more rewards and challenges</h3>
|
69 |
-
<p>If you want to test your skills and challenge yourself in RPG Isekai Rondo, you should explore the battle arena and the dungeon. The battle arena is a place where you can fight against other players in real-time battles. You can choose from three modes: ranked, casual, or friendly. By winning battles, you can earn arena points, which you can use to exchange for various items and features in the arena shop. You can also increase your arena rank and reputation by winning battles.</p>
|
70 |
-
<p>The dungeon is a place where you can explore different floors and rooms filled with enemies, traps, and treasures. You can choose from three difficulties: easy, normal, or hard. By clearing floors and rooms, you can earn dungeon points, which you can use to exchange for various items and features in the dungeon shop. You can also find rare items and monsters in the dungeon.</p>
|
71 |
-
<h2>Conclusion</h2>
|
72 |
-
<p>RPG Isekai Rondo is a game that will give you a lot of fun and excitement as you travel to a parallel world and become a powerful sage. You can enjoy the game even more by downloading its mod apk version, which gives you access to unlimited currency, characters, skills, etc. You can also play the game better and smarter by following our tips and tricks on how to choose your character and element, understand your skills, observe your enemies, earn extra points, and explore the battle arena and the dungeon. We hope that this article has helped you learn how to download RPG Isekai Rondo mod apk and enjoy a thrilling adventure in a parallel world.</p>
|
73 |
-
<h2>FAQs</h2>
|
74 |
-
<p>Here are some frequently asked questions about RPG Isekai Rondo mod apk:</p>
|
75 |
-
<h4>Q: Is RPG Isekai Rondo mod apk safe to download and install?</h4>
|
76 |
-
<p>A: Yes, RPG Isekai Rondo mod apk is safe to download and install as long as you get it from a trusted source like [RPG Isekai Rondo Mod Apk Download]. However, you should always be careful when downloading any mod apk file from unknown sources as they may contain harmful viruses or malware.</p>
|
77 |
-
<h4>Q: Do I need to root my device to use RPG Isekai Rondo mod apk?</h4>
|
78 |
-
<p>A: No, you do not need to root your device to use RPG Isekai Rondo mod apk. The mod apk file will work on any Android device without requiring any root access or permission.</p>
|
79 |
-
<h4>Q: Will RPG Isekai Rondo mod apk affect my original game data?</h4>
|
80 |
-
<p>A: No, RPG Isekai Rondo mod apk will not affect your original game data as it will create a separate folder for its data on your device. You can still play the original version of the game without any problem.</p>
|
81 |
-
<h4>Q: How do I update RPG Isekai Rondo mod apk?</h4>
|
82 |
-
<p>A: To update RPG Isekai Rondo mod apk, you need to download the latest version of the mod apk file from [RPG Isekai Rondo Mod Apk Download] and install it on your device. You do not need to uninstall the previous version of the mod apk file as it will overwrite it automatically.</p>
|
83 |
-
<h4>Q: How do I contact the developer of RPG Isekai Rondo mod apk?</h4>
|
84 |
-
<p>A: If you have any questions or feedback about RPG Isekai Rondo mod apk, you can contact the developer of the mod apk file by visiting their website [RPG Isekai Rondo Mod Apk Download]. You can also follow them on their social media accounts [RPG Isekai Rondo Mod Apk Facebook] and [RPG Isekai Rondo Mod Apk Twitter]. They are very responsive and friendly and will answer your queries as soon as possible.</p>
spaces/1phancelerku/anime-remove-background/Download Red Ball 4 Mod APK and Play 100 Square Stages with PremiumUnlocked Benefits.md
DELETED
@@ -1,93 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Red Ball 4 Mod 100 Square APK: A Fun and Challenging Game for Android Users</h1>
|
3 |
-
<p>If you are looking for a fun and challenging game to play on your Android device, you might want to check out Red Ball 4 Mod 100 Square APK. This is a modified version of the popular Red Ball 4 game, which has over 100 million downloads on Google Play Store. In this article, we will tell you what Red Ball 4 is, what the mod 100 square apk is, why you should download it, and how to do it. We will also highlight some of the features of this amazing game that will keep you entertained for hours.</p>
|
4 |
-
<h2>red ball 4 mod 100 square apk</h2><br /><p><b><b>Download</b> ……… <a href="https://jinyurl.com/2uNT9B">https://jinyurl.com/2uNT9B</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<h3>What is Red Ball 4?</h3>
|
7 |
-
<p>Red Ball 4 is a physics-based platformer game developed by FDG Entertainment GmbH & Co.KG. The game follows the adventures of a red ball who has to save the world from the evil black squares who want to turn everything into cubes. The game has four episodes, each with 15 levels of increasing difficulty. The game also has a level editor where you can create your own levels and share them with other players.</p>
|
8 |
-
<h3>What is the mod 100 square apk?</h3>
|
9 |
-
<p>The mod 100 square apk is a modified version of Red Ball 4 that gives you some extra features and advantages. The mod allows you to have unlimited lives and coins, which means you can play as long as you want without worrying about dying or running out of money. The mod also lets you change the appearance of your ball and choose from different colors and patterns. The mod also adds some new elements to the game, such as epic boss fights, cloud service, and HID controller support.</p>
|
10 |
-
<h3>Why should you download it?</h3>
|
11 |
-
<p>You should download Red Ball 4 Mod 100 Square APK if you want to enjoy the game to the fullest. The mod gives you more freedom and flexibility to play the game as you like. You can customize your ball, explore more levels, and challenge yourself with harder enemies. The mod also makes the game more fun and exciting with its groovy music and stunning graphics. The mod is also easy to download and install, as we will show you in the next section.</p>
|
12 |
-
<p>red ball 4 premium unlocked apk download<br />
|
13 |
-
red ball 4 mod apk unlimited lives and stars<br />
|
14 |
-
red ball 4 hack apk latest version<br />
|
15 |
-
red ball 4 mod apk android 1<br />
|
16 |
-
red ball 4 mod apk revdl<br />
|
17 |
-
red ball 4 mod apk no ads<br />
|
18 |
-
red ball 4 mod apk all levels unlocked<br />
|
19 |
-
red ball 4 mod apk free shopping<br />
|
20 |
-
red ball 4 mod apk unlimited money<br />
|
21 |
-
red ball 4 mod apk happymod<br />
|
22 |
-
red ball 4 mod apk rexdl<br />
|
23 |
-
red ball 4 mod apk an1<br />
|
24 |
-
red ball 4 mod apk offline<br />
|
25 |
-
red ball 4 mod apk online<br />
|
26 |
-
red ball 4 mod apk pure<br />
|
27 |
-
red ball 4 mod apk uptodown<br />
|
28 |
-
red ball 4 mod apk apkpure<br />
|
29 |
-
red ball 4 mod apk mob.org<br />
|
30 |
-
red ball 4 mod apk android oyun club<br />
|
31 |
-
red ball 4 mod apk andropalace<br />
|
32 |
-
red ball 4 mod apk blackmod<br />
|
33 |
-
red ball 4 mod apk cheat<br />
|
34 |
-
red ball 4 mod apk dlandroid<br />
|
35 |
-
red ball 4 mod apk everything unlocked<br />
|
36 |
-
red ball 4 mod apk for pc<br />
|
37 |
-
red ball 4 mod apk gamestechy<br />
|
38 |
-
red ball 4 mod apk ihackedit<br />
|
39 |
-
red ball 4 mod apk lenov.ru<br />
|
40 |
-
red ball 4 mod apk mega<br />
|
41 |
-
red ball 4 mod apk onhax<br />
|
42 |
-
red ball 4 mod apk platinmods<br />
|
43 |
-
red ball 4 mod apk unlimited health<br />
|
44 |
-
red ball 4 mod apk vipmods<br />
|
45 |
-
red ball 4 mod menu apk download<br />
|
46 |
-
download game red ball 4 mod apk versi terbaru<br />
|
47 |
-
how to install red ball 4 mod apk on android device<br />
|
48 |
-
where to download red ball 4 mod apk for free<br />
|
49 |
-
what is new in red ball 4 mod apk update<br />
|
50 |
-
how to play red ball 4 with modded apk file<br />
|
51 |
-
how to get more stars in red ball 4 using modded apk file</p>
|
52 |
-
<h2>Features of Red Ball 4 Mod 100 Square APK</h2>
|
53 |
-
<h3>Unlimited lives and coins</h3>
|
54 |
-
<p>One of the best features of Red Ball 4 Mod 100 Square APK is that it gives you unlimited lives and coins. This means that you can play as long as you want without worrying about dying or running out of money. You can use the coins to buy power-ups, such as rockets, magnets, and shields, that will help you overcome obstacles and enemies. You can also use the coins to unlock new balls with different abilities and stats.</p>
|
55 |
-
<h3>Customizable ball appearance</h3>
|
56 |
-
<p>Another great feature of Red Ball 4 Mod 100 Square APK is that it lets you customize your ball appearance. You can choose from different colors and patterns for your ball, such as blue, green, yellow, pink, striped, dotted, etc. You can also change the shape of your ball, such as square, triangle, star, etc. You can mix and match different options to create your own unique ball that suits your style and personality.</p>
|
57 |
-
<h3>Epic boss battles</h3>
|
58 |
-
<p>Red Ball 4 Mod 100 Square APK also adds some epic boss battles to the game. These are challenging and thrilling levels where you have to face the black square bosses who have different powers and abilities. You have to use your skills and strategies to defeat them and save the world. The boss battles are fun and rewarding, as they test your reflexes and creativity.</p>
|
59 |
-
<h3>Cloud save and HID controller support</h3>
|
60 |
-
<p>Red Ball 4 Mod 100 Square APK also supports cloud save and HID controller. This means that you can save your progress online and access it from any device. You can also play the game with a physical controller, such as a gamepad or a joystick, for a more comfortable and immersive experience. These features make the game more convenient and enjoyable for all types of players.</p>
|
61 |
-
<h2>How to download and install Red Ball 4 Mod 100 Square APK</h2>
|
62 |
-
<h3>Step 1: Enable unknown sources on your device</h3>
|
63 |
-
<p>Before you can download and install Red Ball 4 Mod 100 Square APK, you need to enable unknown sources on your device. This will allow you to install apps that are not from the official Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You may see a warning message, but don't worry, it is safe to proceed.</p>
|
64 |
-
<h3>Step 2: Download the apk file from a trusted source</h3>
|
65 |
-
<p>Next, you need to download the apk file of Red Ball 4 Mod 100 Square APK from a trusted source. You can find many websites that offer the apk file, but be careful, as some of them may contain viruses or malware. We recommend you to use this link, which is verified and safe. Once you click on the link, you will see a download button. Tap on it and wait for the download to finish.</p>
|
66 |
-
<h3>Step 3: Install the apk file and launch the game</h3>
|
67 |
-
<p>Finally, you need to install the apk file and launch the game. To do this, go to your file manager and locate the downloaded apk file. Tap on it and follow the instructions on the screen. The installation process may take a few seconds or minutes, depending on your device. Once the installation is done, you will see an icon of Red Ball 4 on your home screen or app drawer. Tap on it and enjoy the game!</p>
|
68 |
-
<h2>Conclusion</h2>
|
69 |
-
<h3>Summary of the main points</h3>
|
70 |
-
<p>In conclusion, Red Ball 4 Mod 100 Square APK is a fun and challenging game for Android users who love physics-based platformers. The game has four episodes with 15 levels each, plus a level editor where you can create your own levels. The mod gives you unlimited lives and coins, customizable ball appearance, epic boss battles, cloud save and HID controller support. The mod is easy to download and install, as we have shown you in this article.</p>
|
71 |
-
<h3>Call to action</h3>
|
72 |
-
<p>If you are ready to have some fun and challenge yourself with Red Ball 4 Mod 100 Square APK, don't hesitate any longer. Download the mod now and start playing this amazing game. You will not regret it!</p>
|
73 |
-
<h2>Frequently Asked Questions</h2>
|
74 |
-
<h4>Q: Is Red Ball 4 Mod 100 Square APK free?</h4>
|
75 |
-
<p>A: Yes, Red Ball 4 Mod 100 Square APK is free to download and play. You don't need to pay anything to enjoy this game.</p>
|
76 |
-
<h4>Q: Is Red Ball 4 Mod 100 Square APK safe?</h4>
|
77 |
-
<p>A: Yes, Red Ball 4 Mod 100 Square APK is safe to use. It does not contain any viruses or malware that could harm your device or data.</p>
|
78 |
-
<h4>Q: How can I share my levels with other players?</h4>
|
79 |
-
<p>A: You can share your levels with other players by using the level editor feature in the game. You can create your own levels using various objects and obstacles, and then upload them online for others to play.</p>
|
80 |
-
<h4>Q: How can I update Red Ball 4 Mod 100 Square APK?</h4>
|
81 |
-
<p>A: You can update Red Ball 4 Mod 100 Square APK by downloading the latest version of the apk file from the same source that you used before. You don't need to uninstall the previous version, just install the new one over it.</p>
|
82 |
-
<h4>Q: What are some tips and tricks for playing Red Ball 4 Mod 100 Square APK?</h4>
|
83 |
-
<p>A: Some tips and tricks for playing Red Ball 4 Mod 100 Square APK are:</p>
|
84 |
-
<ul>
|
85 |
-
<li>Use power-ups wisely. They can help you overcome difficult situations, but they are limited in number.</li>
|
86 |
-
<li>Collect stars in each level. They will increase your score and unlock new balls.</li>
|
87 |
-
<li>Watch out for the black squares. They are your enemies and they will try to stop you or harm you.</li>
|
88 |
-
<li>Use the environment to your advantage. You can bounce off walls, roll down slopes, and push objects to solve puzzles and reach your goal.</li>
|
89 |
-
<li>Have fun and be creative. There is no one right way to play the game. You can explore different paths and strategies to complete each level.</li>
|
90 |
-
</ul>
|
91 |
-
<p>I hope these tips and tricks will help you enjoy Red Ball 4 Mod 100 Square APK even more.</p>
spaces/1phancelerku/anime-remove-background/Dynamons World Dragon MOD APK Catch Train and Evolve Your Dynamons.md
DELETED
@@ -1,99 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Dynamons World Dragon Mod APK: A Guide for RPG Fans</h1>
|
3 |
-
<p>If you are a fan of role-playing games (RPGs), you might have heard of Dynamons World, a popular and exciting game that lets you catch and train your own team of Dynamons, which are cute and powerful creatures that can fight in battles. But did you know that there is a way to make the game even more fun and rewarding? That's right, we are talking about Dynamons World Dragon Mod APK, a modified version of the original game that comes with amazing features and benefits. In this article, we will tell you everything you need to know about this mod, including what it is, how to download and install it, how to play it, and some tips and tricks to help you become the best RPG battle master in the Dynamons Kingdom.</p>
|
4 |
-
<h2>dynamons world dragon mod apk</h2><br /><p><b><b>Download Zip</b> ✶ <a href="https://jinyurl.com/2uNPMa">https://jinyurl.com/2uNPMa</a></b></p><br /><br />
|
5 |
-
<h2>What is Dynamons World?</h2>
|
6 |
-
<p>Before we dive into the details of the mod, let's first review what Dynamons World is all about. Dynamons World is an addicting, action-adventure game that puts you in the role of a Dynamons master. You can catch and train dozens of unique Dynamons, each with their own skills and abilities, and use them to fight against other players in online multiplayer battles. You can also explore an open world full of secrets, quests, and challenges, and fight against tough Captains and bosses to prove your skills.</p>
|
7 |
-
<h3>A fun and addictive RPG game</h3>
|
8 |
-
<p>Dynamons World is a game that will keep you hooked for hours, as you discover new Dynamons, level them up, evolve them, and equip them with skill cards that enhance their performance in battle. You can also customize your character with different outfits and accessories, and collect badges and trophies as you complete achievements. The game has a captivating storyline that takes you from the Dynamons Camp to the Temple Ruins, where you will face the ultimate enemy, Zenix.</p>
|
9 |
-
<h3>A huge open world to explore</h3>
|
10 |
-
<p>Dynamons World is not just about battles, it's also about adventure. The game has a huge open world that you can explore at your own pace, finding hidden items, secrets, and surprises along the way. You can visit different locations, such as forests, deserts, caves, islands, volcanoes, and more, each with their own unique environments and Dynamons. You can also interact with other characters, such as trainers, shopkeepers, scientists, and villagers, who will give you quests, tips, or items.</p>
|
11 |
-
<h3>A variety of Dynamons to catch and train</h3>
|
12 |
-
<p>One of the most appealing aspects of Dynamons World is the diversity of Dynamons that you can catch and train. There are over 50 different types of Dynamons in the game, each belonging to one of six elements: fire, water, plant, electric, dark, or dragon. Each element has its own strengths and weaknesses against other elements, so you need to choose your team wisely. Some Dynamons are more common than others, while some are very rare and hard to find. You can also evolve your Dynamons into more powerful forms when they reach a certain level.</p>
|
13 |
-
<h2>What is Dynamons World Dragon Mod APK?</h2>
|
14 |
-
<p>Now that you have a clear idea of what Dynamons World is all about, let's talk about what makes the mod version different from the original one. Dynamons World Dragon Mod APK is a modified version of the original game that <p>buff, debuff, etc. You can also switch your Dynamon with another one from your team by using the swap icon on the bottom left corner of the screen.</li>
|
15 |
-
<li>The game uses an elemental system, where each Dynamon and skill belongs to one of six elements: fire, water, plant, electric, dark, or dragon. Each element has its own strengths and weaknesses against other elements, as shown in the table below:</li>
|
16 |
-
</ul>
|
17 |
-
| Element  | Strong against  | Weak against    |
|----------|-----------------|-----------------|
| Fire     | Plant, Dark     | Water, Dragon   |
| Water    | Fire, Dragon    | Plant, Electric |
| Plant    | Water, Electric | Fire, Dark      |
| Electric | Water, Dragon   | Plant, Dark     |
| Dark     | Plant, Electric | Fire, Dragon    |
| Dragon   | Fire, Dark      | Water, Electric |

<p>You can use this table to plan your strategy and choose the best Dynamon and skill for each situation. For example, if you are facing a water Dynamon, you should use a plant or electric Dynamon and skill to deal more damage and take less damage.</p>
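<p>For readers who like to tinker, the short Python sketch below encodes the chart above as a simple lookup and turns it into a rough damage multiplier. The 2.0 / 0.5 / 1.0 values are illustrative assumptions for the example, not numbers taken from the game.</p>
<pre><code># Illustrative sketch: the elemental chart above as a lookup table.
# The 2.0 / 0.5 / 1.0 multipliers are assumptions, not game data.
STRONG = {
    "fire":     {"plant", "dark"},
    "water":    {"fire", "dragon"},
    "plant":    {"water", "electric"},
    "electric": {"water", "dragon"},
    "dark":     {"plant", "electric"},
    "dragon":   {"fire", "dark"},
}
WEAK = {
    "fire":     {"water", "dragon"},
    "water":    {"plant", "electric"},
    "plant":    {"fire", "dark"},
    "electric": {"plant", "dark"},
    "dark":     {"fire", "dragon"},
    "dragon":   {"water", "electric"},
}

def multiplier(attacker, defender):
    """Rough damage multiplier for an attacking element against a defending one."""
    if defender in STRONG[attacker]:
        return 2.0
    if defender in WEAK[attacker]:
        return 0.5
    return 1.0

# Example from the text: a plant attack against a water Dynamon hits hard.
print(multiplier("plant", "water"))  # 2.0
print(multiplier("fire", "water"))   # 0.5
</code></pre>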
|
18 |
-
<li>The game also has a stamina system, where each skill consumes a certain amount of stamina points (SP) when used. You can see the SP cost of each skill on the bottom right corner of the skill icon. You can also see the remaining SP of your Dynamon on the top left corner of the screen. You need to manage your SP wisely and avoid running out of it during a battle. You can restore your SP by using potions or by resting at the camp (a small code sketch of this SP bookkeeping is shown just after this list).</li>
|
19 |
-
<li>The game has a level system, where your Dynamons gain experience points (XP) when they win battles or complete quests. When they gain enough XP, they level up and increase their stats, such as health, attack, defense, and speed. You can see the XP and level of your Dynamon on the top left corner of the screen. You can also see the XP bar that shows how much XP you need to reach the next level.</li>
|
20 |
-
<li>The game has an evolution system, where some Dynamons can evolve into more powerful forms when they reach a certain level. You can see if your Dynamon can evolve by tapping on it on the team menu and checking the evolution icon on the bottom right corner of the screen. You can also see what level and form your Dynamon will evolve into. When your Dynamon evolves, it changes its appearance and gains new skills and abilities.</li>
|
21 |
-
<li>The game has a skill card system, where you can equip your Dynamons with skill cards that enhance their skills in battle. You can find skill cards by winning battles, completing quests, or buying them from the shop. You can equip up to four skill cards per Dynamon by tapping on it on the team menu and selecting the skill card icon on the bottom left corner of the screen. You can also see the effect of each skill card on the skill icon.</li>
|
22 |
-
</ul>
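<p>To make the element chart above concrete, here is a small, purely illustrative Python sketch of how such a type-effectiveness lookup could be modeled. It is not taken from the game's actual code: the element pairings come from the table above, while the function name and the 2.0/0.5/1.0 multipliers are assumptions made only for illustration.</p>
<pre><code>
# Illustrative sketch only: encodes the element chart from the table above.
# The multiplier values are assumed, not the game's real numbers.
STRONG_AGAINST = {
    "fire": {"plant", "dark"},
    "water": {"fire", "dragon"},
    "plant": {"water", "electric"},
    "electric": {"water", "dragon"},
    "dark": {"plant", "electric"},
    "dragon": {"fire", "dark"},
}

def effectiveness(attacker: str, defender: str) -> float:
    """Return a damage multiplier for an attacking element against a defending one."""
    if defender in STRONG_AGAINST[attacker]:
        return 2.0  # attacker is strong against defender
    if attacker in STRONG_AGAINST[defender]:
        return 0.5  # attacker is weak against defender
    return 1.0      # neutral match-up

# Example: a plant skill against a water Dynamon hits harder, matching the
# advice given right after the table.
assert effectiveness("plant", "water") == 2.0
assert effectiveness("fire", "water") == 0.5
</code></pre>
<p>The same lookup also covers the reverse case: sending a fire Dynamon against a water opponent returns the reduced multiplier, which is exactly the match-up the table tells you to avoid.</p>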
<h3>Best dragon Dynamons to use</h3>
<p>If you are playing with the mod version of Dynamons World, you will have access to exclusive dragon Dynamons that are not available in the original version. These dragon Dynamons are very powerful and rare, and they have special skills and abilities that make them stand out from other Dynamons. Here are some of the best dragon Dynamons to use in your team:</p>
<ul>
<li>Drakomo: A fire dragon Dynamon that has high attack and speed stats. It can use skills such as Flame Burst, Dragon Claw, Inferno Blast, and Dragon Rage.</li>
<li>Hydrogon: A water dragon Dynamon that has high defense and health stats. It can use skills such as Water Pulse, Dragon Tail, Hydro Pump, and Dragon Breath.</li>
<li>Floragon: A plant dragon Dynamon that has high health and attack stats. It can use skills such as Leaf Blade, Dragon Fang, Solar Beam, and Dragon Dance.</li>
<li>Electragon: An electric dragon Dynamon that has high speed and attack stats. It can use skills such as Thunder Shock, Dragon Rush, Thunderbolt, and Dragon Force.</li>
<li>Nightagon: A dark dragon Dynamon that has high attack and defense stats. It can use skills such as Dark Pulse, Dragon Bite, Shadow Ball, and Dragon Roar.</li>
</ul>
<h3>Online multiplayer mode</h3>
<p>One of the most exciting features of Dynamons World is the online multiplayer mode, where you can challenge other players from around the world in real-time battles. You can use the online icon on the top left corner of the screen to access the online mode. You can choose to play in one of three modes: ranked, friendly, or tournament. In ranked mode, you can compete with other players for points and ranks. In friendly mode, you can play with your friends or random players for fun. In tournament mode, you can join or create a tournament and fight for prizes and glory. You can also chat with other players, send them gifts, or add them as friends.</p>
<h2>Conclusion</h2>
<p>Dynamons World is a fun and addictive RPG game that lets you catch and train your own team of Dynamons and fight against other players in online multiplayer battles. You can also explore a huge open world full of secrets, quests, and challenges, and fight against tough Captains and bosses to prove your skills. If you want to make the game even more fun and rewarding, you can try Dynamons World Dragon Mod APK, a modified version of the original game that comes with amazing features and benefits, such as unlimited money and gems, unlocked everything, mod menu and free shopping, and exclusive dragon Dynamons. You can download and install the mod easily and safely by following the steps in this article. You can also use the tips and tricks in this article to help you play the mod and become the best RPG battle master in the Dynamons Kingdom.</p>
<h2>FAQs</h2>
<p>Here are some of the frequently asked questions about Dynamons World Dragon Mod APK:</p>
<ul>
<li>Q: Is Dynamons World Dragon Mod APK safe to use?</li>
<li>A: Yes, it is safe to use as long as you download it from a trusted source and follow the requirements and precautions mentioned in this article.</li>
<li>Q: Do I need to root my device to use Dynamons World Dragon Mod APK?</li>
<li>A: No, you don't need to root your device to use the mod. You just need to enable unknown sources in your device settings.</li>
<li>Q: Will I get banned from playing online if I use Dynamons World Dragon Mod APK?</li>
<li>A: No, you won't get banned from playing online if you use the mod. The mod is undetectable by the game servers and does not affect other players' experience.</li>
<li>Q: Can I update Dynamons World Dragon Mod APK when a new version of the original game is released?</li>
<li>A: Yes, you can update the mod when a new version of the original game is released. However, you need to wait for the mod developers to release a new version of the mod that is compatible with the new version of the original game.</li>
<li>Q: Can I play Dynamons World Dragon Mod APK offline?</li>
<li>A: Yes, you can play the mod offline without an internet connection. However, you won't be able to access some features that require an internet connection, such as online multiplayer mode.</li>
</ul>
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/__init__.py
DELETED
File without changes
spaces/4Taps/SadTalker/src/facerender/modules/mapping.py
DELETED
@@ -1,47 +0,0 @@
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F


class MappingNet(nn.Module):
    def __init__(self, coeff_nc, descriptor_nc, layer, num_kp, num_bins):
        super( MappingNet, self).__init__()

        self.layer = layer
        nonlinearity = nn.LeakyReLU(0.1)

        self.first = nn.Sequential(
            torch.nn.Conv1d(coeff_nc, descriptor_nc, kernel_size=7, padding=0, bias=True))

        for i in range(layer):
            net = nn.Sequential(nonlinearity,
                torch.nn.Conv1d(descriptor_nc, descriptor_nc, kernel_size=3, padding=0, dilation=3))
            setattr(self, 'encoder' + str(i), net)

        self.pooling = nn.AdaptiveAvgPool1d(1)
        self.output_nc = descriptor_nc

        self.fc_roll = nn.Linear(descriptor_nc, num_bins)
        self.fc_pitch = nn.Linear(descriptor_nc, num_bins)
        self.fc_yaw = nn.Linear(descriptor_nc, num_bins)
        self.fc_t = nn.Linear(descriptor_nc, 3)
        self.fc_exp = nn.Linear(descriptor_nc, 3*num_kp)

    def forward(self, input_3dmm):
        out = self.first(input_3dmm)
        for i in range(self.layer):
            model = getattr(self, 'encoder' + str(i))
            out = model(out) + out[:,:,3:-3]
        out = self.pooling(out)
        out = out.view(out.shape[0], -1)
        #print('out:', out.shape)

        yaw = self.fc_yaw(out)
        pitch = self.fc_pitch(out)
        roll = self.fc_roll(out)
        t = self.fc_t(out)
        exp = self.fc_exp(out)

        return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp}
spaces/AIBoy1993/segment_anything_webui/README.md
DELETED
@@ -1,76 +0,0 @@
---
title: Segment Anything
emoji: 🚀
colorFrom: gray
colorTo: pink
sdk: gradio
sdk_version: 3.25.0
app_file: app.py
pinned: false
---

# Segment Anything WebUI

[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/AIBoy1993/segment_anything_webui?duplicate=true)
[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/AIBoy1993/segment_anything_webui?duplicate=true)

This project is based on **[Segment Anything Model](https://segment-anything.com/)** by Meta. The UI is based on [Gradio](https://gradio.app/).

- Try the demo on HF: [AIBoy1993/segment_anything_webui](https://huggingface.co/spaces/AIBoy1993/segment_anything_webui)
- [GitHub](https://github.com/5663015/segment_anything_webui)

![](./images/20230408023615.png)

## Change Logs

- [2023-4-11]
  - Support video segmentation. A short video can be automatically segmented by SAM.
  - Support text prompt segmentation using the [OWL-ViT](https://huggingface.co/docs/transformers/v4.27.2/en/model_doc/owlvit#overview) (Vision Transformer for Open-World Localization) model.

## **Usage**

The following usage instructions assume you are running on your own computer.

- Install Segment Anything ([more details about installing Segment Anything](https://github.com/facebookresearch/segment-anything#installation)):

```
pip install git+https://github.com/facebookresearch/segment-anything.git
```

- `git clone` this repository:

```
git clone https://github.com/5663015/segment_anything_webui.git
```

- Make a new folder named `checkpoints` under this project, and put the downloaded weights files in `checkpoints`. You can download the weights using the following URLs:

  - `vit_h`: [ViT-H SAM model](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth)
  - `vit_l`: [ViT-L SAM model](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth)
  - `vit_b`: [ViT-B SAM model](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth)

- Under `checkpoints`, make a new folder named `models--google--owlvit-base-patch32`, and put the downloaded [OWL-ViT weights](https://huggingface.co/google/owlvit-base-patch32) files in `models--google--owlvit-base-patch32`.
- Run:

```
python app.py
```

**Note:** The default model is `vit_b`, and the demo can run on CPU. The default device is `cpu`.

## TODO

- [x] Video segmentation
- [x] Add text prompt
- [ ] Add segmentation prompt (point and box)

## Reference

- Thanks to the wonderful work [Segment Anything](https://segment-anything.com/) and [OWL-ViT](https://arxiv.org/abs/2205.06230)
- Some video processing code references [kadirnar/segment-anything-video](https://github.com/kadirnar/segment-anything-video), and some OWL-ViT code references [ngthanhtin/owlvit_segment_anything](https://github.com/ngthanhtin/owlvit_segment_anything).
spaces/AIConsultant/MusicGen/audiocraft/grids/compression/debug.py
DELETED
@@ -1,31 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
Grid search file, simply list all the exp you want in `explorer`.
Any new exp added there will be scheduled.
You can cancel an experiment by commenting its line.

This grid is a minimal example for debugging the compression task
and how to override parameters directly in a grid.
Learn more about dora grids: https://github.com/facebookresearch/dora
"""

from ._explorers import CompressionExplorer
from ...environment import AudioCraftEnvironment


@CompressionExplorer
def explorer(launcher):
    partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
    launcher.slurm_(gpus=2, partition=partitions)
    launcher.bind_(solver='compression/debug')

    with launcher.job_array():
        # base debug task using config from solver=compression/debug
        launcher()
        # we can override parameters in the grid to launch additional xps
        launcher({'rvq.bins': 2048, 'rvq.n_q': 4})
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py
DELETED
@@ -1,434 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
|
3 |
-
# Copyright 2019 Tomoki Hayashi
|
4 |
-
# MIT License (https://opensource.org/licenses/MIT)
|
5 |
-
|
6 |
-
"""Parallel WaveGAN Modules."""
|
7 |
-
|
8 |
-
import logging
|
9 |
-
import math
|
10 |
-
|
11 |
-
import torch
|
12 |
-
from torch import nn
|
13 |
-
|
14 |
-
from modules.parallel_wavegan.layers import Conv1d
|
15 |
-
from modules.parallel_wavegan.layers import Conv1d1x1
|
16 |
-
from modules.parallel_wavegan.layers import ResidualBlock
|
17 |
-
from modules.parallel_wavegan.layers import upsample
|
18 |
-
from modules.parallel_wavegan import models
|
19 |
-
|
20 |
-
|
21 |
-
class ParallelWaveGANGenerator(torch.nn.Module):
|
22 |
-
"""Parallel WaveGAN Generator module."""
|
23 |
-
|
24 |
-
def __init__(self,
|
25 |
-
in_channels=1,
|
26 |
-
out_channels=1,
|
27 |
-
kernel_size=3,
|
28 |
-
layers=30,
|
29 |
-
stacks=3,
|
30 |
-
residual_channels=64,
|
31 |
-
gate_channels=128,
|
32 |
-
skip_channels=64,
|
33 |
-
aux_channels=80,
|
34 |
-
aux_context_window=2,
|
35 |
-
dropout=0.0,
|
36 |
-
bias=True,
|
37 |
-
use_weight_norm=True,
|
38 |
-
use_causal_conv=False,
|
39 |
-
upsample_conditional_features=True,
|
40 |
-
upsample_net="ConvInUpsampleNetwork",
|
41 |
-
upsample_params={"upsample_scales": [4, 4, 4, 4]},
|
42 |
-
use_pitch_embed=False,
|
43 |
-
):
|
44 |
-
"""Initialize Parallel WaveGAN Generator module.
|
45 |
-
|
46 |
-
Args:
|
47 |
-
in_channels (int): Number of input channels.
|
48 |
-
out_channels (int): Number of output channels.
|
49 |
-
kernel_size (int): Kernel size of dilated convolution.
|
50 |
-
layers (int): Number of residual block layers.
|
51 |
-
stacks (int): Number of stacks i.e., dilation cycles.
|
52 |
-
residual_channels (int): Number of channels in residual conv.
|
53 |
-
gate_channels (int): Number of channels in gated conv.
|
54 |
-
skip_channels (int): Number of channels in skip conv.
|
55 |
-
aux_channels (int): Number of channels for auxiliary feature conv.
|
56 |
-
aux_context_window (int): Context window size for auxiliary feature.
|
57 |
-
dropout (float): Dropout rate. 0.0 means no dropout applied.
|
58 |
-
bias (bool): Whether to use bias parameter in conv layer.
|
59 |
-
use_weight_norm (bool): Whether to use weight norm.
|
60 |
-
If set to true, it will be applied to all of the conv layers.
|
61 |
-
use_causal_conv (bool): Whether to use causal structure.
|
62 |
-
upsample_conditional_features (bool): Whether to use upsampling network.
|
63 |
-
upsample_net (str): Upsampling network architecture.
|
64 |
-
upsample_params (dict): Upsampling network parameters.
|
65 |
-
|
66 |
-
"""
|
67 |
-
super(ParallelWaveGANGenerator, self).__init__()
|
68 |
-
self.in_channels = in_channels
|
69 |
-
self.out_channels = out_channels
|
70 |
-
self.aux_channels = aux_channels
|
71 |
-
self.layers = layers
|
72 |
-
self.stacks = stacks
|
73 |
-
self.kernel_size = kernel_size
|
74 |
-
|
75 |
-
# check the number of layers and stacks
|
76 |
-
assert layers % stacks == 0
|
77 |
-
layers_per_stack = layers // stacks
|
78 |
-
|
79 |
-
# define first convolution
|
80 |
-
self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True)
|
81 |
-
|
82 |
-
# define conv + upsampling network
|
83 |
-
if upsample_conditional_features:
|
84 |
-
upsample_params.update({
|
85 |
-
"use_causal_conv": use_causal_conv,
|
86 |
-
})
|
87 |
-
if upsample_net == "MelGANGenerator":
|
88 |
-
assert aux_context_window == 0
|
89 |
-
upsample_params.update({
|
90 |
-
"use_weight_norm": False, # not to apply twice
|
91 |
-
"use_final_nonlinear_activation": False,
|
92 |
-
})
|
93 |
-
self.upsample_net = getattr(models, upsample_net)(**upsample_params)
|
94 |
-
else:
|
95 |
-
if upsample_net == "ConvInUpsampleNetwork":
|
96 |
-
upsample_params.update({
|
97 |
-
"aux_channels": aux_channels,
|
98 |
-
"aux_context_window": aux_context_window,
|
99 |
-
})
|
100 |
-
self.upsample_net = getattr(upsample, upsample_net)(**upsample_params)
|
101 |
-
else:
|
102 |
-
self.upsample_net = None
|
103 |
-
|
104 |
-
# define residual blocks
|
105 |
-
self.conv_layers = torch.nn.ModuleList()
|
106 |
-
for layer in range(layers):
|
107 |
-
dilation = 2 ** (layer % layers_per_stack)
|
108 |
-
conv = ResidualBlock(
|
109 |
-
kernel_size=kernel_size,
|
110 |
-
residual_channels=residual_channels,
|
111 |
-
gate_channels=gate_channels,
|
112 |
-
skip_channels=skip_channels,
|
113 |
-
aux_channels=aux_channels,
|
114 |
-
dilation=dilation,
|
115 |
-
dropout=dropout,
|
116 |
-
bias=bias,
|
117 |
-
use_causal_conv=use_causal_conv,
|
118 |
-
)
|
119 |
-
self.conv_layers += [conv]
|
120 |
-
|
121 |
-
# define output layers
|
122 |
-
self.last_conv_layers = torch.nn.ModuleList([
|
123 |
-
torch.nn.ReLU(inplace=True),
|
124 |
-
Conv1d1x1(skip_channels, skip_channels, bias=True),
|
125 |
-
torch.nn.ReLU(inplace=True),
|
126 |
-
Conv1d1x1(skip_channels, out_channels, bias=True),
|
127 |
-
])
|
128 |
-
|
129 |
-
self.use_pitch_embed = use_pitch_embed
|
130 |
-
if use_pitch_embed:
|
131 |
-
self.pitch_embed = nn.Embedding(300, aux_channels, 0)
|
132 |
-
self.c_proj = nn.Linear(2 * aux_channels, aux_channels)
|
133 |
-
|
134 |
-
# apply weight norm
|
135 |
-
if use_weight_norm:
|
136 |
-
self.apply_weight_norm()
|
137 |
-
|
138 |
-
def forward(self, x, c=None, pitch=None, **kwargs):
|
139 |
-
"""Calculate forward propagation.
|
140 |
-
|
141 |
-
Args:
|
142 |
-
x (Tensor): Input noise signal (B, C_in, T).
|
143 |
-
c (Tensor): Local conditioning auxiliary features (B, C ,T').
|
144 |
-
pitch (Tensor): Local conditioning pitch (B, T').
|
145 |
-
|
146 |
-
Returns:
|
147 |
-
Tensor: Output tensor (B, C_out, T)
|
148 |
-
|
149 |
-
"""
|
150 |
-
# perform upsampling
|
151 |
-
if c is not None and self.upsample_net is not None:
|
152 |
-
if self.use_pitch_embed:
|
153 |
-
p = self.pitch_embed(pitch)
|
154 |
-
c = self.c_proj(torch.cat([c.transpose(1, 2), p], -1)).transpose(1, 2)
|
155 |
-
c = self.upsample_net(c)
|
156 |
-
assert c.size(-1) == x.size(-1), (c.size(-1), x.size(-1))
|
157 |
-
|
158 |
-
# encode to hidden representation
|
159 |
-
x = self.first_conv(x)
|
160 |
-
skips = 0
|
161 |
-
for f in self.conv_layers:
|
162 |
-
x, h = f(x, c)
|
163 |
-
skips += h
|
164 |
-
skips *= math.sqrt(1.0 / len(self.conv_layers))
|
165 |
-
|
166 |
-
# apply final layers
|
167 |
-
x = skips
|
168 |
-
for f in self.last_conv_layers:
|
169 |
-
x = f(x)
|
170 |
-
|
171 |
-
return x
|
172 |
-
|
173 |
-
def remove_weight_norm(self):
|
174 |
-
"""Remove weight normalization module from all of the layers."""
|
175 |
-
def _remove_weight_norm(m):
|
176 |
-
try:
|
177 |
-
logging.debug(f"Weight norm is removed from {m}.")
|
178 |
-
torch.nn.utils.remove_weight_norm(m)
|
179 |
-
except ValueError: # this module didn't have weight norm
|
180 |
-
return
|
181 |
-
|
182 |
-
self.apply(_remove_weight_norm)
|
183 |
-
|
184 |
-
def apply_weight_norm(self):
|
185 |
-
"""Apply weight normalization module from all of the layers."""
|
186 |
-
def _apply_weight_norm(m):
|
187 |
-
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
|
188 |
-
torch.nn.utils.weight_norm(m)
|
189 |
-
logging.debug(f"Weight norm is applied to {m}.")
|
190 |
-
|
191 |
-
self.apply(_apply_weight_norm)
|
192 |
-
|
193 |
-
@staticmethod
|
194 |
-
def _get_receptive_field_size(layers, stacks, kernel_size,
|
195 |
-
dilation=lambda x: 2 ** x):
|
196 |
-
assert layers % stacks == 0
|
197 |
-
layers_per_cycle = layers // stacks
|
198 |
-
dilations = [dilation(i % layers_per_cycle) for i in range(layers)]
|
199 |
-
return (kernel_size - 1) * sum(dilations) + 1
|
200 |
-
|
201 |
-
@property
|
202 |
-
def receptive_field_size(self):
|
203 |
-
"""Return receptive field size."""
|
204 |
-
return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size)
|
205 |
-
|
206 |
-
|
207 |
-
class ParallelWaveGANDiscriminator(torch.nn.Module):
|
208 |
-
"""Parallel WaveGAN Discriminator module."""
|
209 |
-
|
210 |
-
def __init__(self,
|
211 |
-
in_channels=1,
|
212 |
-
out_channels=1,
|
213 |
-
kernel_size=3,
|
214 |
-
layers=10,
|
215 |
-
conv_channels=64,
|
216 |
-
dilation_factor=1,
|
217 |
-
nonlinear_activation="LeakyReLU",
|
218 |
-
nonlinear_activation_params={"negative_slope": 0.2},
|
219 |
-
bias=True,
|
220 |
-
use_weight_norm=True,
|
221 |
-
):
|
222 |
-
"""Initialize Parallel WaveGAN Discriminator module.
|
223 |
-
|
224 |
-
Args:
|
225 |
-
in_channels (int): Number of input channels.
|
226 |
-
out_channels (int): Number of output channels.
|
227 |
-
kernel_size (int): Number of output channels.
|
228 |
-
layers (int): Number of conv layers.
|
229 |
-
conv_channels (int): Number of chnn layers.
|
230 |
-
dilation_factor (int): Dilation factor. For example, if dilation_factor = 2,
|
231 |
-
the dilation will be 2, 4, 8, ..., and so on.
|
232 |
-
nonlinear_activation (str): Nonlinear function after each conv.
|
233 |
-
nonlinear_activation_params (dict): Nonlinear function parameters
|
234 |
-
bias (bool): Whether to use bias parameter in conv.
|
235 |
-
use_weight_norm (bool) Whether to use weight norm.
|
236 |
-
If set to true, it will be applied to all of the conv layers.
|
237 |
-
|
238 |
-
"""
|
239 |
-
super(ParallelWaveGANDiscriminator, self).__init__()
|
240 |
-
assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
|
241 |
-
assert dilation_factor > 0, "Dilation factor must be > 0."
|
242 |
-
self.conv_layers = torch.nn.ModuleList()
|
243 |
-
conv_in_channels = in_channels
|
244 |
-
for i in range(layers - 1):
|
245 |
-
if i == 0:
|
246 |
-
dilation = 1
|
247 |
-
else:
|
248 |
-
dilation = i if dilation_factor == 1 else dilation_factor ** i
|
249 |
-
conv_in_channels = conv_channels
|
250 |
-
padding = (kernel_size - 1) // 2 * dilation
|
251 |
-
conv_layer = [
|
252 |
-
Conv1d(conv_in_channels, conv_channels,
|
253 |
-
kernel_size=kernel_size, padding=padding,
|
254 |
-
dilation=dilation, bias=bias),
|
255 |
-
getattr(torch.nn, nonlinear_activation)(inplace=True, **nonlinear_activation_params)
|
256 |
-
]
|
257 |
-
self.conv_layers += conv_layer
|
258 |
-
padding = (kernel_size - 1) // 2
|
259 |
-
last_conv_layer = Conv1d(
|
260 |
-
conv_in_channels, out_channels,
|
261 |
-
kernel_size=kernel_size, padding=padding, bias=bias)
|
262 |
-
self.conv_layers += [last_conv_layer]
|
263 |
-
|
264 |
-
# apply weight norm
|
265 |
-
if use_weight_norm:
|
266 |
-
self.apply_weight_norm()
|
267 |
-
|
268 |
-
def forward(self, x):
|
269 |
-
"""Calculate forward propagation.
|
270 |
-
|
271 |
-
Args:
|
272 |
-
x (Tensor): Input noise signal (B, 1, T).
|
273 |
-
|
274 |
-
Returns:
|
275 |
-
Tensor: Output tensor (B, 1, T)
|
276 |
-
|
277 |
-
"""
|
278 |
-
for f in self.conv_layers:
|
279 |
-
x = f(x)
|
280 |
-
return x
|
281 |
-
|
282 |
-
def apply_weight_norm(self):
|
283 |
-
"""Apply weight normalization module from all of the layers."""
|
284 |
-
def _apply_weight_norm(m):
|
285 |
-
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
|
286 |
-
torch.nn.utils.weight_norm(m)
|
287 |
-
logging.debug(f"Weight norm is applied to {m}.")
|
288 |
-
|
289 |
-
self.apply(_apply_weight_norm)
|
290 |
-
|
291 |
-
def remove_weight_norm(self):
|
292 |
-
"""Remove weight normalization module from all of the layers."""
|
293 |
-
def _remove_weight_norm(m):
|
294 |
-
try:
|
295 |
-
logging.debug(f"Weight norm is removed from {m}.")
|
296 |
-
torch.nn.utils.remove_weight_norm(m)
|
297 |
-
except ValueError: # this module didn't have weight norm
|
298 |
-
return
|
299 |
-
|
300 |
-
self.apply(_remove_weight_norm)
|
301 |
-
|
302 |
-
|
303 |
-
class ResidualParallelWaveGANDiscriminator(torch.nn.Module):
|
304 |
-
"""Parallel WaveGAN Discriminator module."""
|
305 |
-
|
306 |
-
def __init__(self,
|
307 |
-
in_channels=1,
|
308 |
-
out_channels=1,
|
309 |
-
kernel_size=3,
|
310 |
-
layers=30,
|
311 |
-
stacks=3,
|
312 |
-
residual_channels=64,
|
313 |
-
gate_channels=128,
|
314 |
-
skip_channels=64,
|
315 |
-
dropout=0.0,
|
316 |
-
bias=True,
|
317 |
-
use_weight_norm=True,
|
318 |
-
use_causal_conv=False,
|
319 |
-
nonlinear_activation="LeakyReLU",
|
320 |
-
nonlinear_activation_params={"negative_slope": 0.2},
|
321 |
-
):
|
322 |
-
"""Initialize Parallel WaveGAN Discriminator module.
|
323 |
-
|
324 |
-
Args:
|
325 |
-
in_channels (int): Number of input channels.
|
326 |
-
out_channels (int): Number of output channels.
|
327 |
-
kernel_size (int): Kernel size of dilated convolution.
|
328 |
-
layers (int): Number of residual block layers.
|
329 |
-
stacks (int): Number of stacks i.e., dilation cycles.
|
330 |
-
residual_channels (int): Number of channels in residual conv.
|
331 |
-
gate_channels (int): Number of channels in gated conv.
|
332 |
-
skip_channels (int): Number of channels in skip conv.
|
333 |
-
dropout (float): Dropout rate. 0.0 means no dropout applied.
|
334 |
-
bias (bool): Whether to use bias parameter in conv.
|
335 |
-
use_weight_norm (bool): Whether to use weight norm.
|
336 |
-
If set to true, it will be applied to all of the conv layers.
|
337 |
-
use_causal_conv (bool): Whether to use causal structure.
|
338 |
-
nonlinear_activation_params (dict): Nonlinear function parameters
|
339 |
-
|
340 |
-
"""
|
341 |
-
super(ResidualParallelWaveGANDiscriminator, self).__init__()
|
342 |
-
assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
|
343 |
-
|
344 |
-
self.in_channels = in_channels
|
345 |
-
self.out_channels = out_channels
|
346 |
-
self.layers = layers
|
347 |
-
self.stacks = stacks
|
348 |
-
self.kernel_size = kernel_size
|
349 |
-
|
350 |
-
# check the number of layers and stacks
|
351 |
-
assert layers % stacks == 0
|
352 |
-
layers_per_stack = layers // stacks
|
353 |
-
|
354 |
-
# define first convolution
|
355 |
-
self.first_conv = torch.nn.Sequential(
|
356 |
-
Conv1d1x1(in_channels, residual_channels, bias=True),
|
357 |
-
getattr(torch.nn, nonlinear_activation)(
|
358 |
-
inplace=True, **nonlinear_activation_params),
|
359 |
-
)
|
360 |
-
|
361 |
-
# define residual blocks
|
362 |
-
self.conv_layers = torch.nn.ModuleList()
|
363 |
-
for layer in range(layers):
|
364 |
-
dilation = 2 ** (layer % layers_per_stack)
|
365 |
-
conv = ResidualBlock(
|
366 |
-
kernel_size=kernel_size,
|
367 |
-
residual_channels=residual_channels,
|
368 |
-
gate_channels=gate_channels,
|
369 |
-
skip_channels=skip_channels,
|
370 |
-
aux_channels=-1,
|
371 |
-
dilation=dilation,
|
372 |
-
dropout=dropout,
|
373 |
-
bias=bias,
|
374 |
-
use_causal_conv=use_causal_conv,
|
375 |
-
)
|
376 |
-
self.conv_layers += [conv]
|
377 |
-
|
378 |
-
# define output layers
|
379 |
-
self.last_conv_layers = torch.nn.ModuleList([
|
380 |
-
getattr(torch.nn, nonlinear_activation)(
|
381 |
-
inplace=True, **nonlinear_activation_params),
|
382 |
-
Conv1d1x1(skip_channels, skip_channels, bias=True),
|
383 |
-
getattr(torch.nn, nonlinear_activation)(
|
384 |
-
inplace=True, **nonlinear_activation_params),
|
385 |
-
Conv1d1x1(skip_channels, out_channels, bias=True),
|
386 |
-
])
|
387 |
-
|
388 |
-
# apply weight norm
|
389 |
-
if use_weight_norm:
|
390 |
-
self.apply_weight_norm()
|
391 |
-
|
392 |
-
def forward(self, x):
|
393 |
-
"""Calculate forward propagation.
|
394 |
-
|
395 |
-
Args:
|
396 |
-
x (Tensor): Input noise signal (B, 1, T).
|
397 |
-
|
398 |
-
Returns:
|
399 |
-
Tensor: Output tensor (B, 1, T)
|
400 |
-
|
401 |
-
"""
|
402 |
-
x = self.first_conv(x)
|
403 |
-
|
404 |
-
skips = 0
|
405 |
-
for f in self.conv_layers:
|
406 |
-
x, h = f(x, None)
|
407 |
-
skips += h
|
408 |
-
skips *= math.sqrt(1.0 / len(self.conv_layers))
|
409 |
-
|
410 |
-
# apply final layers
|
411 |
-
x = skips
|
412 |
-
for f in self.last_conv_layers:
|
413 |
-
x = f(x)
|
414 |
-
return x
|
415 |
-
|
416 |
-
def apply_weight_norm(self):
|
417 |
-
"""Apply weight normalization module from all of the layers."""
|
418 |
-
def _apply_weight_norm(m):
|
419 |
-
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
|
420 |
-
torch.nn.utils.weight_norm(m)
|
421 |
-
logging.debug(f"Weight norm is applied to {m}.")
|
422 |
-
|
423 |
-
self.apply(_apply_weight_norm)
|
424 |
-
|
425 |
-
def remove_weight_norm(self):
|
426 |
-
"""Remove weight normalization module from all of the layers."""
|
427 |
-
def _remove_weight_norm(m):
|
428 |
-
try:
|
429 |
-
logging.debug(f"Weight norm is removed from {m}.")
|
430 |
-
torch.nn.utils.remove_weight_norm(m)
|
431 |
-
except ValueError: # this module didn't have weight norm
|
432 |
-
return
|
433 |
-
|
434 |
-
self.apply(_remove_weight_norm)
spaces/Abhaykoul/Wikipedia/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Wikipedia
emoji: 👁
colorFrom: yellow
colorTo: pink
sdk: streamlit
sdk_version: 1.28.1
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Abhilashvj/planogram-compliance/app.py
DELETED
@@ -1,296 +0,0 @@
|
|
1 |
-
# https://planogram-compliance.herokuapp.com/
|
2 |
-
# https://dashboard.heroku.com/apps/planogram-compliance/deploy/heroku-git
|
3 |
-
|
4 |
-
# https://medium.com/@mohcufe/how-to-deploy-your-trained-pytorch-model-on-heroku-ff4b73085ddd\
|
5 |
-
# https://stackoverflow.com/questions/51730880/where-do-i-get-a-cpu-only-version-of-pytorch
|
6 |
-
# https://blog.jcharistech.com/2020/02/26/how-to-deploy-a-face-detection-streamlit-app-on-heroku/
|
7 |
-
# https://towardsdatascience.com/a-quick-tutorial-on-how-to-deploy-your-streamlit-app-to-heroku-
|
8 |
-
# https://www.analyticsvidhya.com/blog/2021/06/deploy-your-ml-dl-streamlit-application-on-heroku/
|
9 |
-
# https://gist.github.com/jeremyjordan/6b506257509e8ba673f145baa568a1ea
|
10 |
-
|
11 |
-
import json
|
12 |
-
|
13 |
-
# https://www.r-bloggers.com/2020/12/creating-a-streamlit-web-app-building-with-docker-github-actions-and-hosting-on-heroku/
|
14 |
-
# https://devcenter.heroku.com/articles/container-registry-and-runtime
|
15 |
-
# from yolo_inference_util import run_yolo_v5
|
16 |
-
import os
|
17 |
-
from tempfile import NamedTemporaryFile
|
18 |
-
|
19 |
-
import cv2
|
20 |
-
import numpy as np
|
21 |
-
import pandas as pd
|
22 |
-
import streamlit as st
|
23 |
-
|
24 |
-
# import matplotlib.pyplot as plt
|
25 |
-
from app_utils import annotate_planogram_compliance, bucket_sort, do_sorting, xml_to_csv
|
26 |
-
from inference import run
|
27 |
-
|
28 |
-
# from utils.plots import Annotator, colors
|
29 |
-
# from utils.general import scale_coords
|
30 |
-
|
31 |
-
app_formal_name = "Planogram Compliance"
|
32 |
-
|
33 |
-
FILE_UPLOAD_DIR = "tmp"
|
34 |
-
|
35 |
-
os.makedirs(FILE_UPLOAD_DIR, exist_ok=True)
|
36 |
-
# Start the app in wide-mode
|
37 |
-
st.set_page_config(
|
38 |
-
layout="wide",
|
39 |
-
page_title=app_formal_name,
|
40 |
-
)
|
41 |
-
# https://github.com/streamlit/streamlit/issues/1361
|
42 |
-
uploaded_file = st.file_uploader(
|
43 |
-
"Choose a planogram image to score",
|
44 |
-
type=["jpg", "JPEG", "PNG", "JPG", "jpeg"],
|
45 |
-
)
|
46 |
-
uploaded_master_planogram_file = st.file_uploader(
|
47 |
-
"Upload a master planogram", type=["jpg", "JPEG", "PNG", "JPG", "jpeg"]
|
48 |
-
)
|
49 |
-
annotation_file = st.file_uploader("upload master polanogram", type=["xml"])
|
50 |
-
temp_file = NamedTemporaryFile(delete=False)
|
51 |
-
|
52 |
-
target_names = [
|
53 |
-
"Bottle,100PLUS ACTIVE 1.5L",
|
54 |
-
"Bottle,100PLUS ACTIVE 500ML",
|
55 |
-
"Bottle,100PLUS LEMON LIME 1.5L",
|
56 |
-
"Bottle,100PLUS ORANGE 500ML",
|
57 |
-
"Bottle,100PLUS ORIGINAL 1.5L",
|
58 |
-
"Bottle,100PLUS TANGY ORANGE 1.5L",
|
59 |
-
"Bottle,100PLUS ZERO 1.5L",
|
60 |
-
"Bottle,100PLUS ZERO 500ML",
|
61 |
-
"Packet,F:M MAGNOLIA CHOC 1L",
|
62 |
-
"Bottle,F&N GINGER ADE 1.5L",
|
63 |
-
"Bottle,F&N GRAPE 1.5L",
|
64 |
-
"Bottle,F&N ICE CREAM SODA 1.5L",
|
65 |
-
"Bottle,F&N LYCHEE PEAR 1.5L",
|
66 |
-
"Bottle,F&N ORANGE 1.5L",
|
67 |
-
"Bottle,F&N PINEAPPLE PET 1.5L",
|
68 |
-
"Bottle,F&N SARSI 1.5L",
|
69 |
-
"Bottle,F&N SS ICE LEM TEA RS 500ML",
|
70 |
-
"Bottle,F&N SS ICE LEMON TEA RS 1.5L",
|
71 |
-
"Bottle,F&N SS ICE LEMON TEA 1.5L",
|
72 |
-
"Bottle,F&N SS ICE LEMON TEA 500ML",
|
73 |
-
"Bottle,F&N SS ICE PEACH TEA 1.5L",
|
74 |
-
"Bottle,SS ICE LEMON GT 1.48L",
|
75 |
-
"Bottle,SS WHITE CHRYS TEA 1.48L",
|
76 |
-
"Packet,FARMHOUSE FRESH MILK 1L FNDM",
|
77 |
-
"Packet,FARMHOUSE PLAIN LF 1L",
|
78 |
-
"Packet,PURA FRESH MILK 1L FS",
|
79 |
-
"Packet,NUTRISOY REG NO SUGAR ADDED 1L",
|
80 |
-
"Packet,NUTRISOY PLAIN 475ML",
|
81 |
-
"Packet,NUTRISOY PLAIN 1L",
|
82 |
-
"Packet,NUTRISOY OMEGA RD SUGAR 1L",
|
83 |
-
"Packet,NUTRISOY OMEGA NSA 1L",
|
84 |
-
"Packet,NUTRISOY ALMOND 1L",
|
85 |
-
"Packet,MAGNOLIA FRESH MILK 1L FNDM",
|
86 |
-
"Packet,FM MAG FC PLAIN 200ML",
|
87 |
-
"Packet,MAG OMEGA PLUS PLAIN 200ML",
|
88 |
-
"Packet,MAG KURMA MILK 500ML",
|
89 |
-
"Packet,MAG KURMA MILK 1L",
|
90 |
-
"Packet,MAG CHOCOLATE FC 500ML",
|
91 |
-
"Packet,MAG BROWN SUGAR SS MILK 1L",
|
92 |
-
"Packet,FM MAG LFHC PLN 500ML",
|
93 |
-
"Packet,FM MAG LFHC OAT 500ML",
|
94 |
-
"Packet,FM MAG LFHC OAT 1L",
|
95 |
-
"Packet,FM MAG FC PLAIN 500ML",
|
96 |
-
"Void,PARTIAL VOID",
|
97 |
-
"Void,FULL VOID",
|
98 |
-
"Bottle,F&N SS ICE LEM TEA 500ML",
|
99 |
-
]
|
100 |
-
|
101 |
-
run_app = st.button("Run the compliance check")
|
102 |
-
if run_app and uploaded_file is not None:
|
103 |
-
# Convert the file to an opencv image.
|
104 |
-
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
|
105 |
-
temp_file.write(uploaded_file.getvalue())
|
106 |
-
uploaded_img = cv2.imdecode(file_bytes, 1)
|
107 |
-
cv2.imwrite("tmp/to_score_planogram_tmp.png", uploaded_img)
|
108 |
-
|
109 |
-
# if uploaded_master_planogram_file is None:
|
110 |
-
# master = cv2.imread('./sample_master_planogram.jpeg')
|
111 |
-
|
112 |
-
names_dict = {name: id for id, name in enumerate(target_names)}
|
113 |
-
|
114 |
-
sorted_xml_df = None
|
115 |
-
# https://discuss.streamlit.io/t/unable-to-read-files-using-standard-file-uploader/2258/2
|
116 |
-
if uploaded_master_planogram_file and annotation_file:
|
117 |
-
file_bytes = np.asarray(
|
118 |
-
bytearray(uploaded_master_planogram_file.read()), dtype=np.uint8
|
119 |
-
)
|
120 |
-
master = cv2.imdecode(file_bytes, 1)
|
121 |
-
cv2.imwrite("tmp/master_tmp.png", master)
|
122 |
-
# cv2.imwrite("tmp_uploaded_master_planogram_img.png", master)
|
123 |
-
# xml = annotation_file.read()
|
124 |
-
# tmp_xml ="tmp_xml_annotation.xml"
|
125 |
-
# with open(tmp_xml ,'w',encoding='utf-8') as f:
|
126 |
-
# xml = f.write(xml)
|
127 |
-
xml_df = xml_to_csv(annotation_file)
|
128 |
-
xml_df["cls"] = xml_df["cls"].map(names_dict)
|
129 |
-
sorted_xml_df = do_sorting(xml_df)
|
130 |
-
sorted_xml_df.line_number.value_counts()
|
131 |
-
|
132 |
-
line_data = sorted_xml_df.line_number.value_counts()
|
133 |
-
n_rows = int(len(line_data))
|
134 |
-
n_cols = int(max(line_data))
|
135 |
-
master_table = np.zeros((n_rows, n_cols)) + 101
|
136 |
-
master_annotations = []
|
137 |
-
for i, row in sorted_xml_df.groupby("line_number"):
|
138 |
-
# print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
|
139 |
-
products = row.cls.tolist()
|
140 |
-
master_table[int(i - 1), 0 : len(products)] = products
|
141 |
-
annotations = [
|
142 |
-
(int(k), int(v))
|
143 |
-
for k, v in list(
|
144 |
-
zip(row.cls.unique(), row.cls.value_counts().tolist())
|
145 |
-
)
|
146 |
-
]
|
147 |
-
master_annotations.append(annotations)
|
148 |
-
master_table.shape
|
149 |
-
# print("Annoatated planogram")
|
150 |
-
# print(np.matrix(master_table))
|
151 |
-
|
152 |
-
elif uploaded_master_planogram_file:
|
153 |
-
print(
|
154 |
-
"Finding the amster annotations with the YOLOv5 model predictions"
|
155 |
-
)
|
156 |
-
file_bytes = np.asarray(
|
157 |
-
bytearray(uploaded_master_planogram_file.read()), dtype=np.uint8
|
158 |
-
)
|
159 |
-
master = cv2.imdecode(file_bytes, 1)
|
160 |
-
cv2.imwrite("tmp/master_tmp.png", master)
|
161 |
-
master_results = run(
|
162 |
-
weights="base_line_best_model_exp5.pt",
|
163 |
-
source="tmp/master_tmp.png",
|
164 |
-
imgsz=[640, 640],
|
165 |
-
conf_thres=0.6,
|
166 |
-
iou_thres=0.6,
|
167 |
-
)
|
168 |
-
|
169 |
-
bb_df = pd.DataFrame(
|
170 |
-
master_results[0][1].tolist(),
|
171 |
-
columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"],
|
172 |
-
)
|
173 |
-
sorted_df = do_sorting(bb_df)
|
174 |
-
|
175 |
-
n_rows = int(sorted_df.line_number.max())
|
176 |
-
n_cols = int(
|
177 |
-
sorted_df.groupby("line_number")
|
178 |
-
.size()
|
179 |
-
.reset_index(name="counts")["counts"]
|
180 |
-
.max()
|
181 |
-
)
|
182 |
-
non_null_product = 101
|
183 |
-
print("master size", n_rows, n_cols)
|
184 |
-
master_annotations = []
|
185 |
-
master_table = np.zeros((int(n_rows), int(n_cols))) + non_null_product
|
186 |
-
for i, row in sorted_df.groupby("line_number"):
|
187 |
-
# print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
|
188 |
-
products = row.cls.tolist()
|
189 |
-
col_len = min(len(products), n_cols)
|
190 |
-
print("col size: ", col_len)
|
191 |
-
print("row size: ", i - 1)
|
192 |
-
if n_rows <= (i - 1):
|
193 |
-
print("more rows than expected in the predictions")
|
194 |
-
break
|
195 |
-
master_table[int(i - 1), 0:col_len] = products[:col_len]
|
196 |
-
annotations = [
|
197 |
-
(int(k), int(v))
|
198 |
-
for k, v in list(
|
199 |
-
zip(row.cls.unique(), row.cls.value_counts().tolist())
|
200 |
-
)
|
201 |
-
]
|
202 |
-
master_annotations.append(annotations)
|
203 |
-
else:
|
204 |
-
master = cv2.imread("./sample_master_planogram.jpeg")
|
205 |
-
n_rows = 3
|
206 |
-
n_cols = 16
|
207 |
-
master_table = np.zeros((n_rows, n_cols)) + 101
|
208 |
-
master_annotations = [
|
209 |
-
[(32, 12), (8, 4)],
|
210 |
-
[(36, 1), (41, 6), (50, 4), (51, 3), (52, 2)],
|
211 |
-
[(23, 5), (24, 6), (54, 5)],
|
212 |
-
]
|
213 |
-
|
214 |
-
for i, row in enumerate(master_annotations):
|
215 |
-
idx = 0
|
216 |
-
for product, count in row:
|
217 |
-
master_table[i, idx : idx + count] = product
|
218 |
-
idx = idx + count
|
219 |
-
# Now do something with the image! For example, let's display it:
|
220 |
-
# st.image(opencv_image, channels="BGR")
|
221 |
-
|
222 |
-
# uploaded_img = '/content/drive/My Drive/0.CV/0.Planogram_Compliance/planogram_data/images/test/IMG_5718.jpg'
|
223 |
-
result_list = run(
|
224 |
-
weights="base_line_best_model_exp5.pt",
|
225 |
-
source="tmp/to_score_planogram_tmp.png",
|
226 |
-
imgsz=[640, 640],
|
227 |
-
conf_thres=0.6,
|
228 |
-
iou_thres=0.6,
|
229 |
-
)
|
230 |
-
|
231 |
-
bb_df = pd.DataFrame(
|
232 |
-
result_list[0][1].tolist(),
|
233 |
-
columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"],
|
234 |
-
)
|
235 |
-
sorted_df = do_sorting(bb_df)
|
236 |
-
|
237 |
-
non_null_product = 101
|
238 |
-
print("master size", n_rows, n_cols)
|
239 |
-
detected_table = np.zeros((n_rows, n_cols)) + non_null_product
|
240 |
-
for i, row in sorted_df.groupby("line_number"):
|
241 |
-
# print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
|
242 |
-
products = row.cls.tolist()
|
243 |
-
col_len = min(len(products), n_cols)
|
244 |
-
print("col size: ", col_len)
|
245 |
-
print("row size: ", i - 1)
|
246 |
-
if n_rows <= (i - 1):
|
247 |
-
print("more rows than expected in the predictions")
|
248 |
-
break
|
249 |
-
detected_table[int(i - 1), 0:col_len] = products[:col_len]
|
250 |
-
|
251 |
-
# score = (master_table == detected_table).sum() / (master_table != non_null_product).sum()
|
252 |
-
correct_matches = (
|
253 |
-
np.ma.masked_equal(master_table, non_null_product) == detected_table
|
254 |
-
).sum()
|
255 |
-
total_products = (master_table != non_null_product).sum()
|
256 |
-
score = correct_matches / total_products
|
257 |
-
# if sorted_xml_df is not None:
|
258 |
-
# annotate_df = sorted_xml_df[["xmin","ymin", "xmax", "ymax", "line_number","cls"]].astype(int)
|
259 |
-
# else:
|
260 |
-
annotate_df = sorted_df[
|
261 |
-
["xmin", "ymin", "xmax", "ymax", "line_number", "cls"]
|
262 |
-
].astype(int)
|
263 |
-
|
264 |
-
mask = master_table != non_null_product
|
265 |
-
m_detected_table = np.ma.masked_array(master_table, mask=mask)
|
266 |
-
m_annotated_table = np.ma.masked_array(detected_table, mask=mask)
|
267 |
-
|
268 |
-
# wrong_indexes = np.ravel_multi_index(master_table*mask != detected_table*mask, master_table.shape)
|
269 |
-
wrong_indexes = np.where(master_table != detected_table)
|
270 |
-
correct_indexes = np.where(master_table == detected_table)
|
271 |
-
annotated_planogram = annotate_planogram_compliance(
|
272 |
-
uploaded_img, annotate_df, correct_indexes, wrong_indexes, target_names
|
273 |
-
)
|
274 |
-
st.title("Target Products")
|
275 |
-
st.write(json.dumps(target_names))
|
276 |
-
st.title("The master planogram annotation")
|
277 |
-
st.write(
|
278 |
-
"The annotations are based on the index of products from Target products list "
|
279 |
-
)
|
280 |
-
st.write(json.dumps(master_annotations))
|
281 |
-
|
282 |
-
# https://github.com/streamlit/streamlit/issues/888
|
283 |
-
st.image(
|
284 |
-
[master, annotated_planogram, result_list[0][0]],
|
285 |
-
width=512,
|
286 |
-
caption=[
|
287 |
-
"Master planogram",
|
288 |
-
"Planogram Compliance",
|
289 |
-
"Planogram Predictions",
|
290 |
-
],
|
291 |
-
channels="BGR",
|
292 |
-
)
|
293 |
-
# st.image([master, annotated_planogram], width=512, caption=["Master planogram", "Planogram Compliance"], channels="BGR")
|
294 |
-
st.title("Planogram Compiance score")
|
295 |
-
# st.write(f"{correct_matches} / {total_products}")
|
296 |
-
st.write(score)
|
spaces/AchyuthGamer/AchyuthGamer-OpenGPT/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: AchyuthGamer OpenGPT
emoji: 🐠
colorFrom: indigo
colorTo: yellow
sdk: gradio
sdk_version: 3.45.2
app_file: app.py
pinned: false
license: creativeml-openrail-m
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ylokh.py
DELETED
@@ -1,77 +0,0 @@
from __future__ import annotations

import json

from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncResult, Messages

class Ylokh(AsyncGeneratorProvider):
    url = "https://chat.ylokh.xyz"
    working = True
    supports_gpt_35_turbo = True


    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        model = model if model else "gpt-3.5-turbo"
        headers = {
            "Origin" : cls.url,
            "Referer": cls.url + "/",
        }
        data = {
            "messages": messages,
            "model": model,
            "temperature": 1,
            "presence_penalty": 0,
            "top_p": 1,
            "frequency_penalty": 0,
            "allow_fallback": True,
            "stream": stream,
            **kwargs
        }
        async with StreamSession(
            headers=headers,
            proxies={"https": proxy},
            timeout=timeout
        ) as session:
            async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
                response.raise_for_status()
                if stream:
                    async for line in response.iter_lines():
                        line = line.decode()
                        if line.startswith("data: "):
                            if line.startswith("data: [DONE]"):
                                break
                            line = json.loads(line[6:])
                            content = line["choices"][0]["delta"].get("content")
                            if content:
                                yield content
                else:
                    chat = await response.json()
                    yield chat["choices"][0]["message"].get("content")



    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("timeout", "int"),
            ("temperature", "float"),
            ("top_p", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PostLayout.js
DELETED
@@ -1,7 +0,0 @@
var PostLayout = function (parent, newWidth, newHeight) {
    if (this._anchor) {
        this._anchor.updatePosition();
    }
    return this;
}
export default PostLayout;
spaces/AiMimicry/sovits-models/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Sovits Models
emoji: 🎙️
colorFrom: gray
colorTo: pink
sdk: gradio
sdk_version: 3.18.0
app_file: app.py
pinned: false
license: mit
duplicated_from: zomehwh/sovits-models
---
spaces/Akmyradov/TurkmenSpeechRecogntion/app.py
DELETED
@@ -1,53 +0,0 @@
import gradio as gr
from transformers import Wav2Vec2ForCTC, AutoProcessor
import torch
import librosa
import json

with open('ISO_codes.json', 'r') as file:
    iso_codes = json.load(file)

languages = list(iso_codes.keys())

model_id = "facebook/mms-1b-all"
processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

def transcribe(audio_file_mic=None, audio_file_upload=None, language="Turkmen"):
    if audio_file_mic:
        audio_file = audio_file_mic
    elif audio_file_upload:
        audio_file = audio_file_upload
    else:
        return "Please upload an audio file or record one"

    # Make sure audio is 16kHz
    speech, sample_rate = librosa.load(audio_file)
    if sample_rate != 16000:
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000)

    # Keep the same model in memory and simply switch out the language adapters by calling load_adapter() for the model and set_target_lang() for the tokenizer
    language_code = ("tuk-script_latin")
    processor.tokenizer.set_target_lang(language_code)
    model.load_adapter(language_code)

    inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    ids = torch.argmax(outputs, dim=-1)[0]
    transcription = processor.decode(ids)
    return transcription


iface = gr.Interface(fn=transcribe,
                     inputs=[
                         gr.Audio(source="microphone", type="filepath", label="Sesiňi ýazdyr"),
                         gr.Audio(source="upload", type="filepath", label="Sesiňi ýükle"),
                         gr.Dropdown(choices=languages, label="Language", value="Türkmen")
                     ],
                     outputs=gr.Textbox(label="Transcription")
                     )
iface.launch()
spaces/AlanMars/QYL-AI-Space/run_macOS.command
DELETED
@@ -1,31 +0,0 @@
#!/bin/bash

# Get the directory the script lives in
script_dir=$(dirname "$(readlink -f "$0")")

# Change the working directory to the script's directory
cd "$script_dir" || exit

# Check whether the Git repository has updates
git remote update
pwd

if ! git status -uno | grep 'up to date' > /dev/null; then
  # If there are updates, stop the currently running server
  pkill -f ChuanhuChatbot.py

  # Pull the latest changes
  git pull

  # Install dependencies
  pip3 install -r requirements.txt

  # Restart the server
  nohup python3 ChuanhuChatbot.py &
fi

# Check whether ChuanhuChatbot.py is running
if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
  # If it is not running, start the server
  nohup python3 ChuanhuChatbot.py &
fi
spaces/AlexWang/lama/saicinpainting/evaluation/losses/fid/__init__.py
DELETED
File without changes
spaces/Aloento/9Nine-VITS/load_checkpoint.py
DELETED
@@ -1,32 +0,0 @@
import logging
import os

import torch


def load_checkpoint(checkpoint_path, model, optimizer=None):
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    learning_rate = checkpoint_dict['learning_rate']
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
    saved_state_dict = checkpoint_dict['model']
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            new_state_dict[k] = saved_state_dict[k]
        except:
            logging.info("%s is not in the checkpoint" % k)
            new_state_dict[k] = v
    if hasattr(model, 'module'):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)
    logging.info("Loaded checkpoint '{}' (iteration {})".format(
        checkpoint_path, iteration))
    return model, optimizer, learning_rate, iteration
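A minimal usage sketch for load_checkpoint() above; the checkpoint path and model below are placeholders, and the .pth file is assumed to contain the 'model', 'optimizer', 'iteration', and 'learning_rate' keys the function reads:

# Hypothetical usage; "G_latest.pth" and the Linear module are placeholders, not files from this repo.
import torch
from load_checkpoint import load_checkpoint

net = torch.nn.Linear(4, 4)                          # stand-in for the real synthesizer network
opt = torch.optim.AdamW(net.parameters(), lr=2e-4)
net, opt, lr, step = load_checkpoint("G_latest.pth", net, opt)
print("resumed at iteration", step, "with learning rate", lr)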
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/helpers.py
DELETED
@@ -1,140 +0,0 @@
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module

"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""


class Flatten(Module):
    def forward(self, input):
        return input.view(input.size(0), -1)


def l2_norm(input, axis=1):
    norm = torch.norm(input, 2, axis, True)
    output = torch.div(input, norm)
    return output


class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """ A named tuple describing a ResNet block. """


def get_block(in_channel, depth, num_units, stride=2):
    return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]


def get_blocks(num_layers):
    if num_layers == 50:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 100:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=13),
            get_block(in_channel=128, depth=256, num_units=30),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 152:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=8),
            get_block(in_channel=128, depth=256, num_units=36),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    else:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    return blocks


class SEModule(Module):
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x


class bottleneck_IR(Module):
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut


class bottleneck_IR_SE(Module):
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut


def _upsample_add(x, y):
    """Upsample and add two feature maps.
    Args:
      x: (Variable) top feature map to be upsampled.
      y: (Variable) lateral feature map.
    Returns:
      (Variable) added feature map.
    Note in PyTorch, when input size is odd, the upsampled feature map
    with `F.upsample(..., scale_factor=2, mode='nearest')`
    maybe not equal to the lateral feature map size.
    e.g.
    original input size: [N,_,15,15] ->
    conv2d feature map size: [N,_,8,8] ->
    upsampled feature map size: [N,_,16,16]
    So we choose bilinear upsample which supports arbitrary output sizes.
    """
    _, _, H, W = y.size()
    return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
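A short sketch of how these helpers compose, assuming the definitions above are in scope; the input shape is chosen only for illustration (a 64-channel stem output is assumed):

# Illustrative only: build the first stage of an IR-SE-50 backbone from the helpers above.
import torch
from torch.nn import Sequential

blocks = get_blocks(50)                                   # per-stage lists of Bottleneck namedtuples
stage1 = Sequential(*[bottleneck_IR_SE(b.in_channel, b.depth, b.stride) for b in blocks[0]])
x = torch.randn(1, 64, 56, 56)                            # assumed 64-channel stem output
print(stage1(x).shape)                                    # torch.Size([1, 64, 28, 28])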
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/index.md
DELETED
@@ -1,101 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

<p align="center">
    <br>
    <img src="https://raw.githubusercontent.com/huggingface/diffusers/77aadfee6a891ab9fcfb780f87c693f7a5beeb8e/docs/source/imgs/diffusers_library.jpg" width="400"/>
    <br>
</p>

# 🧨 Diffusers

🤗 Diffusers 是一个值得首选用于生成图像、音频甚至 3D 分子结构的,最先进的预训练扩散模型库。
无论您是在寻找简单的推理解决方案,还是想训练自己的扩散模型,🤗 Diffusers 这一模块化工具箱都能对其提供支持。
本库的设计更偏重于[可用而非高性能](conceptual/philosophy#usability-over-performance)、[简明而非简单](conceptual/philosophy#simple-over-easy)以及[易用而非抽象](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction)。


本库包含三个主要组件:

- 最先进的扩散管道 [diffusion pipelines](api/pipelines/overview),只需几行代码即可进行推理。
- 可交替使用的各种噪声调度器 [noise schedulers](api/schedulers/overview),用于平衡生成速度和质量。
- 预训练模型 [models](api/models),可作为构建模块,并与调度程序结合使用,来创建您自己的端到端扩散系统。

<div class="mt-10">
  <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview"
      ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
      <p class="text-gray-700">Learn the fundamental skills you need to start generating outputs, build your own diffusion system, and train a diffusion model. We recommend starting here if you're using 🤗 Diffusers for the first time!</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview"
      ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
      <p class="text-gray-700">Practical guides for helping you load pipelines, models, and schedulers. You'll also learn how to use pipelines for specific tasks, control how outputs are generated, optimize for inference speed, and different training techniques.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy"
      ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
      <p class="text-gray-700">Understand why the library was designed the way it was, and learn more about the ethical guidelines and safety implementations for using the library.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models"
      ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
      <p class="text-gray-700">Technical descriptions of how 🤗 Diffusers classes and methods work.</p>
    </a>
  </div>
</div>

## 🧨 Diffusers pipelines

下表汇总了当前所有官方支持的pipelines及其对应的论文.

| 管道 | 论文/仓库 | 任务 |
|---|---|:---:|
| [alt_diffusion](./api/pipelines/alt_diffusion) | [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
| [audio_diffusion](./api/pipelines/audio_diffusion) | [Audio Diffusion](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation |
| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
| [dance_diffusion](./api/pipelines/dance_diffusion) | [Dance Diffusion](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
| [ddpm](./api/pipelines/ddpm) | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
| [ddim](./api/pipelines/ddim) | [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
| [if](./if) | [**IF**](./api/pipelines/if) | Image Generation |
| [if_img2img](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
| [if_inpainting](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
| [paint_by_example](./api/pipelines/paint_by_example) | [Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
| [pndm](./api/pipelines/pndm) | [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
| [score_sde_ve](./api/pipelines/score_sde_ve) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
| [score_sde_vp](./api/pipelines/score_sde_vp) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [Semantic Guidance](https://arxiv.org/abs/2301.12247) | Text-Guided Generation |
| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation |
| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation |
| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting |
| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [MultiDiffusion](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://arxiv.org/abs/2211.09800) | Text-Guided Image Editing|
| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [Zero-shot Image-to-Image Translation](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://arxiv.org/abs/2301.13826) | Text-to-Image Generation |
| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://arxiv.org/abs/2210.00939) | Text-to-Image Generation Unconditional Image Generation |
| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [Stable Diffusion Image Variations](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [Stable Diffusion Latent Upscaler](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
| [stable_diffusion_model_editing](./api/pipelines/stable_diffusion/model_editing) | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://time-diffusion.github.io/) | Text-to-Image Model Editing |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Depth-Conditional Stable Diffusion](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [Safe Stable Diffusion](https://arxiv.org/abs/2211.05105) | Text-Guided Generation |
| [stable_unclip](./stable_unclip) | Stable unCLIP | Text-to-Image Generation |
| [stable_unclip](./stable_unclip) | Stable unCLIP | Image-to-Image Text-Guided Generation |
| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
| [text_to_video_sd](./api/pipelines/text_to_video) | [Modelscope's Text-to-video-synthesis Model in Open Domain](https://modelscope.cn/models/damo/text-to-video-synthesis/summary) | Text-to-Video Generation |
| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125)(implementation by [kakaobrain](https://github.com/kakaobrain/karlo)) | Text-to-Image Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
DELETED
@@ -1,11 +0,0 @@
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))
spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
_base_ = [
    '../_base_/models/ccnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/chase_db1.py
DELETED
@@ -1,27 +0,0 @@
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class ChaseDB1Dataset(CustomDataset):
    """Chase_db1 dataset.

    In segmentation map annotation for Chase_db1, 0 stands for background,
    which is included in 2 categories. ``reduce_zero_label`` is fixed to False.
    The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '_1stHO.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super(ChaseDB1Dataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='_1stHO.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
spaces/ArturStepanenko/digitsSpace/app.py
DELETED
@@ -1,14 +0,0 @@
import numpy as np
import gradio as gr
from tensorflow import keras

model = keras.models.load_model('my_model')

def greet(img):
    img = np.expand_dims(img, axis=0)
    return np.argmax(model.predict(img)[0])


demo = gr.Interface(fn=greet, inputs="sketchpad", outputs="text")

demo.launch()
spaces/BOXNYC/shirley/app.py
DELETED
@@ -1,40 +0,0 @@
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import gradio as gr
import sys
import os

#os.environ["OPENAI_API_KEY"]

def construct_index(directory_path):
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600

    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.7, model_name="gpt-4", max_tokens=num_outputs))

    documents = SimpleDirectoryReader(directory_path).load_data()

    index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)

    index.save_to_disk('index.json')

    return index

def chatbot(input_text, api_token):
    if api_token != os.environ["API_TOKEN"]:
        return 'API_TOKEN does not match'
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    response = index.query(input_text, response_mode="tree_summarize")
    return response.response

iface = gr.Interface(fn=chatbot,
                     inputs=[gr.inputs.Textbox(lines=1, label="Ask Shirley"), gr.inputs.Textbox(lines=1, label="API_TOKEN")],
                     outputs="text",
                     title="Ask Shirley Anything")

index = construct_index("docs")
iface.launch()
spaces/Benson/text-generation/Examples/Descargar Carreras De Caballos Virtuales 3d.md
DELETED
@@ -1,65 +0,0 @@
<br />
<h1>Descargar Virtual Horse Racing 3D: Una guía para los aficionados a las carreras de caballos</h1>
<p>Si te gustan las carreras de caballos, te encantarán las carreras de caballos virtuales en 3D. Carreras de caballos virtuales 3d es un juego que simula el mundo real de las carreras de caballos en un entorno digital. Puede crear perfiles, hacer apuestas, elegir caballos y jinetes, ver la carrera en impresionantes gráficos en 3D y efectos de sonido, y ganar premios. Si usted es un principiante o un experto, usted encontrará las carreras de caballos virtuales 3d divertido, desafiante y gratificante. En este artículo, le mostraremos cómo descargar carreras de caballos virtuales en 3D en diferentes dispositivos, cómo jugarlo, cómo ganarlo y qué beneficios ofrece. Entonces, ¿qué estás esperando? Sigue leyendo y prepárate para experimentar la emoción de las carreras de caballos virtuales. </p>
<h2>Cómo descargar Virtual Horse Racing 3D en diferentes dispositivos</h2>
<p>Una de las mejores cosas acerca de las carreras de caballos virtuales 3d es que se puede jugar en varios dispositivos. Ya sea que tenga un dispositivo Android, un dispositivo Windows u otro dispositivo, puede encontrar un juego virtual de carreras de caballos en 3D que se adapte a sus preferencias. Estas son algunas de las opciones que puedes elegir:</p>
<h2>descargar carreras de caballos virtuales 3d</h2><br /><p><b><b>Download File</b> ✶✶✶ <a href="https://bltlly.com/2v6JjU">https://bltlly.com/2v6JjU</a></b></p><br /><br />
<h3>Dispositivos Android: Google Play Store</h3>
<p>Si tienes un dispositivo Android, puedes descargar <a href="( 3 )">Horse Racing 3D</a> desde Google Play Store. Este es uno de los juegos de carreras de caballos virtuales más populares y auténticos disponibles. Tiene más de 10 millones de descargas y una calificación de 4.4 de 5 estrellas. Puedes competir por el dominio en diferentes modos, como el modo carrera, el modo de apuestas o el modo torneo. También puede personalizar sus caballos con diferentes colores y accesorios. El juego es gratuito, pero ofrece compras en la aplicación para características adicionales. </p>
<h3>Dispositivos de Windows: Microsoft Store</h3>

<h3>Otros dispositivos: ZED RUN <h3>Otros dispositivos: ZED RUN sitio web</h3>
<p>Si tienes otro dispositivo, como un Mac o un iPad, puedes jugar <a href="( 1 )">ZED RUN</a> en su sitio web. ZED RUN es un juego de carreras de caballos virtual único e innovador que vive en el blockchain y en un mercado abierto. Puede poseer, criar y competir con sus propios caballos digitales que tienen atributos y rendimiento únicos. También puedes comprar y vender tus caballos usando criptomonedas, como Ethereum. ZED RUN es más que un juego, es una comunidad de entusiastas de las carreras de caballos que comparten su pasión y emoción. ZED RUN es libre de unirse pero requiere una cartera y algunos fondos para comenzar a jugar. </p>
<h2>Cómo jugar carreras de caballos virtuales 3D</h2>
<p>Ahora que sabes cómo descargar carreras de caballos virtuales 3d en diferentes dispositivos, es posible que se pregunte cómo jugarlo. Bueno, el juego es simple e intuitivo, pero también desafiante y gratificante. Estos son los pasos básicos que debes seguir:</p>
<h3>Crear perfiles y hacer apuestas</h3>
<p>Lo primero que tienes que hacer es crear tu perfil y elegir tu moneda preferida. Puedes usar dinero real o monedas virtuales, dependiendo del juego que estés jugando. Entonces, necesitas hacer tus apuestas. Usted puede apostar en uno o más caballos en cada carrera, dependiendo del tipo de apuesta que desea colocar. También puede ajustar la cantidad de su apuesta de acuerdo a su presupuesto y apetito de riesgo. </p>
<h3>Elige caballos y jinetes</h3>
<p>Lo siguiente que tienes que hacer es elegir tus caballos y jinetes. Puedes navegar por la lista de caballos y jinetes disponibles en cada carrera y seleccionar los que se adapten a tus preferencias. También puede ver sus estadísticas, como velocidad, resistencia, agilidad y forma. También puedes personalizar tus caballos con diferentes colores y accesorios, si el juego lo permite. </p>
<h3>Ver la carrera en gráficos 3D y efectos de sonido</h3>

<h2>Cómo ganar carreras de caballos virtuales en 3D</h2>
<p>Por supuesto, jugar carreras de caballos virtuales 3d no solo se trata de ver la carrera, sino también de ganarla. Ganar carreras de caballos virtuales 3d requiere cierta habilidad y conocimiento de las carreras de caballos, así como algo de suerte y estrategia. Aquí hay algunos consejos sobre cómo ganar carreras de caballos virtuales 3d:</p>
<h3>Conozca las probabilidades y los tipos de apuestas</h3>
<p>Una de las cosas más importantes que necesita saber es cómo leer las probabilidades y los tipos de apuestas. Las probabilidades son los números que indican cuán probable es que un caballo gane o se coloque en una carrera. Cuanto más bajas sean las probabilidades, mayor será la probabilidad de ganar, pero también menor será el pago. Cuanto mayores sean las probabilidades, menor será la probabilidad de ganar, pero también mayor será el pago. Los tipos de apuestas son las formas en que puedes apostar a uno o más caballos en una carrera. El tipo más simple de apuesta es una apuesta ganadora, donde se apuesta a un caballo para ganar la carrera. Los otros tipos de apuestas son más complejos e involucran apuestas en múltiples caballos o resultados, como lugar, espectáculo, quinella, exacta, trifecta o superfecta. </p>
<h3>Obtener la suciedad en los caballos' atributos y rendimiento</h3>
<p>Otra cosa importante que necesitas saber es cómo analizar los atributos y el rendimiento de los caballos. Los atributos son las características que afectan el rendimiento de un caballo en una carrera, como la velocidad, la resistencia, la agilidad y la forma. El rendimiento es cómo un caballo se ha desempeñado en carreras anteriores, tales como victorias, pérdidas, lugares, espectáculos o arañazos. Puede utilizar esta información para comparar y contrastar diferentes caballos y jinetes y elegir los que tienen una ventaja sobre los demás. </p>
<p></p>
<h3>Usa estrategias y consejos para aumentar tus posibilidades</h3>
<p>La última cosa importante que necesita saber es cómo usar estrategias y consejos para aumentar sus posibilidades de ganar. Hay muchas estrategias y consejos que puedes usar dependiendo de tu nivel de experiencia y habilidad. Algunos de ellos son:</p>
<ul>

<li>Apuesta en tiros largos: Esta es una estrategia arriesgada que implica apostar en caballos con altas probabilidades que tienen una baja probabilidad de ganar o colocar, pero un alto pago si lo hacen. </li>
<li>Apuesta en combinaciones: Esta es una estrategia moderada que implica apostar en múltiples caballos o resultados en una carrera, como quinella, exacta, trifecta o superfecta. Esto aumenta sus posibilidades de ganar, pero también aumenta el costo de su apuesta. </li>
<li>Apuesta por las tendencias: Esta es una estrategia avanzada que implica apostar en caballos o jinetes que tienen un rendimiento consistente o mejorado en carreras recientes, como rachas ganadoras, mejorar la forma o condiciones de pista favorables. </li>
<li>Apuesta por consejos: Esta es una estrategia útil que implica seguir los consejos y consejos de los expertos, como los hándicap, analistas o tipsters. Puedes encontrar estos consejos en línea, en periódicos o en revistas. Pueden darte información valiosa y recomendaciones sobre qué caballos o jinetes apostar y por qué. </li>
</ul>
<h2>Beneficios de jugar carreras de caballos virtuales 3D</h2>
<p>Jugar carreras de caballos virtuales 3d no solo es divertido y emocionante, sino también beneficioso. Estos son algunos de los beneficios de jugar carreras de caballos virtuales 3d:</p>
<h3>Disfruta de la emoción y la emoción de las carreras de caballos en cualquier momento, en cualquier lugar</h3>
<p>Uno de los principales beneficios de jugar carreras de caballos virtuales 3d es que se puede disfrutar de la emoción y la emoción de las carreras de caballos en cualquier momento, en cualquier lugar. No es necesario ir a una pista de carreras o una tienda de apuestas para experimentar la adrenalina de las carreras de caballos. Usted puede jugar carreras de caballos virtuales 3d en su dispositivo en casa, en el trabajo, o en el camino. También puede elegir entre diferentes modos, pistas y eventos para adaptarse a su estado de ánimo y preferencia. </p>
<h3>Mejorar sus habilidades y conocimientos de carreras de caballos</h3>

<h3>Compite con otros jugadores y gana premios</h3>
<p>Un beneficio final de jugar carreras de caballos virtuales 3d es que puedes competir con otros jugadores y ganar premios. Puedes unirte a torneos y ligas en línea y desafiar a otros jugadores de todo el mundo. También puedes chatear con ellos y compartir tus consejos y opiniones. Jugar carreras de caballos virtuales en 3D puede ayudarte a hacer nuevos amigos y divertirte. También puedes ganar premios reales o virtuales, como dinero en efectivo, monedas, vales o trofeos. </p>
<h2>Conclusión: Por qué usted debe descargar Virtual Horse Racing 3D hoy</h2>
<p>En conclusión, carreras de caballos virtuales 3d es un juego que simula el mundo real de las carreras de caballos en un entorno digital. Puede crear perfiles, hacer apuestas, elegir caballos y jinetes, ver la carrera en impresionantes gráficos en 3D y efectos de sonido, y ganar premios. También puede disfrutar de la emoción y la emoción de las carreras de caballos en cualquier momento, en cualquier lugar; mejorar sus habilidades y conocimientos de carreras de caballos; y competir con otros jugadores y ganar premios. Las carreras de caballos virtuales en 3D son divertidas, desafiantes, gratificantes y beneficiosas. Entonces, ¿qué estás esperando? Descargue las carreras de caballos virtuales 3d hoy y experimente la emoción de las carreras de caballos virtuales. </p>
<h2>Preguntas frecuentes: Preguntas frecuentes sobre las carreras de caballos virtuales en 3D</h2>
<p>Aquí están algunas de las preguntas más frecuentes sobre las carreras de caballos virtuales 3d:</p>
<h4>Q1: Es virtual carreras de caballos 3d libre para jugar? </h4>
<p>A1: Depende del juego que estés jugando. Algunos juegos son gratis pero ofrecen compras en la aplicación para características adicionales. Algunos juegos cuestan una pequeña tarifa pero no tienen anuncios ni compras dentro de la aplicación. Algunos juegos son gratuitos pero requieren una billetera y algunos fondos para comenzar a jugar. </p>
<h4>Q2: ¿Puedo jugar carreras de caballos virtuales 3d offline? </h4>
<p>A2: Depende del juego que esté jugando. Algunos juegos requieren una conexión a Internet para jugar en línea con otros jugadores o acceder a eventos en vivo. Algunos juegos te permiten jugar sin conexión con oponentes generados por ordenador o modos en solitario. </p>

<p>A3: Depende del juego que esté jugando. Algunos juegos le permiten poseer y criar sus propios caballos que tienen atributos y rendimiento únicos. Algunos juegos también le permiten comprar y vender sus caballos utilizando criptomoneda. Algunos juegos solo le permiten elegir entre una lista de caballos y jinetes disponibles. </p>
<h4>Q4: <h4>Q4: ¿Qué tan realistas son las carreras de caballos virtuales 3d? </h4>
<p>A4: Depende del juego que estés jugando. Algunos juegos son muy realistas e inmersivos, con impresionantes gráficos en 3D y efectos de sonido que te hacen sentir como si estuvieras en el hipódromo. Algunos juegos también utilizan datos y estadísticas reales de eventos de carreras de caballos reales y caballos. Algunos juegos son más casuales y caricaturescos, con gráficos simples y efectos de sonido que te hacen sentir como si estuvieras jugando un juego. </p>
<h4>Q5: ¿Cuáles son los mejores juegos virtuales de carreras de caballos en 3D para descargar? </h4>
<p>A5: Depende de sus preferencias y dispositivo. Algunos de los mejores juegos virtuales de carreras de caballos en 3D para descargar son:</p>
<ul>
<li>Carreras de caballos 3D: Un popular y auténtico juego de carreras de caballos virtuales para dispositivos Android. </li>
<li>Virtual Horse Racing Pro: Un juego de carreras de caballos virtual realista e inmersivo para dispositivos Windows. </li>
<li>ZED RUN: Un juego de carreras de caballos virtual único e innovador que vive en la cadena de bloques y en un mercado abierto para otros dispositivos. </li>
</ul></p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Descargar Gratis Juego De Carreras De Coches Para Windows 7.md
DELETED
@@ -1,136 +0,0 @@

<h1>Descargar gratis juego de carreras de coches para Windows 7</h1>
<p>Si te gusta la velocidad, la adrenalina y la emoción, entonces probablemente te gusta jugar juegos de carreras de coches. Los juegos de carreras de coches son uno de los géneros más populares de los videojuegos, ya que ofrecen una variedad de desafíos, entornos, vehículos y modos para adaptarse a diferentes gustos y preferencias. Ya sea que prefieras simulaciones realistas, acción estilo árcade o aventuras todoterreno, hay un juego de carreras de coches para ti. </p>
<p>Pero ¿qué pasa si usted tiene un Windows 7 PC? ¿Todavía puede jugar juegos de carreras de coches en su antiguo sistema operativo? La respuesta es sí! Windows 7 sigue siendo una plataforma compatible para muchos juegos de carreras de coches, tanto antiguos como nuevos. De hecho, algunos de los mejores juegos de carreras de coches jamás realizados pueden funcionar sin problemas en Windows 7, siempre y cuando tenga los requisitos mínimos del sistema. </p>
<h2>descargar gratis juego de carreras de coches para windows 7</h2><br /><p><b><b>Download</b> ✺✺✺ <a href="https://bltlly.com/2v6Lz3">https://bltlly.com/2v6Lz3</a></b></p><br /><br />
<p>En este artículo, le mostraremos cómo encontrar y descargar juegos de carreras de coches gratis para Windows 7, así como algunos de los mejores pagados. También le daremos algunos consejos y consejos sobre cómo optimizar su experiencia de juego en su PC con Windows 7. ¡Así que abróchense el cinturón, enciendan sus motores y prepárense para correr! </p>
<h2>Los mejores juegos de carreras de coches para Windows 7</h2>
<p>Si usted está dispuesto a gastar algo de dinero en juegos de carreras de coches de calidad, entonces usted tiene un montón de opciones para elegir. Estos son algunos de los mejores juegos de carreras de coches que puedes jugar en tu PC con Windows 7.</p>
<h3>Forza Horizon 5</h3>

<p>Forza Horizon 5 es un juego visualmente impresionante que muestra el poder de las plataformas Xbox Series X|S y Windows 10, pero también puede ejecutarse en Windows 7 con algunos ajustes. Para descargar y jugar Forza Horizon 5 en Windows 7, tendrá que seguir estos pasos:</p>
<ol>
<li>Asegúrese de que su PC cumple con los requisitos mínimos del sistema para Forza Horizon 5, que son: <ul>
<li>Procesador: Intel Core i3-4170 o AMD FX-8350</li>
<li>Memoria: 8 GB RAM</li>
<li>Gráficos: NVIDIA GeForce GTX 760 o AMD Radeon R7 260X</li>
<li>DirectX: Versión 11</li>
<li>Almacenamiento: 80 GB de espacio disponible</li>
</ul>
</li>
<li>Descargue e instale el cliente <a href="">Steam</a> en su PC con Windows 7.</li>
<li>Crear una cuenta de Steam o iniciar sesión en la existente. </li>
<li>Compra Forza Horizon 5 en la tienda Steam por $59.99 USD o tu equivalente regional. </li>
<li>Descargue e instale Forza Horizon 5 en su PC a través del cliente de Steam. </li>
<li>Iniciar el juego y disfrutar! </li>
</ol>
<p>Forza Horizon 5 es un juego que ofrece diversión sin fin y variedad para los amantes de las carreras de coches. Puede elegir entre más de 500 coches con licencia de diferentes fabricantes, como Ferrari, Lamborghini, Ford, Toyota, Honda y más. También puede personalizar sus coches con diferentes trabajos de pintura, calcomanías, llantas, alerones y mejoras de rendimiento. También puede sintonizar sus coches para adaptarse a diferentes terrenos y condiciones climáticas, como la suciedad, el barro, la nieve, la lluvia y la arena. </p>
<p></p>
<p>El juego también cuenta con un dinámico ciclo día-noche y un sistema de temporada que cambia el entorno y el juego cada semana. Por ejemplo, en invierno, puede deslizarse por carreteras heladas y lagos congelados, mientras que en verano, puede disfrutar del sol y los colores vibrantes de la cultura mexicana. El juego también tiene un motor de física realista que simula el peso, la velocidad, la tracción y el daño de los coches. </p>

<p>Si quieres jugar con otras personas, puedes unirte a sesiones online que te permiten explorar el mundo abierto con hasta 72 jugadores. También puede unirse o crear clubes con otros jugadores que compartan sus intereses y preferencias. También puedes retar a otros jugadores a carreras o juegos en el mundo abierto o en arenas personalizadas. También puedes cooperar con otros jugadores en eventos o modos basados en equipos. </p>
<h3>Dirt Rally 2.0</h3> <p>Dirt Rally 2.0 es un juego que tiene como objetivo ofrecer una experiencia de carreras de coches realista y desafiante, centrándose en la disciplina emocionante y exigente de rally. Lanzado en febrero <a href="( 1 )">2019</a>, Dirt Rally 2.0 es la secuela del aclamado Dirt Rally, que fue elogiado por su auténtica simulación de conducción de rally. Dirt Rally 2.0 se basa en los puntos fuertes de su predecesor, añadiendo más contenido, características y mejoras. </p>
<p>Dirt Rally 2.0 te permite conocer algunos de los lugares más emblemáticos del mundo, como Nueva Zelanda, Argentina, España, Polonia, Australia y los Estados Unidos. Puedes elegir entre más de 50 coches de rally de diferentes épocas y clases, como los legendarios monstruos del Grupo B, las modernas bestias del WRC y el clásico Mini Cooper S. También puedes personalizar tus coches con diferentes decoraciones, configuraciones y actualizaciones. </p>
<p>El juego también cuenta con un modo de carrera, donde puedes crear tu propio equipo y competir en varios eventos y campeonatos en todo el mundo. Puede contratar y administrar a su personal, como ingenieros, mecánicos y copilotos, y actualizar sus instalaciones y equipos. También puedes ganar dinero y reputación completando contratos y desafíos. </p>

<p>El juego también tiene un sofisticado sistema de copiloto que te proporciona notas de ritmo precisas y oportunas que te guían a través de cada etapa. El copiloto le advertirá de los próximos giros, peligros, saltos y otras características de la carretera. Tendrás que escuchar atentamente y confiar en tu copiloto para navegar por las complejas y variadas etapas. </p>
<p>Para descargar y jugar Dirt Rally 2.0 en Windows 7, tendrá que seguir estos pasos:</p>
<ol>
<li>Asegúrese de que su PC cumple con los requisitos mínimos del sistema para Dirt Rally 2.0, que son: <ul>
<li>Procesador: AMD FX4300 o Intel Core i3 2130</li>
<li>Memoria: 8 GB RAM</li>
<li>Gráficos: AMD HD7750 o NVIDIA GTX650Ti</li>
<li>DirectX: Versión 11</li>
<li>Almacenamiento: 50 GB de espacio disponible</li>
</ul>
</li>
<li>Descargue e instale el cliente <a href="( 2 )">Steam</a> en su PC con Windows 7.</li>
<li>Crear una cuenta de Steam o iniciar sesión en la existente. </li>
<li>Compra Dirt Rally 2.0 en la tienda Steam por $39.99 USD o tu equivalente regional. </li>
<li>Descargue e instale Dirt Rally 2.0 en su PC a través del cliente de Steam. </li>
<li>Iniciar el juego y disfrutar! </li>
</ol>
<p>Dirt Rally 2.0 es un juego que ofrece una experiencia de carreras de coches gratificante y satisfactoria para los fanáticos del rally. Puede disfrutar de la emoción de conducir rápido y furioso en algunas de las carreteras más bellas y desafiantes del mundo. También puede probar sus habilidades contra otros jugadores en línea o fuera de línea en varios modos y eventos. </p> <h3>Necesidad de velocidad más buscados</h3>
<p>Need for Speed Most Wanted es un juego que combina la emoción de las carreras callejeras con la emoción de escapar de la ley. Lanzado en octubre <a href=">2012</a>, Need for Speed Most Wanted es un reinicio del juego de 2005 del mismo nombre, que fue uno de los juegos más populares y exitosos de la franquicia Need for Speed. Need for Speed Most Wanted te lleva a la ciudad de mundo abierto de Fairhaven, donde puedes conducir, correr y explorar como quieras. </p>

<p>El juego también cuenta con un modo para un solo jugador, donde puedes competir en varios eventos y desafíos, como circuito, sprint, carrera de velocidad, emboscada y punto de control. También puede desafiar a los 10 corredores más buscados en la ciudad, que son los mejores y más duros oponentes en el juego. También puedes ganar puntos de velocidad conduciendo rápido, realizando acrobacias, rompiendo vallas publicitarias y puertas de seguridad, y evadiendo a los policías. Puede utilizar puntos de velocidad para actualizar sus coches y desbloquear nuevas características. </p>
<p>Need for Speed Most Wanted también tiene un modo multijugador, donde puedes unirte a sesiones en línea con hasta 11 jugadores más. También puedes crear o unirte a un equipo con tus amigos y competir en eventos y desafíos basados en equipos. También puede personalizar sus propios eventos y listas de reproducción con diferentes configuraciones y reglas. </p>
<p>Para descargar y jugar Need for Speed Most Wanted en Windows 7, tendrá que seguir estos pasos:</p>
<ol>
<li>Asegúrese de que su PC cumple con los requisitos mínimos del sistema para Need for Speed Most Wanted, que son: <ul>
<li>Procesador: Intel Core 2 Duo 2.4 GHz o AMD Athlon X2 2.7 GHz</li>
<li>Memoria: 2 GB de RAM</li>
<li>Gráficos: NVIDIA GeForce 8800 GT o ATI Radeon HD 3870</li>
<li>DirectX: Versión 10</li>
<li>Almacenamiento: 20 GB de espacio disponible</li>
</ul>
</li>
<li>Descargue e instale el cliente <a href="">Origin</a> en su PC con Windows 7.</li>
<li>Crear una cuenta de Origin o iniciar sesión en la existente. </li>
<li>Necesidad de compra de velocidad más buscados de la tienda Origin por $19.99 USD o su equivalente regional. </li>
<li>Descargar e instalar La necesidad de velocidad más buscada en su PC a través del cliente de Origin. </li>
<li>Iniciar el juego y disfrutar! </li>
</ol>

<h2>Juegos de carreras de coches gratis para Windows 7</h2>
<p>Si estás buscando algunos juegos de carreras de coches gratis que puedes jugar en tu PC con Windows 7, entonces también tienes algunas opciones para elegir. Estos son algunos de los juegos de carreras de coches gratis que puedes descargar y jugar en tu PC con Windows 7.</p>
<h3>Conductor de coche ruso: ZIL 130</h3>
<p>Russian Car Driver: ZIL 130 es un juego que te permite conducir un camión soviético clásico a través de diferentes terrenos y escenarios. Lanzado en junio <a href=">2018</a>, Russian Car Driver: ZIL 130 es un juego de carreras de coches divertido y único que ofrece mucha variedad y personalización. </p>
<p>Russian Car Driver: ZIL 130 le permite elegir entre seis modos de juego diferentes, como carrera, estacionamiento, rally, carreras de arrastre, punto de control y viaje gratis. También puede personalizar su camión con diferentes colores, calcomanías, ruedas, motores, transmisiones, suspensiones y más. También puede actualizar su camión con diferentes piezas y accesorios. </p>
<p>El juego también cuenta con gráficos realistas y física que simulan el comportamiento y el daño del camión y el medio ambiente. El juego también tiene un sistema de tiempo dinámico que afecta la visibilidad y el agarre de las carreteras. El juego también tiene un sistema de sonido realista que reproduce el ruido del motor y la bocina del camión. </p>
<p>Para descargar y jugar Russian Car Driver: ZIL 130 en Windows 7, tendrá que seguir estos pasos:</p>
<ol>
<li>Asegúrese de que su PC cumple con los requisitos mínimos del sistema para el conductor de coche ruso: ZIL 130, que son: <ul>
<li>Procesador: Intel Core 2 Duo E8400 o AMD Phenom II X4 940</li>
<li>Memoria: 4 GB de RAM</li>
<li>Gráficos: NVIDIA GeForce GT 730 o AMD Radeon HD 6670</li>
<li>DirectX: Versión 9.0c</li>
<li>Almacenamiento: 1 GB de espacio disponible</li>
</ul>
</li>
<li>Descargue e instale el cliente <a href="">GameTop</a> en su PC con Windows 7.</li>
<li>Crear una cuenta de GameTop o iniciar sesión en la existente. </li>

<li>Instalar y lanzar el juego a través del cliente GameTop. </li>
<li>Disfruta! </li>
</ol>
<p>Russian Car Driver: ZIL 130 es un juego que ofrece mucha diversión y desafío para los aficionados a las carreras de coches que quieren probar algo diferente. Usted puede disfrutar de conducir un camión vintage en varios terrenos y escenarios, y personalizarlo a su gusto. También puedes competir en diferentes modos y eventos, o simplemente explorar el mundo abierto a tu propio ritmo. </p>
<h3>Coches locos</h3>
<p>Crazy Cars es un juego que te permite revivir la nostalgia de los clásicos juegos de carreras de coches de estilo árcade de los años 80 y 90. Lanzado en enero <a href=">2017</a>, Crazy Cars es un remake del juego original de Crazy Cars de <a href="">1987</a>, que fue uno de los primeros juegos en presentar física y gráficos realistas. </p>
<p>Crazy Cars te permite elegir entre cuatro coches diferentes, cada uno con su propia velocidad, manejo y aceleración. También puede elegir entre tres pistas diferentes, cada una con su propio paisaje, tráfico y obstáculos. También puedes elegir entre tres niveles de dificultad diferentes, cada uno con su propio límite de tiempo y número de oponentes. </p>
<p>El juego también cuenta con controles simples e intuitivos que le permiten dirigir, acelerar, frenar y turbo impulsar su coche. El juego también cuenta con gráficos de estilo retro y efectos de sonido que recrean la sensación de los juegos de carreras de coches de la vieja escuela. El juego también cuenta con un sistema de puntuación alta que te permite competir con otros jugadores de todo el mundo. </p>
<p>Para descargar y jugar Crazy Cars en Windows 7, tendrá que seguir estos pasos:</p>
<ol>
<li>Asegúrese de que su PC cumple con los requisitos mínimos del sistema para Crazy Cars, que son: <ul>
<li>Procesador: Pentium III o equivalente</li>
<li>Memoria: 256 MB RAM</li>
<li>Gráficos: tarjeta de video compatible con DirectX</li>
<li>DirectX: Versión 9.0c</li>
<li>Almacenamiento: 100 MB de espacio disponible</li>
</ul>
</li>
<li>Descargar Crazy Cars desde el sitio web de GameTop gratis. </li>

<li>Siga las instrucciones de instalación y inicie el juego. </li>
<li>Disfruta! </li>
</ol>
<p>Crazy Cars es un juego que ofrece una experiencia de carreras de coches divertida y nostálgica para los fans de los juegos clásicos de estilo árcade. Usted puede disfrutar de la conducción rápida y furiosa en diferentes pistas, y esquivar el tráfico y los obstáculos en el camino. También puede desafiarse a sí mismo para vencer a su propio tiempo y puntuación, o comparar sus resultados con otros jugadores en línea. </p>
<h2>Conclusión</h2>
<p>En este artículo, le hemos mostrado cómo encontrar y descargar juegos de carreras de coches gratis para Windows 7, así como algunos de los mejores pagados. También le hemos dado algunos consejos y consejos sobre cómo optimizar su experiencia de juego en su PC con Windows 7. Esperamos que hayas disfrutado leyendo este artículo y lo hayas encontrado útil e informativo. </p>
<p>Los juegos de carreras de coches son uno de los géneros más populares y divertidos de los videojuegos, ya que ofrecen una variedad de desafíos, entornos, vehículos y modos para adaptarse a diferentes gustos y preferencias. Ya sea que prefieras simulaciones realistas, acción estilo árcade o aventuras todoterreno, hay un juego de carreras de coches para ti. </p>
<p>Si usted tiene una PC con Windows 7, todavía puede jugar muchos juegos de carreras de coches, tanto antiguos como nuevos. De hecho, algunos de los mejores juegos de carreras de coches jamás realizados pueden funcionar sin problemas en Windows 7, siempre y cuando tenga los requisitos mínimos del sistema. También puede descargar algunos juegos de carreras de coches gratis de sitios web fiables, como GameTop.</p>
<p>Para disfrutar de los juegos de carreras de coches en Windows 7, tendrá que descargar e instalar un cliente de juegos, como Steam u Origin, dependiendo del juego que desee jugar. También tendrá que crear una cuenta y comprar el juego de la tienda del juego. También tendrá que actualizar sus controladores y DirectX para garantizar el mejor rendimiento y gráficos del juego. También tendrás que ajustar la configuración y las opciones del juego para adaptarte a tus preferencias y necesidades. </p>

<h2>Preguntas frecuentes</h2>
<h4> ¿Cuáles son los requisitos del sistema para jugar juegos de carreras de coches en Windows 7?</h4>
<p>Los requisitos del sistema para jugar juegos de carreras de coches en Windows 7 varían dependiendo del juego que desee jugar. Sin embargo, una pauta general es que necesitará al menos un procesador de doble núcleo, 2 GB de RAM, una tarjeta de video compatible con DirectX y 20 GB de espacio de almacenamiento. También necesitarás una conexión a Internet estable si quieres jugar online. </p>
<h4> ¿Cómo puedo mejorar el rendimiento y los gráficos de los juegos de carreras de coches en Windows 7?</h4>
<p>Para mejorar el rendimiento y los gráficos de los juegos de carreras de coches en Windows 7, tendrá que actualizar sus controladores y DirectX a las últimas versiones. También tendrá que ajustar la configuración y las opciones del juego para que coincida con las capacidades de su PC y sus preferencias personales. Puede reducir la resolución, la calidad de la textura, las sombras, el anti-aliasing y otras características para aumentar la velocidad de fotogramas y reducir el retraso. También puedes activar o desactivar la sincronización vertical, el desenfoque de movimiento, la profundidad de campo y otros efectos para mejorar la calidad visual y el realismo del juego. </p>
<h4>¿Dónde puedo encontrar más juegos de carreras de coches gratis para Windows 7?</h4>
<p>Puedes encontrar más juegos de carreras de coches gratis para Windows 7 desde sitios web confiables, como GameTop, que ofrecen una gran colección de juegos de carreras de coches legales y seguros que puedes descargar y jugar sin limitaciones o restricciones. También puede navegar a través de diferentes categorías, géneros, calificaciones y comentarios para encontrar el mejor juego de carreras de coches para usted. </p>
<h4> ¿Cómo puedo jugar juegos de carreras de coches en línea con otros jugadores en Windows 7?</h4>

<h4>¿Cómo puedo usar un controlador o un volante para jugar juegos de carreras de coches en Windows 7?</h4>
<p>Para usar un controlador o un volante para jugar juegos de carreras de autos en Windows 7, necesitará un dispositivo compatible que se conecte a su PC a través de una conexión USB o inalámbrica. También tendrá que instalar los controladores y el software para su dispositivo si es necesario. A continuación, tendrá que configurar los ajustes y opciones del juego para reconocer y utilizar el dispositivo como un método de entrada. También puedes personalizar los botones, ejes, sensibilidad y forzar la retroalimentación de tu dispositivo para adaptarla a tus preferencias y necesidades. </p> 64aa2da5cf<br />
<br />
<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/filetypes.py
DELETED
@@ -1,27 +0,0 @@
-"""Filetype information.
-"""
-
-from typing import Tuple
-
-from pip._internal.utils.misc import splitext
-
-WHEEL_EXTENSION = ".whl"
-BZ2_EXTENSIONS: Tuple[str, ...] = (".tar.bz2", ".tbz")
-XZ_EXTENSIONS: Tuple[str, ...] = (
-    ".tar.xz",
-    ".txz",
-    ".tlz",
-    ".tar.lz",
-    ".tar.lzma",
-)
-ZIP_EXTENSIONS: Tuple[str, ...] = (".zip", WHEEL_EXTENSION)
-TAR_EXTENSIONS: Tuple[str, ...] = (".tar.gz", ".tgz", ".tar")
-ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS
-
-
-def is_archive_file(name: str) -> bool:
-    """Return True if `name` is a considered as an archive file."""
-    ext = splitext(name)[1].lower()
-    if ext in ARCHIVE_EXTENSIONS:
-        return True
-    return False
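
For orientation, here is a minimal sketch of the kind of check the deleted helper performs. It is a standalone re-implementation for illustration only: pip._internal is a private API, and the simplified endswith() test below stands in for pip's own splitext() handling of double extensions; the sample file names are made up.

# Hypothetical standalone sketch; not pip's actual implementation.
ARCHIVE_EXTENSIONS = (
    ".zip", ".whl",                                       # zip-based archives and wheels
    ".tar.bz2", ".tbz",                                   # bzip2 tarballs
    ".tar.gz", ".tgz", ".tar",                            # gzip / plain tarballs
    ".tar.xz", ".txz", ".tlz", ".tar.lz", ".tar.lzma",    # xz / lzma tarballs
)

def looks_like_archive(name: str) -> bool:
    # Case-insensitive suffix check standing in for pip's splitext-based lookup.
    lowered = name.lower()
    return any(lowered.endswith(ext) for ext in ARCHIVE_EXTENSIONS)

print(looks_like_archive("package-1.0.tar.gz"))  # True
print(looks_like_archive("README.md"))           # False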
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/ansi_test.py
DELETED
@@ -1,76 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-import sys
-from unittest import TestCase, main
-
-from ..ansi import Back, Fore, Style
-from ..ansitowin32 import AnsiToWin32
-
-stdout_orig = sys.stdout
-stderr_orig = sys.stderr
-
-
-class AnsiTest(TestCase):
-
-    def setUp(self):
-        # sanity check: stdout should be a file or StringIO object.
-        # It will only be AnsiToWin32 if init() has previously wrapped it
-        self.assertNotEqual(type(sys.stdout), AnsiToWin32)
-        self.assertNotEqual(type(sys.stderr), AnsiToWin32)
-
-    def tearDown(self):
-        sys.stdout = stdout_orig
-        sys.stderr = stderr_orig
-
-
-    def testForeAttributes(self):
-        self.assertEqual(Fore.BLACK, '\033[30m')
-        self.assertEqual(Fore.RED, '\033[31m')
-        self.assertEqual(Fore.GREEN, '\033[32m')
-        self.assertEqual(Fore.YELLOW, '\033[33m')
-        self.assertEqual(Fore.BLUE, '\033[34m')
-        self.assertEqual(Fore.MAGENTA, '\033[35m')
-        self.assertEqual(Fore.CYAN, '\033[36m')
-        self.assertEqual(Fore.WHITE, '\033[37m')
-        self.assertEqual(Fore.RESET, '\033[39m')
-
-        # Check the light, extended versions.
-        self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m')
-        self.assertEqual(Fore.LIGHTRED_EX, '\033[91m')
-        self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m')
-        self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m')
-        self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m')
-        self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m')
-        self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m')
-        self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m')
-
-
-    def testBackAttributes(self):
-        self.assertEqual(Back.BLACK, '\033[40m')
-        self.assertEqual(Back.RED, '\033[41m')
-        self.assertEqual(Back.GREEN, '\033[42m')
-        self.assertEqual(Back.YELLOW, '\033[43m')
-        self.assertEqual(Back.BLUE, '\033[44m')
-        self.assertEqual(Back.MAGENTA, '\033[45m')
-        self.assertEqual(Back.CYAN, '\033[46m')
-        self.assertEqual(Back.WHITE, '\033[47m')
-        self.assertEqual(Back.RESET, '\033[49m')
-
-        # Check the light, extended versions.
-        self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m')
-        self.assertEqual(Back.LIGHTRED_EX, '\033[101m')
-        self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m')
-        self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m')
-        self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m')
-        self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m')
-        self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m')
-        self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m')
-
-
-    def testStyleAttributes(self):
-        self.assertEqual(Style.DIM, '\033[2m')
-        self.assertEqual(Style.NORMAL, '\033[22m')
-        self.assertEqual(Style.BRIGHT, '\033[1m')
-
-
-if __name__ == '__main__':
-    main()
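
The escape sequences asserted above are colorama's public Fore/Back/Style constants. A minimal usage sketch, assuming colorama is installed; init() only matters on Windows consoles, where it converts the ANSI codes into console colors:

from colorama import Back, Fore, Style, init

init()  # wrap stdout/stderr so ANSI codes render as colors on Windows
print(Fore.RED + "error:" + Style.RESET_ALL + " something went wrong")
print(Back.GREEN + Fore.BLACK + "ok" + Style.RESET_ALL)
print(Style.BRIGHT + "bright text" + Style.NORMAL + " normal text" + Style.RESET_ALL)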
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/measure.py
DELETED
@@ -1,151 +0,0 @@
-from operator import itemgetter
-from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence
-
-from . import errors
-from .protocol import is_renderable, rich_cast
-
-if TYPE_CHECKING:
-    from .console import Console, ConsoleOptions, RenderableType
-
-
-class Measurement(NamedTuple):
-    """Stores the minimum and maximum widths (in characters) required to render an object."""
-
-    minimum: int
-    """Minimum number of cells required to render."""
-    maximum: int
-    """Maximum number of cells required to render."""
-
-    @property
-    def span(self) -> int:
-        """Get difference between maximum and minimum."""
-        return self.maximum - self.minimum
-
-    def normalize(self) -> "Measurement":
-        """Get measurement that ensures that minimum <= maximum and minimum >= 0
-
-        Returns:
-            Measurement: A normalized measurement.
-        """
-        minimum, maximum = self
-        minimum = min(max(0, minimum), maximum)
-        return Measurement(max(0, minimum), max(0, max(minimum, maximum)))
-
-    def with_maximum(self, width: int) -> "Measurement":
-        """Get a RenderableWith where the widths are <= width.
-
-        Args:
-            width (int): Maximum desired width.
-
-        Returns:
-            Measurement: New Measurement object.
-        """
-        minimum, maximum = self
-        return Measurement(min(minimum, width), min(maximum, width))
-
-    def with_minimum(self, width: int) -> "Measurement":
-        """Get a RenderableWith where the widths are >= width.
-
-        Args:
-            width (int): Minimum desired width.
-
-        Returns:
-            Measurement: New Measurement object.
-        """
-        minimum, maximum = self
-        width = max(0, width)
-        return Measurement(max(minimum, width), max(maximum, width))
-
-    def clamp(
-        self, min_width: Optional[int] = None, max_width: Optional[int] = None
-    ) -> "Measurement":
-        """Clamp a measurement within the specified range.
-
-        Args:
-            min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None.
-            max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None.
-
-        Returns:
-            Measurement: New Measurement object.
-        """
-        measurement = self
-        if min_width is not None:
-            measurement = measurement.with_minimum(min_width)
-        if max_width is not None:
-            measurement = measurement.with_maximum(max_width)
-        return measurement
-
-    @classmethod
-    def get(
-        cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
-    ) -> "Measurement":
-        """Get a measurement for a renderable.
-
-        Args:
-            console (~rich.console.Console): Console instance.
-            options (~rich.console.ConsoleOptions): Console options.
-            renderable (RenderableType): An object that may be rendered with Rich.
-
-        Raises:
-            errors.NotRenderableError: If the object is not renderable.
-
-        Returns:
-            Measurement: Measurement object containing range of character widths required to render the object.
-        """
-        _max_width = options.max_width
-        if _max_width < 1:
-            return Measurement(0, 0)
-        if isinstance(renderable, str):
-            renderable = console.render_str(
-                renderable, markup=options.markup, highlight=False
-            )
-        renderable = rich_cast(renderable)
-        if is_renderable(renderable):
-            get_console_width: Optional[
-                Callable[["Console", "ConsoleOptions"], "Measurement"]
-            ] = getattr(renderable, "__rich_measure__", None)
-            if get_console_width is not None:
-                render_width = (
-                    get_console_width(console, options)
-                    .normalize()
-                    .with_maximum(_max_width)
-                )
-                if render_width.maximum < 1:
-                    return Measurement(0, 0)
-                return render_width.normalize()
-            else:
-                return Measurement(0, _max_width)
-        else:
-            raise errors.NotRenderableError(
-                f"Unable to get render width for {renderable!r}; "
-                "a str, Segment, or object with __rich_console__ method is required"
-            )
-
-
-def measure_renderables(
-    console: "Console",
-    options: "ConsoleOptions",
-    renderables: Sequence["RenderableType"],
-) -> "Measurement":
-    """Get a measurement that would fit a number of renderables.
-
-    Args:
-        console (~rich.console.Console): Console instance.
-        options (~rich.console.ConsoleOptions): Console options.
-        renderables (Iterable[RenderableType]): One or more renderable objects.
-
-    Returns:
-        Measurement: Measurement object containing range of character widths required to
-            contain all given renderables.
-    """
-    if not renderables:
-        return Measurement(0, 0)
-    get_measurement = Measurement.get
-    measurements = [
-        get_measurement(console, options, renderable) for renderable in renderables
-    ]
-    measured_width = Measurement(
-        max(measurements, key=itemgetter(0)).minimum,
-        max(measurements, key=itemgetter(1)).maximum,
-    )
-    return measured_width
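
For context, a short sketch of how the Measurement API above is typically used, assuming rich is installed; the measured strings are arbitrary examples, not part of the diff:

from rich.console import Console
from rich.measure import Measurement, measure_renderables
from rich.text import Text

console = Console()
# Minimum/maximum character widths needed to render a plain string.
print(Measurement.get(console, console.options, "Hello, world!"))
# Combined measurement for several renderables, clamped to at most 40 cells.
combined = measure_renderables(console, console.options, [Text("one"), Text("two words")])
print(combined.with_maximum(40))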
spaces/CVPR/LIVE/pybind11/include/pybind11/pytypes.h
DELETED
@@ -1,1608 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
pybind11/pytypes.h: Convenience wrapper classes for basic Python types
|
3 |
-
|
4 |
-
Copyright (c) 2016 Wenzel Jakob <[email protected]>
|
5 |
-
|
6 |
-
All rights reserved. Use of this source code is governed by a
|
7 |
-
BSD-style license that can be found in the LICENSE file.
|
8 |
-
*/
|
9 |
-
|
10 |
-
#pragma once
|
11 |
-
|
12 |
-
#include "detail/common.h"
|
13 |
-
#include "buffer_info.h"
|
14 |
-
#include <utility>
|
15 |
-
#include <type_traits>
|
16 |
-
|
17 |
-
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
|
18 |
-
|
19 |
-
/* A few forward declarations */
|
20 |
-
class handle; class object;
|
21 |
-
class str; class iterator;
|
22 |
-
struct arg; struct arg_v;
|
23 |
-
|
24 |
-
PYBIND11_NAMESPACE_BEGIN(detail)
|
25 |
-
class args_proxy;
|
26 |
-
inline bool isinstance_generic(handle obj, const std::type_info &tp);
|
27 |
-
|
28 |
-
// Accessor forward declarations
|
29 |
-
template <typename Policy> class accessor;
|
30 |
-
namespace accessor_policies {
|
31 |
-
struct obj_attr;
|
32 |
-
struct str_attr;
|
33 |
-
struct generic_item;
|
34 |
-
struct sequence_item;
|
35 |
-
struct list_item;
|
36 |
-
struct tuple_item;
|
37 |
-
}
|
38 |
-
using obj_attr_accessor = accessor<accessor_policies::obj_attr>;
|
39 |
-
using str_attr_accessor = accessor<accessor_policies::str_attr>;
|
40 |
-
using item_accessor = accessor<accessor_policies::generic_item>;
|
41 |
-
using sequence_accessor = accessor<accessor_policies::sequence_item>;
|
42 |
-
using list_accessor = accessor<accessor_policies::list_item>;
|
43 |
-
using tuple_accessor = accessor<accessor_policies::tuple_item>;
|
44 |
-
|
45 |
-
/// Tag and check to identify a class which implements the Python object API
|
46 |
-
class pyobject_tag { };
|
47 |
-
template <typename T> using is_pyobject = std::is_base_of<pyobject_tag, remove_reference_t<T>>;
|
48 |
-
|
49 |
-
/** \rst
|
50 |
-
A mixin class which adds common functions to `handle`, `object` and various accessors.
|
51 |
-
The only requirement for `Derived` is to implement ``PyObject *Derived::ptr() const``.
|
52 |
-
\endrst */
|
53 |
-
template <typename Derived>
|
54 |
-
class object_api : public pyobject_tag {
|
55 |
-
const Derived &derived() const { return static_cast<const Derived &>(*this); }
|
56 |
-
|
57 |
-
public:
|
58 |
-
/** \rst
|
59 |
-
Return an iterator equivalent to calling ``iter()`` in Python. The object
|
60 |
-
must be a collection which supports the iteration protocol.
|
61 |
-
\endrst */
|
62 |
-
iterator begin() const;
|
63 |
-
/// Return a sentinel which ends iteration.
|
64 |
-
iterator end() const;
|
65 |
-
|
66 |
-
/** \rst
|
67 |
-
Return an internal functor to invoke the object's sequence protocol. Casting
|
68 |
-
the returned ``detail::item_accessor`` instance to a `handle` or `object`
|
69 |
-
subclass causes a corresponding call to ``__getitem__``. Assigning a `handle`
|
70 |
-
or `object` subclass causes a call to ``__setitem__``.
|
71 |
-
\endrst */
|
72 |
-
item_accessor operator[](handle key) const;
|
73 |
-
/// See above (the only difference is that they key is provided as a string literal)
|
74 |
-
item_accessor operator[](const char *key) const;
|
75 |
-
|
76 |
-
/** \rst
|
77 |
-
Return an internal functor to access the object's attributes. Casting the
|
78 |
-
returned ``detail::obj_attr_accessor`` instance to a `handle` or `object`
|
79 |
-
subclass causes a corresponding call to ``getattr``. Assigning a `handle`
|
80 |
-
or `object` subclass causes a call to ``setattr``.
|
81 |
-
\endrst */
|
82 |
-
obj_attr_accessor attr(handle key) const;
|
83 |
-
/// See above (the only difference is that they key is provided as a string literal)
|
84 |
-
str_attr_accessor attr(const char *key) const;
|
85 |
-
|
86 |
-
/** \rst
|
87 |
-
Matches * unpacking in Python, e.g. to unpack arguments out of a ``tuple``
|
88 |
-
or ``list`` for a function call. Applying another * to the result yields
|
89 |
-
** unpacking, e.g. to unpack a dict as function keyword arguments.
|
90 |
-
See :ref:`calling_python_functions`.
|
91 |
-
\endrst */
|
92 |
-
args_proxy operator*() const;
|
93 |
-
|
94 |
-
/// Check if the given item is contained within this object, i.e. ``item in obj``.
|
95 |
-
template <typename T> bool contains(T &&item) const;
|
96 |
-
|
97 |
-
/** \rst
|
98 |
-
Assuming the Python object is a function or implements the ``__call__``
|
99 |
-
protocol, ``operator()`` invokes the underlying function, passing an
|
100 |
-
arbitrary set of parameters. The result is returned as a `object` and
|
101 |
-
may need to be converted back into a Python object using `handle::cast()`.
|
102 |
-
|
103 |
-
When some of the arguments cannot be converted to Python objects, the
|
104 |
-
function will throw a `cast_error` exception. When the Python function
|
105 |
-
call fails, a `error_already_set` exception is thrown.
|
106 |
-
\endrst */
|
107 |
-
template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
|
108 |
-
object operator()(Args &&...args) const;
|
109 |
-
template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
|
110 |
-
PYBIND11_DEPRECATED("call(...) was deprecated in favor of operator()(...)")
|
111 |
-
object call(Args&&... args) const;
|
112 |
-
|
113 |
-
/// Equivalent to ``obj is other`` in Python.
|
114 |
-
bool is(object_api const& other) const { return derived().ptr() == other.derived().ptr(); }
|
115 |
-
/// Equivalent to ``obj is None`` in Python.
|
116 |
-
bool is_none() const { return derived().ptr() == Py_None; }
|
117 |
-
/// Equivalent to obj == other in Python
|
118 |
-
bool equal(object_api const &other) const { return rich_compare(other, Py_EQ); }
|
119 |
-
bool not_equal(object_api const &other) const { return rich_compare(other, Py_NE); }
|
120 |
-
bool operator<(object_api const &other) const { return rich_compare(other, Py_LT); }
|
121 |
-
bool operator<=(object_api const &other) const { return rich_compare(other, Py_LE); }
|
122 |
-
bool operator>(object_api const &other) const { return rich_compare(other, Py_GT); }
|
123 |
-
bool operator>=(object_api const &other) const { return rich_compare(other, Py_GE); }
|
124 |
-
|
125 |
-
object operator-() const;
|
126 |
-
object operator~() const;
|
127 |
-
object operator+(object_api const &other) const;
|
128 |
-
object operator+=(object_api const &other) const;
|
129 |
-
object operator-(object_api const &other) const;
|
130 |
-
object operator-=(object_api const &other) const;
|
131 |
-
object operator*(object_api const &other) const;
|
132 |
-
object operator*=(object_api const &other) const;
|
133 |
-
object operator/(object_api const &other) const;
|
134 |
-
object operator/=(object_api const &other) const;
|
135 |
-
object operator|(object_api const &other) const;
|
136 |
-
object operator|=(object_api const &other) const;
|
137 |
-
object operator&(object_api const &other) const;
|
138 |
-
object operator&=(object_api const &other) const;
|
139 |
-
object operator^(object_api const &other) const;
|
140 |
-
object operator^=(object_api const &other) const;
|
141 |
-
object operator<<(object_api const &other) const;
|
142 |
-
object operator<<=(object_api const &other) const;
|
143 |
-
object operator>>(object_api const &other) const;
|
144 |
-
object operator>>=(object_api const &other) const;
|
145 |
-
|
146 |
-
PYBIND11_DEPRECATED("Use py::str(obj) instead")
|
147 |
-
pybind11::str str() const;
|
148 |
-
|
149 |
-
/// Get or set the object's docstring, i.e. ``obj.__doc__``.
|
150 |
-
str_attr_accessor doc() const;
|
151 |
-
|
152 |
-
/// Return the object's current reference count
|
153 |
-
int ref_count() const { return static_cast<int>(Py_REFCNT(derived().ptr())); }
|
154 |
-
/// Return a handle to the Python type object underlying the instance
|
155 |
-
handle get_type() const;
|
156 |
-
|
157 |
-
private:
|
158 |
-
bool rich_compare(object_api const &other, int value) const;
|
159 |
-
};
|
160 |
-
|
161 |
-
PYBIND11_NAMESPACE_END(detail)
|
162 |
-
|
163 |
-
/** \rst
|
164 |
-
Holds a reference to a Python object (no reference counting)
|
165 |
-
|
166 |
-
The `handle` class is a thin wrapper around an arbitrary Python object (i.e. a
|
167 |
-
``PyObject *`` in Python's C API). It does not perform any automatic reference
|
168 |
-
counting and merely provides a basic C++ interface to various Python API functions.
|
169 |
-
|
170 |
-
.. seealso::
|
171 |
-
The `object` class inherits from `handle` and adds automatic reference
|
172 |
-
counting features.
|
173 |
-
\endrst */
|
174 |
-
class handle : public detail::object_api<handle> {
|
175 |
-
public:
|
176 |
-
/// The default constructor creates a handle with a ``nullptr``-valued pointer
|
177 |
-
handle() = default;
|
178 |
-
/// Creates a ``handle`` from the given raw Python object pointer
|
179 |
-
handle(PyObject *ptr) : m_ptr(ptr) { } // Allow implicit conversion from PyObject*
|
180 |
-
|
181 |
-
/// Return the underlying ``PyObject *`` pointer
|
182 |
-
PyObject *ptr() const { return m_ptr; }
|
183 |
-
PyObject *&ptr() { return m_ptr; }
|
184 |
-
|
185 |
-
/** \rst
|
186 |
-
Manually increase the reference count of the Python object. Usually, it is
|
187 |
-
preferable to use the `object` class which derives from `handle` and calls
|
188 |
-
this function automatically. Returns a reference to itself.
|
189 |
-
\endrst */
|
190 |
-
const handle& inc_ref() const & { Py_XINCREF(m_ptr); return *this; }
|
191 |
-
|
192 |
-
/** \rst
|
193 |
-
Manually decrease the reference count of the Python object. Usually, it is
|
194 |
-
preferable to use the `object` class which derives from `handle` and calls
|
195 |
-
this function automatically. Returns a reference to itself.
|
196 |
-
\endrst */
|
197 |
-
const handle& dec_ref() const & { Py_XDECREF(m_ptr); return *this; }
|
198 |
-
|
199 |
-
/** \rst
|
200 |
-
Attempt to cast the Python object into the given C++ type. A `cast_error`
|
201 |
-
will be throw upon failure.
|
202 |
-
\endrst */
|
203 |
-
template <typename T> T cast() const;
|
204 |
-
/// Return ``true`` when the `handle` wraps a valid Python object
|
205 |
-
explicit operator bool() const { return m_ptr != nullptr; }
|
206 |
-
/** \rst
|
207 |
-
Deprecated: Check that the underlying pointers are the same.
|
208 |
-
Equivalent to ``obj1 is obj2`` in Python.
|
209 |
-
\endrst */
|
210 |
-
PYBIND11_DEPRECATED("Use obj1.is(obj2) instead")
|
211 |
-
bool operator==(const handle &h) const { return m_ptr == h.m_ptr; }
|
212 |
-
PYBIND11_DEPRECATED("Use !obj1.is(obj2) instead")
|
213 |
-
bool operator!=(const handle &h) const { return m_ptr != h.m_ptr; }
|
214 |
-
PYBIND11_DEPRECATED("Use handle::operator bool() instead")
|
215 |
-
bool check() const { return m_ptr != nullptr; }
|
216 |
-
protected:
|
217 |
-
PyObject *m_ptr = nullptr;
|
218 |
-
};
|
219 |
-
|
220 |
-
/** \rst
|
221 |
-
Holds a reference to a Python object (with reference counting)
|
222 |
-
|
223 |
-
Like `handle`, the `object` class is a thin wrapper around an arbitrary Python
|
224 |
-
object (i.e. a ``PyObject *`` in Python's C API). In contrast to `handle`, it
|
225 |
-
optionally increases the object's reference count upon construction, and it
|
226 |
-
*always* decreases the reference count when the `object` instance goes out of
|
227 |
-
scope and is destructed. When using `object` instances consistently, it is much
|
228 |
-
easier to get reference counting right at the first attempt.
|
229 |
-
\endrst */
|
230 |
-
class object : public handle {
|
231 |
-
public:
|
232 |
-
object() = default;
|
233 |
-
PYBIND11_DEPRECATED("Use reinterpret_borrow<object>() or reinterpret_steal<object>()")
|
234 |
-
object(handle h, bool is_borrowed) : handle(h) { if (is_borrowed) inc_ref(); }
|
235 |
-
/// Copy constructor; always increases the reference count
|
236 |
-
object(const object &o) : handle(o) { inc_ref(); }
|
237 |
-
/// Move constructor; steals the object from ``other`` and preserves its reference count
|
238 |
-
object(object &&other) noexcept { m_ptr = other.m_ptr; other.m_ptr = nullptr; }
|
239 |
-
/// Destructor; automatically calls `handle::dec_ref()`
|
240 |
-
~object() { dec_ref(); }
|
241 |
-
|
242 |
-
/** \rst
|
243 |
-
Resets the internal pointer to ``nullptr`` without decreasing the
|
244 |
-
object's reference count. The function returns a raw handle to the original
|
245 |
-
Python object.
|
246 |
-
\endrst */
|
247 |
-
handle release() {
|
248 |
-
PyObject *tmp = m_ptr;
|
249 |
-
m_ptr = nullptr;
|
250 |
-
return handle(tmp);
|
251 |
-
}
|
252 |
-
|
253 |
-
object& operator=(const object &other) {
|
254 |
-
other.inc_ref();
|
255 |
-
dec_ref();
|
256 |
-
m_ptr = other.m_ptr;
|
257 |
-
return *this;
|
258 |
-
}
|
259 |
-
|
260 |
-
object& operator=(object &&other) noexcept {
|
261 |
-
if (this != &other) {
|
262 |
-
handle temp(m_ptr);
|
263 |
-
m_ptr = other.m_ptr;
|
264 |
-
other.m_ptr = nullptr;
|
265 |
-
temp.dec_ref();
|
266 |
-
}
|
267 |
-
return *this;
|
268 |
-
}
|
269 |
-
|
270 |
-
// Calling cast() on an object lvalue just copies (via handle::cast)
|
271 |
-
template <typename T> T cast() const &;
|
272 |
-
// Calling on an object rvalue does a move, if needed and/or possible
|
273 |
-
template <typename T> T cast() &&;
|
274 |
-
|
275 |
-
protected:
|
276 |
-
// Tags for choosing constructors from raw PyObject *
|
277 |
-
struct borrowed_t { };
|
278 |
-
struct stolen_t { };
|
279 |
-
|
280 |
-
template <typename T> friend T reinterpret_borrow(handle);
|
281 |
-
template <typename T> friend T reinterpret_steal(handle);
|
282 |
-
|
283 |
-
public:
|
284 |
-
// Only accessible from derived classes and the reinterpret_* functions
|
285 |
-
object(handle h, borrowed_t) : handle(h) { inc_ref(); }
|
286 |
-
object(handle h, stolen_t) : handle(h) { }
|
287 |
-
};
|
288 |
-
|
289 |
-
/** \rst
|
290 |
-
Declare that a `handle` or ``PyObject *`` is a certain type and borrow the reference.
|
291 |
-
The target type ``T`` must be `object` or one of its derived classes. The function
|
292 |
-
doesn't do any conversions or checks. It's up to the user to make sure that the
|
293 |
-
target type is correct.
|
294 |
-
|
295 |
-
.. code-block:: cpp
|
296 |
-
|
297 |
-
PyObject *p = PyList_GetItem(obj, index);
|
298 |
-
py::object o = reinterpret_borrow<py::object>(p);
|
299 |
-
// or
|
300 |
-
py::tuple t = reinterpret_borrow<py::tuple>(p); // <-- `p` must be already be a `tuple`
|
301 |
-
\endrst */
|
302 |
-
template <typename T> T reinterpret_borrow(handle h) { return {h, object::borrowed_t{}}; }
|
303 |
-
|
304 |
-
/** \rst
|
305 |
-
Like `reinterpret_borrow`, but steals the reference.
|
306 |
-
|
307 |
-
.. code-block:: cpp
|
308 |
-
|
309 |
-
PyObject *p = PyObject_Str(obj);
|
310 |
-
py::str s = reinterpret_steal<py::str>(p); // <-- `p` must be already be a `str`
|
311 |
-
\endrst */
|
312 |
-
template <typename T> T reinterpret_steal(handle h) { return {h, object::stolen_t{}}; }
|
313 |
-
|
314 |
-
PYBIND11_NAMESPACE_BEGIN(detail)
|
315 |
-
inline std::string error_string();
|
316 |
-
PYBIND11_NAMESPACE_END(detail)
|
317 |
-
|
318 |
-
/// Fetch and hold an error which was already set in Python. An instance of this is typically
|
319 |
-
/// thrown to propagate python-side errors back through C++ which can either be caught manually or
|
320 |
-
/// else falls back to the function dispatcher (which then raises the captured error back to
|
321 |
-
/// python).
|
322 |
-
class error_already_set : public std::runtime_error {
|
323 |
-
public:
|
324 |
-
/// Constructs a new exception from the current Python error indicator, if any. The current
|
325 |
-
/// Python error indicator will be cleared.
|
326 |
-
error_already_set() : std::runtime_error(detail::error_string()) {
|
327 |
-
PyErr_Fetch(&m_type.ptr(), &m_value.ptr(), &m_trace.ptr());
|
328 |
-
}
|
329 |
-
|
330 |
-
error_already_set(const error_already_set &) = default;
|
331 |
-
error_already_set(error_already_set &&) = default;
|
332 |
-
|
333 |
-
inline ~error_already_set();
|
334 |
-
|
335 |
-
/// Give the currently-held error back to Python, if any. If there is currently a Python error
|
336 |
-
/// already set it is cleared first. After this call, the current object no longer stores the
|
337 |
-
/// error variables (but the `.what()` string is still available).
|
338 |
-
void restore() { PyErr_Restore(m_type.release().ptr(), m_value.release().ptr(), m_trace.release().ptr()); }
|
339 |
-
|
340 |
-
/// If it is impossible to raise the currently-held error, such as in destructor, we can write
|
341 |
-
/// it out using Python's unraisable hook (sys.unraisablehook). The error context should be
|
342 |
-
/// some object whose repr() helps identify the location of the error. Python already knows the
|
343 |
-
/// type and value of the error, so there is no need to repeat that. For example, __func__ could
|
344 |
-
/// be helpful. After this call, the current object no longer stores the error variables,
|
345 |
-
/// and neither does Python.
|
346 |
-
void discard_as_unraisable(object err_context) {
|
347 |
-
restore();
|
348 |
-
PyErr_WriteUnraisable(err_context.ptr());
|
349 |
-
}
|
350 |
-
void discard_as_unraisable(const char *err_context) {
|
351 |
-
discard_as_unraisable(reinterpret_steal<object>(PYBIND11_FROM_STRING(err_context)));
|
352 |
-
}
|
353 |
-
|
354 |
-
// Does nothing; provided for backwards compatibility.
|
355 |
-
PYBIND11_DEPRECATED("Use of error_already_set.clear() is deprecated")
|
356 |
-
void clear() {}
|
357 |
-
|
358 |
-
/// Check if the currently trapped error type matches the given Python exception class (or a
|
359 |
-
/// subclass thereof). May also be passed a tuple to search for any exception class matches in
|
360 |
-
/// the given tuple.
|
361 |
-
bool matches(handle exc) const { return PyErr_GivenExceptionMatches(m_type.ptr(), exc.ptr()); }
|
362 |
-
|
363 |
-
const object& type() const { return m_type; }
|
364 |
-
const object& value() const { return m_value; }
|
365 |
-
const object& trace() const { return m_trace; }
|
366 |
-
|
367 |
-
private:
|
368 |
-
object m_type, m_value, m_trace;
|
369 |
-
};
|
370 |
-
|
371 |
-
/** \defgroup python_builtins _
|
372 |
-
Unless stated otherwise, the following C++ functions behave the same
|
373 |
-
as their Python counterparts.
|
374 |
-
*/
|
375 |
-
|
376 |
-
/** \ingroup python_builtins
|
377 |
-
\rst
|
378 |
-
Return true if ``obj`` is an instance of ``T``. Type ``T`` must be a subclass of
|
379 |
-
`object` or a class which was exposed to Python as ``py::class_<T>``.
|
380 |
-
\endrst */
|
381 |
-
template <typename T, detail::enable_if_t<std::is_base_of<object, T>::value, int> = 0>
|
382 |
-
bool isinstance(handle obj) { return T::check_(obj); }
|
383 |
-
|
384 |
-
template <typename T, detail::enable_if_t<!std::is_base_of<object, T>::value, int> = 0>
|
385 |
-
bool isinstance(handle obj) { return detail::isinstance_generic(obj, typeid(T)); }
|
386 |
-
|
387 |
-
template <> inline bool isinstance<handle>(handle) = delete;
|
388 |
-
template <> inline bool isinstance<object>(handle obj) { return obj.ptr() != nullptr; }
|
389 |
-
|
390 |
-
/// \ingroup python_builtins
|
391 |
-
/// Return true if ``obj`` is an instance of the ``type``.
|
392 |
-
inline bool isinstance(handle obj, handle type) {
|
393 |
-
const auto result = PyObject_IsInstance(obj.ptr(), type.ptr());
|
394 |
-
if (result == -1)
|
395 |
-
throw error_already_set();
|
396 |
-
return result != 0;
|
397 |
-
}
|
398 |
-
|
399 |
-
/// \addtogroup python_builtins
|
400 |
-
/// @{
|
401 |
-
inline bool hasattr(handle obj, handle name) {
|
402 |
-
return PyObject_HasAttr(obj.ptr(), name.ptr()) == 1;
|
403 |
-
}
|
404 |
-
|
405 |
-
inline bool hasattr(handle obj, const char *name) {
|
406 |
-
return PyObject_HasAttrString(obj.ptr(), name) == 1;
|
407 |
-
}
|
408 |
-
|
409 |
-
inline void delattr(handle obj, handle name) {
|
410 |
-
if (PyObject_DelAttr(obj.ptr(), name.ptr()) != 0) { throw error_already_set(); }
|
411 |
-
}
|
412 |
-
|
413 |
-
inline void delattr(handle obj, const char *name) {
|
414 |
-
if (PyObject_DelAttrString(obj.ptr(), name) != 0) { throw error_already_set(); }
|
415 |
-
}
|
416 |
-
|
417 |
-
inline object getattr(handle obj, handle name) {
|
418 |
-
PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr());
|
419 |
-
if (!result) { throw error_already_set(); }
|
420 |
-
return reinterpret_steal<object>(result);
|
421 |
-
}
|
422 |
-
|
423 |
-
inline object getattr(handle obj, const char *name) {
|
424 |
-
PyObject *result = PyObject_GetAttrString(obj.ptr(), name);
|
425 |
-
if (!result) { throw error_already_set(); }
|
426 |
-
return reinterpret_steal<object>(result);
|
427 |
-
}
|
428 |
-
|
429 |
-
inline object getattr(handle obj, handle name, handle default_) {
|
430 |
-
if (PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr())) {
|
431 |
-
return reinterpret_steal<object>(result);
|
432 |
-
} else {
|
433 |
-
PyErr_Clear();
|
434 |
-
return reinterpret_borrow<object>(default_);
|
435 |
-
}
|
436 |
-
}
|
437 |
-
|
438 |
-
inline object getattr(handle obj, const char *name, handle default_) {
|
439 |
-
if (PyObject *result = PyObject_GetAttrString(obj.ptr(), name)) {
|
440 |
-
return reinterpret_steal<object>(result);
|
441 |
-
} else {
|
442 |
-
PyErr_Clear();
|
443 |
-
return reinterpret_borrow<object>(default_);
|
444 |
-
}
|
445 |
-
}
|
446 |
-
|
447 |
-
inline void setattr(handle obj, handle name, handle value) {
|
448 |
-
if (PyObject_SetAttr(obj.ptr(), name.ptr(), value.ptr()) != 0) { throw error_already_set(); }
|
449 |
-
}
|
450 |
-
|
451 |
-
inline void setattr(handle obj, const char *name, handle value) {
|
452 |
-
if (PyObject_SetAttrString(obj.ptr(), name, value.ptr()) != 0) { throw error_already_set(); }
|
453 |
-
}
|
454 |
-
|
455 |
-
inline ssize_t hash(handle obj) {
|
456 |
-
auto h = PyObject_Hash(obj.ptr());
|
457 |
-
if (h == -1) { throw error_already_set(); }
|
458 |
-
return h;
|
459 |
-
}
|
460 |
-
|
461 |
-
/// @} python_builtins
|
462 |
-
|
463 |
-
PYBIND11_NAMESPACE_BEGIN(detail)
|
464 |
-
inline handle get_function(handle value) {
|
465 |
-
if (value) {
|
466 |
-
#if PY_MAJOR_VERSION >= 3
|
467 |
-
if (PyInstanceMethod_Check(value.ptr()))
|
468 |
-
value = PyInstanceMethod_GET_FUNCTION(value.ptr());
|
469 |
-
else
|
470 |
-
#endif
|
471 |
-
if (PyMethod_Check(value.ptr()))
|
472 |
-
value = PyMethod_GET_FUNCTION(value.ptr());
|
473 |
-
}
|
474 |
-
return value;
|
475 |
-
}
|
476 |
-
|
477 |
-
// Helper aliases/functions to support implicit casting of values given to python accessors/methods.
|
478 |
-
// When given a pyobject, this simply returns the pyobject as-is; for other C++ type, the value goes
|
479 |
-
// through pybind11::cast(obj) to convert it to an `object`.
|
480 |
-
template <typename T, enable_if_t<is_pyobject<T>::value, int> = 0>
|
481 |
-
auto object_or_cast(T &&o) -> decltype(std::forward<T>(o)) { return std::forward<T>(o); }
|
482 |
-
// The following casting version is implemented in cast.h:
|
483 |
-
template <typename T, enable_if_t<!is_pyobject<T>::value, int> = 0>
|
484 |
-
object object_or_cast(T &&o);
|
485 |
-
// Match a PyObject*, which we want to convert directly to handle via its converting constructor
|
486 |
-
inline handle object_or_cast(PyObject *ptr) { return ptr; }
|
487 |
-
|
488 |
-
template <typename Policy>
|
489 |
-
class accessor : public object_api<accessor<Policy>> {
|
490 |
-
using key_type = typename Policy::key_type;
|
491 |
-
|
492 |
-
public:
|
493 |
-
accessor(handle obj, key_type key) : obj(obj), key(std::move(key)) { }
|
494 |
-
accessor(const accessor &) = default;
|
495 |
-
accessor(accessor &&) = default;
|
496 |
-
|
497 |
-
// accessor overload required to override default assignment operator (templates are not allowed
|
498 |
-
// to replace default compiler-generated assignments).
|
499 |
-
void operator=(const accessor &a) && { std::move(*this).operator=(handle(a)); }
|
500 |
-
void operator=(const accessor &a) & { operator=(handle(a)); }
|
501 |
-
|
502 |
-
template <typename T> void operator=(T &&value) && {
|
503 |
-
Policy::set(obj, key, object_or_cast(std::forward<T>(value)));
|
504 |
-
}
|
505 |
-
template <typename T> void operator=(T &&value) & {
|
506 |
-
get_cache() = reinterpret_borrow<object>(object_or_cast(std::forward<T>(value)));
|
507 |
-
}
|
508 |
-
|
509 |
-
template <typename T = Policy>
|
510 |
-
PYBIND11_DEPRECATED("Use of obj.attr(...) as bool is deprecated in favor of pybind11::hasattr(obj, ...)")
|
511 |
-
explicit operator enable_if_t<std::is_same<T, accessor_policies::str_attr>::value ||
|
512 |
-
std::is_same<T, accessor_policies::obj_attr>::value, bool>() const {
|
513 |
-
return hasattr(obj, key);
|
514 |
-
}
|
515 |
-
template <typename T = Policy>
|
516 |
-
PYBIND11_DEPRECATED("Use of obj[key] as bool is deprecated in favor of obj.contains(key)")
|
517 |
-
explicit operator enable_if_t<std::is_same<T, accessor_policies::generic_item>::value, bool>() const {
|
518 |
-
return obj.contains(key);
|
519 |
-
}
|
520 |
-
|
521 |
-
operator object() const { return get_cache(); }
|
522 |
-
PyObject *ptr() const { return get_cache().ptr(); }
|
523 |
-
template <typename T> T cast() const { return get_cache().template cast<T>(); }
|
524 |
-
|
525 |
-
private:
|
526 |
-
object &get_cache() const {
|
527 |
-
if (!cache) { cache = Policy::get(obj, key); }
|
528 |
-
return cache;
|
529 |
-
}
|
530 |
-
|
531 |
-
private:
|
532 |
-
handle obj;
|
533 |
-
key_type key;
|
534 |
-
mutable object cache;
|
535 |
-
};
|
536 |
-
|
537 |
-
PYBIND11_NAMESPACE_BEGIN(accessor_policies)
|
538 |
-
struct obj_attr {
|
539 |
-
using key_type = object;
|
540 |
-
static object get(handle obj, handle key) { return getattr(obj, key); }
|
541 |
-
static void set(handle obj, handle key, handle val) { setattr(obj, key, val); }
|
542 |
-
};
|
543 |
-
|
544 |
-
struct str_attr {
|
545 |
-
using key_type = const char *;
|
546 |
-
static object get(handle obj, const char *key) { return getattr(obj, key); }
|
547 |
-
static void set(handle obj, const char *key, handle val) { setattr(obj, key, val); }
|
548 |
-
};
|
549 |
-
|
550 |
-
struct generic_item {
|
551 |
-
using key_type = object;
|
552 |
-
|
553 |
-
static object get(handle obj, handle key) {
|
554 |
-
PyObject *result = PyObject_GetItem(obj.ptr(), key.ptr());
|
555 |
-
if (!result) { throw error_already_set(); }
|
556 |
-
return reinterpret_steal<object>(result);
|
557 |
-
}
|
558 |
-
|
559 |
-
static void set(handle obj, handle key, handle val) {
|
560 |
-
if (PyObject_SetItem(obj.ptr(), key.ptr(), val.ptr()) != 0) { throw error_already_set(); }
|
561 |
-
}
|
562 |
-
};
|
563 |
-
|
564 |
-
struct sequence_item {
|
565 |
-
using key_type = size_t;
|
566 |
-
|
567 |
-
static object get(handle obj, size_t index) {
|
568 |
-
PyObject *result = PySequence_GetItem(obj.ptr(), static_cast<ssize_t>(index));
|
569 |
-
if (!result) { throw error_already_set(); }
|
570 |
-
return reinterpret_steal<object>(result);
|
571 |
-
}
|
572 |
-
|
573 |
-
static void set(handle obj, size_t index, handle val) {
|
574 |
-
// PySequence_SetItem does not steal a reference to 'val'
|
575 |
-
if (PySequence_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.ptr()) != 0) {
|
576 |
-
throw error_already_set();
|
577 |
-
}
|
578 |
-
}
|
579 |
-
};
|
580 |
-
|
581 |
-
struct list_item {
|
582 |
-
using key_type = size_t;
|
583 |
-
|
584 |
-
static object get(handle obj, size_t index) {
|
585 |
-
PyObject *result = PyList_GetItem(obj.ptr(), static_cast<ssize_t>(index));
|
586 |
-
if (!result) { throw error_already_set(); }
|
587 |
-
return reinterpret_borrow<object>(result);
|
588 |
-
}
|
589 |
-
|
590 |
-
static void set(handle obj, size_t index, handle val) {
|
591 |
-
// PyList_SetItem steals a reference to 'val'
|
592 |
-
if (PyList_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.inc_ref().ptr()) != 0) {
|
593 |
-
throw error_already_set();
|
594 |
-
}
|
595 |
-
}
|
596 |
-
};
|
597 |
-
|
598 |
-
struct tuple_item {
|
599 |
-
using key_type = size_t;
|
600 |
-
|
601 |
-
static object get(handle obj, size_t index) {
|
602 |
-
PyObject *result = PyTuple_GetItem(obj.ptr(), static_cast<ssize_t>(index));
|
603 |
-
if (!result) { throw error_already_set(); }
|
604 |
-
return reinterpret_borrow<object>(result);
|
605 |
-
}
|
606 |
-
|
607 |
-
static void set(handle obj, size_t index, handle val) {
|
608 |
-
// PyTuple_SetItem steals a reference to 'val'
|
609 |
-
if (PyTuple_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.inc_ref().ptr()) != 0) {
|
610 |
-
throw error_already_set();
|
611 |
-
}
|
612 |
-
}
|
613 |
-
};
|
614 |
-
PYBIND11_NAMESPACE_END(accessor_policies)
|
615 |
-
|
616 |
-
/// STL iterator template used for tuple, list, sequence and dict
|
617 |
-
template <typename Policy>
|
618 |
-
class generic_iterator : public Policy {
|
619 |
-
using It = generic_iterator;
|
620 |
-
|
621 |
-
public:
|
622 |
-
using difference_type = ssize_t;
|
623 |
-
using iterator_category = typename Policy::iterator_category;
|
624 |
-
using value_type = typename Policy::value_type;
|
625 |
-
using reference = typename Policy::reference;
|
626 |
-
using pointer = typename Policy::pointer;
|
627 |
-
|
628 |
-
generic_iterator() = default;
|
629 |
-
generic_iterator(handle seq, ssize_t index) : Policy(seq, index) { }
|
630 |
-
|
631 |
-
reference operator*() const { return Policy::dereference(); }
|
632 |
-
reference operator[](difference_type n) const { return *(*this + n); }
|
633 |
-
pointer operator->() const { return **this; }
|
634 |
-
|
635 |
-
It &operator++() { Policy::increment(); return *this; }
|
636 |
-
It operator++(int) { auto copy = *this; Policy::increment(); return copy; }
|
637 |
-
It &operator--() { Policy::decrement(); return *this; }
|
638 |
-
It operator--(int) { auto copy = *this; Policy::decrement(); return copy; }
|
639 |
-
It &operator+=(difference_type n) { Policy::advance(n); return *this; }
|
640 |
-
It &operator-=(difference_type n) { Policy::advance(-n); return *this; }
|
641 |
-
|
642 |
-
friend It operator+(const It &a, difference_type n) { auto copy = a; return copy += n; }
|
643 |
-
friend It operator+(difference_type n, const It &b) { return b + n; }
|
644 |
-
friend It operator-(const It &a, difference_type n) { auto copy = a; return copy -= n; }
|
645 |
-
friend difference_type operator-(const It &a, const It &b) { return a.distance_to(b); }
|
646 |
-
|
647 |
-
friend bool operator==(const It &a, const It &b) { return a.equal(b); }
|
648 |
-
friend bool operator!=(const It &a, const It &b) { return !(a == b); }
|
649 |
-
friend bool operator< (const It &a, const It &b) { return b - a > 0; }
|
650 |
-
friend bool operator> (const It &a, const It &b) { return b < a; }
|
651 |
-
friend bool operator>=(const It &a, const It &b) { return !(a < b); }
|
652 |
-
friend bool operator<=(const It &a, const It &b) { return !(a > b); }
|
653 |
-
};
|
654 |
-
|
655 |
-
PYBIND11_NAMESPACE_BEGIN(iterator_policies)
|
656 |
-
/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers
|
657 |
-
template <typename T>
|
658 |
-
struct arrow_proxy {
|
659 |
-
T value;
|
660 |
-
|
661 |
-
arrow_proxy(T &&value) : value(std::move(value)) { }
|
662 |
-
T *operator->() const { return &value; }
|
663 |
-
};
|
664 |
-
|
665 |
-
/// Lightweight iterator policy using just a simple pointer: see ``PySequence_Fast_ITEMS``
|
666 |
-
class sequence_fast_readonly {
|
667 |
-
protected:
|
668 |
-
using iterator_category = std::random_access_iterator_tag;
|
669 |
-
using value_type = handle;
|
670 |
-
using reference = const handle;
|
671 |
-
using pointer = arrow_proxy<const handle>;
|
672 |
-
|
673 |
-
sequence_fast_readonly(handle obj, ssize_t n) : ptr(PySequence_Fast_ITEMS(obj.ptr()) + n) { }
|
674 |
-
|
675 |
-
reference dereference() const { return *ptr; }
|
676 |
-
void increment() { ++ptr; }
|
677 |
-
void decrement() { --ptr; }
|
678 |
-
void advance(ssize_t n) { ptr += n; }
|
679 |
-
bool equal(const sequence_fast_readonly &b) const { return ptr == b.ptr; }
|
680 |
-
ssize_t distance_to(const sequence_fast_readonly &b) const { return ptr - b.ptr; }
|
681 |
-
|
682 |
-
private:
|
683 |
-
PyObject **ptr;
|
684 |
-
};
|
685 |
-
|
686 |
-
/// Full read and write access using the sequence protocol: see ``detail::sequence_accessor``
|
687 |
-
class sequence_slow_readwrite {
|
688 |
-
protected:
|
689 |
-
using iterator_category = std::random_access_iterator_tag;
|
690 |
-
using value_type = object;
|
691 |
-
using reference = sequence_accessor;
|
692 |
-
using pointer = arrow_proxy<const sequence_accessor>;
|
693 |
-
|
694 |
-
sequence_slow_readwrite(handle obj, ssize_t index) : obj(obj), index(index) { }
|
695 |
-
|
696 |
-
reference dereference() const { return {obj, static_cast<size_t>(index)}; }
|
697 |
-
void increment() { ++index; }
|
698 |
-
void decrement() { --index; }
|
699 |
-
void advance(ssize_t n) { index += n; }
|
700 |
-
bool equal(const sequence_slow_readwrite &b) const { return index == b.index; }
|
701 |
-
ssize_t distance_to(const sequence_slow_readwrite &b) const { return index - b.index; }
|
702 |
-
|
703 |
-
private:
|
704 |
-
handle obj;
|
705 |
-
ssize_t index;
|
706 |
-
};
|
707 |
-
|
708 |
-
/// Python's dictionary protocol permits this to be a forward iterator
|
709 |
-
class dict_readonly {
|
710 |
-
protected:
|
711 |
-
using iterator_category = std::forward_iterator_tag;
|
712 |
-
using value_type = std::pair<handle, handle>;
|
713 |
-
using reference = const value_type;
|
714 |
-
using pointer = arrow_proxy<const value_type>;
|
715 |
-
|
716 |
-
dict_readonly() = default;
|
717 |
-
dict_readonly(handle obj, ssize_t pos) : obj(obj), pos(pos) { increment(); }
|
718 |
-
|
719 |
-
reference dereference() const { return {key, value}; }
|
720 |
-
void increment() { if (!PyDict_Next(obj.ptr(), &pos, &key, &value)) { pos = -1; } }
|
721 |
-
bool equal(const dict_readonly &b) const { return pos == b.pos; }
|
722 |
-
|
723 |
-
private:
|
724 |
-
handle obj;
|
725 |
-
PyObject *key = nullptr, *value = nullptr;
|
726 |
-
ssize_t pos = -1;
|
727 |
-
};
|
728 |
-
PYBIND11_NAMESPACE_END(iterator_policies)
|
729 |
-
|
730 |
-
#if !defined(PYPY_VERSION)
|
731 |
-
using tuple_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
|
732 |
-
using list_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
|
733 |
-
#else
|
734 |
-
using tuple_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
|
735 |
-
using list_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
|
736 |
-
#endif
|
737 |
-
|
738 |
-
using sequence_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
|
739 |
-
using dict_iterator = generic_iterator<iterator_policies::dict_readonly>;
|
740 |
-
|
741 |
-
inline bool PyIterable_Check(PyObject *obj) {
|
742 |
-
PyObject *iter = PyObject_GetIter(obj);
|
743 |
-
if (iter) {
|
744 |
-
Py_DECREF(iter);
|
745 |
-
return true;
|
746 |
-
} else {
|
747 |
-
PyErr_Clear();
|
748 |
-
return false;
|
749 |
-
}
|
750 |
-
}
|
751 |
-
|
752 |
-
inline bool PyNone_Check(PyObject *o) { return o == Py_None; }
|
753 |
-
inline bool PyEllipsis_Check(PyObject *o) { return o == Py_Ellipsis; }
|
754 |
-
|
755 |
-
inline bool PyUnicode_Check_Permissive(PyObject *o) { return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o); }
|
756 |
-
|
757 |
-
inline bool PyStaticMethod_Check(PyObject *o) { return o->ob_type == &PyStaticMethod_Type; }
|
758 |
-
|
759 |
-
class kwargs_proxy : public handle {
|
760 |
-
public:
|
761 |
-
explicit kwargs_proxy(handle h) : handle(h) { }
|
762 |
-
};
|
763 |
-
|
764 |
-
class args_proxy : public handle {
|
765 |
-
public:
|
766 |
-
explicit args_proxy(handle h) : handle(h) { }
|
767 |
-
kwargs_proxy operator*() const { return kwargs_proxy(*this); }
|
768 |
-
};
|
769 |
-
|
770 |
-
/// Python argument categories (using PEP 448 terms)
|
771 |
-
template <typename T> using is_keyword = std::is_base_of<arg, T>;
|
772 |
-
template <typename T> using is_s_unpacking = std::is_same<args_proxy, T>; // * unpacking
|
773 |
-
template <typename T> using is_ds_unpacking = std::is_same<kwargs_proxy, T>; // ** unpacking
|
774 |
-
template <typename T> using is_positional = satisfies_none_of<T,
|
775 |
-
is_keyword, is_s_unpacking, is_ds_unpacking
|
776 |
-
>;
|
777 |
-
template <typename T> using is_keyword_or_ds = satisfies_any_of<T, is_keyword, is_ds_unpacking>;
|
778 |
-
|
779 |
-
// Call argument collector forward declarations
|
780 |
-
template <return_value_policy policy = return_value_policy::automatic_reference>
|
781 |
-
class simple_collector;
|
782 |
-
template <return_value_policy policy = return_value_policy::automatic_reference>
|
783 |
-
class unpacking_collector;
|
784 |
-
|
785 |
-
PYBIND11_NAMESPACE_END(detail)
|
786 |
-
|
787 |
-
// TODO: After the deprecated constructors are removed, this macro can be simplified by
|
788 |
-
// inheriting ctors: `using Parent::Parent`. It's not an option right now because
|
789 |
-
// the `using` statement triggers the parent deprecation warning even if the ctor
|
790 |
-
// isn't even used.
|
791 |
-
#define PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
|
792 |
-
public: \
|
793 |
-
PYBIND11_DEPRECATED("Use reinterpret_borrow<"#Name">() or reinterpret_steal<"#Name">()") \
|
794 |
-
Name(handle h, bool is_borrowed) : Parent(is_borrowed ? Parent(h, borrowed_t{}) : Parent(h, stolen_t{})) { } \
|
795 |
-
Name(handle h, borrowed_t) : Parent(h, borrowed_t{}) { } \
|
796 |
-
Name(handle h, stolen_t) : Parent(h, stolen_t{}) { } \
|
797 |
-
PYBIND11_DEPRECATED("Use py::isinstance<py::python_type>(obj) instead") \
|
798 |
-
bool check() const { return m_ptr != nullptr && (bool) CheckFun(m_ptr); } \
|
799 |
-
static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); } \
|
800 |
-
template <typename Policy_> \
|
801 |
-
Name(const ::pybind11::detail::accessor<Policy_> &a) : Name(object(a)) { }
|
802 |
-
|
803 |
-
#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \
|
804 |
-
PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
|
805 |
-
/* This is deliberately not 'explicit' to allow implicit conversion from object: */ \
|
806 |
-
Name(const object &o) \
|
807 |
-
: Parent(check_(o) ? o.inc_ref().ptr() : ConvertFun(o.ptr()), stolen_t{}) \
|
808 |
-
{ if (!m_ptr) throw error_already_set(); } \
|
809 |
-
Name(object &&o) \
|
810 |
-
: Parent(check_(o) ? o.release().ptr() : ConvertFun(o.ptr()), stolen_t{}) \
|
811 |
-
{ if (!m_ptr) throw error_already_set(); }
|
812 |
-
|
813 |
-
#define PYBIND11_OBJECT(Name, Parent, CheckFun) \
|
814 |
-
PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
|
815 |
-
/* This is deliberately not 'explicit' to allow implicit conversion from object: */ \
|
816 |
-
Name(const object &o) : Parent(o) { } \
|
817 |
-
Name(object &&o) : Parent(std::move(o)) { }
|
818 |
-
|
819 |
-
#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun) \
|
820 |
-
PYBIND11_OBJECT(Name, Parent, CheckFun) \
|
821 |
-
Name() : Parent() { }
|
822 |
-
|
823 |
-
/// \addtogroup pytypes
|
824 |
-
/// @{
|
825 |
-
|
826 |
-
/** \rst
|
827 |
-
Wraps a Python iterator so that it can also be used as a C++ input iterator
|
828 |
-
|
829 |
-
Caveat: copying an iterator does not (and cannot) clone the internal
|
830 |
-
state of the Python iterable. This also applies to the post-increment
|
831 |
-
operator. This iterator should only be used to retrieve the current
|
832 |
-
value using ``operator*()``.
|
833 |
-
\endrst */
|
834 |
-
class iterator : public object {
|
835 |
-
public:
|
836 |
-
using iterator_category = std::input_iterator_tag;
|
837 |
-
using difference_type = ssize_t;
|
838 |
-
using value_type = handle;
|
839 |
-
using reference = const handle;
|
840 |
-
using pointer = const handle *;
|
841 |
-
|
842 |
-
PYBIND11_OBJECT_DEFAULT(iterator, object, PyIter_Check)
|
843 |
-
|
844 |
-
iterator& operator++() {
|
845 |
-
advance();
|
846 |
-
return *this;
|
847 |
-
}
|
848 |
-
|
849 |
-
iterator operator++(int) {
|
850 |
-
auto rv = *this;
|
851 |
-
advance();
|
852 |
-
return rv;
|
853 |
-
}
|
854 |
-
|
855 |
-
reference operator*() const {
|
856 |
-
if (m_ptr && !value.ptr()) {
|
857 |
-
auto& self = const_cast<iterator &>(*this);
|
858 |
-
self.advance();
|
859 |
-
}
|
860 |
-
return value;
|
861 |
-
}
|
862 |
-
|
863 |
-
pointer operator->() const { operator*(); return &value; }
|
864 |
-
|
865 |
-
/** \rst
|
866 |
-
The value which marks the end of the iteration. ``it == iterator::sentinel()``
|
867 |
-
is equivalent to catching ``StopIteration`` in Python.
|
868 |
-
|
869 |
-
.. code-block:: cpp
|
870 |
-
|
871 |
-
void foo(py::iterator it) {
|
872 |
-
while (it != py::iterator::sentinel()) {
|
873 |
-
// use `*it`
|
874 |
-
++it;
|
875 |
-
}
|
876 |
-
}
|
877 |
-
\endrst */
|
878 |
-
static iterator sentinel() { return {}; }

    friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); }
    friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); }

private:
    void advance() {
        value = reinterpret_steal<object>(PyIter_Next(m_ptr));
        if (PyErr_Occurred()) { throw error_already_set(); }
    }

private:
    object value = {};
};

class iterable : public object {
public:
    PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check)
};

class bytes;

class str : public object {
public:
    PYBIND11_OBJECT_CVT(str, object, detail::PyUnicode_Check_Permissive, raw_str)

    str(const char *c, size_t n)
        : object(PyUnicode_FromStringAndSize(c, (ssize_t) n), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate string object!");
    }

    // 'explicit' is explicitly omitted from the following constructors to allow implicit conversion to py::str from C++ string-like objects
    str(const char *c = "")
        : object(PyUnicode_FromString(c), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate string object!");
    }

    str(const std::string &s) : str(s.data(), s.size()) { }

    explicit str(const bytes &b);

    /** \rst
        Return a string representation of the object. This is analogous to
        the ``str()`` function in Python.
    \endrst */
    explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) { }

    operator std::string() const {
        object temp = *this;
        if (PyUnicode_Check(m_ptr)) {
            temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(m_ptr));
            if (!temp)
                pybind11_fail("Unable to extract string contents! (encoding issue)");
        }
        char *buffer;
        ssize_t length;
        if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length))
            pybind11_fail("Unable to extract string contents! (invalid type)");
        return std::string(buffer, (size_t) length);
    }

    template <typename... Args>
    str format(Args &&...args) const {
        return attr("format")(std::forward<Args>(args)...);
    }

private:
    /// Return string representation -- always returns a new reference, even if already a str
    static PyObject *raw_str(PyObject *op) {
        PyObject *str_value = PyObject_Str(op);
        if (!str_value) throw error_already_set();
#if PY_MAJOR_VERSION < 3
        PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr);
        Py_XDECREF(str_value); str_value = unicode;
#endif
        return str_value;
    }
};
/// @} pytypes

inline namespace literals {
/** \rst
    String literal version of `str`
 \endrst */
inline str operator"" _s(const char *s, size_t size) { return {s, size}; }
}

/// \addtogroup pytypes
/// @{
class bytes : public object {
public:
    PYBIND11_OBJECT(bytes, object, PYBIND11_BYTES_CHECK)

    // Allow implicit conversion:
    bytes(const char *c = "")
        : object(PYBIND11_BYTES_FROM_STRING(c), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate bytes object!");
    }

    bytes(const char *c, size_t n)
        : object(PYBIND11_BYTES_FROM_STRING_AND_SIZE(c, (ssize_t) n), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate bytes object!");
    }

    // Allow implicit conversion:
    bytes(const std::string &s) : bytes(s.data(), s.size()) { }

    explicit bytes(const pybind11::str &s);

    operator std::string() const {
        char *buffer;
        ssize_t length;
        if (PYBIND11_BYTES_AS_STRING_AND_SIZE(m_ptr, &buffer, &length))
            pybind11_fail("Unable to extract bytes contents!");
        return std::string(buffer, (size_t) length);
    }
};
// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors
// are included in the doxygen group; close here and reopen after as a workaround
/// @} pytypes

inline bytes::bytes(const pybind11::str &s) {
    object temp = s;
    if (PyUnicode_Check(s.ptr())) {
        temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(s.ptr()));
        if (!temp)
            pybind11_fail("Unable to extract string contents! (encoding issue)");
    }
    char *buffer;
    ssize_t length;
    if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length))
        pybind11_fail("Unable to extract string contents! (invalid type)");
    auto obj = reinterpret_steal<object>(PYBIND11_BYTES_FROM_STRING_AND_SIZE(buffer, length));
    if (!obj)
        pybind11_fail("Could not allocate bytes object!");
    m_ptr = obj.release().ptr();
}

inline str::str(const bytes& b) {
    char *buffer;
    ssize_t length;
    if (PYBIND11_BYTES_AS_STRING_AND_SIZE(b.ptr(), &buffer, &length))
        pybind11_fail("Unable to extract bytes contents!");
    auto obj = reinterpret_steal<object>(PyUnicode_FromStringAndSize(buffer, (ssize_t) length));
    if (!obj)
        pybind11_fail("Could not allocate string object!");
    m_ptr = obj.release().ptr();
}

/// \addtogroup pytypes
/// @{
class none : public object {
public:
    PYBIND11_OBJECT(none, object, detail::PyNone_Check)
    none() : object(Py_None, borrowed_t{}) { }
};

class ellipsis : public object {
public:
    PYBIND11_OBJECT(ellipsis, object, detail::PyEllipsis_Check)
    ellipsis() : object(Py_Ellipsis, borrowed_t{}) { }
};

class bool_ : public object {
public:
    PYBIND11_OBJECT_CVT(bool_, object, PyBool_Check, raw_bool)
    bool_() : object(Py_False, borrowed_t{}) { }
    // Allow implicit conversion from and to `bool`:
    bool_(bool value) : object(value ? Py_True : Py_False, borrowed_t{}) { }
    operator bool() const { return m_ptr && PyLong_AsLong(m_ptr) != 0; }

private:
    /// Return the truth value of an object -- always returns a new reference
    static PyObject *raw_bool(PyObject *op) {
        const auto value = PyObject_IsTrue(op);
        if (value == -1) return nullptr;
        return handle(value ? Py_True : Py_False).inc_ref().ptr();
    }
};

PYBIND11_NAMESPACE_BEGIN(detail)
// Converts a value to the given unsigned type.  If an error occurs, you get back (Unsigned) -1;
// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned).
// (The distinction is critically important when casting a returned -1 error value to some other
// unsigned type: (A)-1 != (B)-1 when A and B are unsigned types of different sizes).
template <typename Unsigned>
Unsigned as_unsigned(PyObject *o) {
    if (sizeof(Unsigned) <= sizeof(unsigned long)
#if PY_VERSION_HEX < 0x03000000
            || PyInt_Check(o)
#endif
    ) {
        unsigned long v = PyLong_AsUnsignedLong(o);
        return v == (unsigned long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;
    }
    else {
        unsigned long long v = PyLong_AsUnsignedLongLong(o);
        return v == (unsigned long long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;
    }
}
PYBIND11_NAMESPACE_END(detail)

class int_ : public object {
public:
    PYBIND11_OBJECT_CVT(int_, object, PYBIND11_LONG_CHECK, PyNumber_Long)
    int_() : object(PyLong_FromLong(0), stolen_t{}) { }
    // Allow implicit conversion from C++ integral types:
    template <typename T,
              detail::enable_if_t<std::is_integral<T>::value, int> = 0>
    int_(T value) {
        if (sizeof(T) <= sizeof(long)) {
            if (std::is_signed<T>::value)
                m_ptr = PyLong_FromLong((long) value);
            else
                m_ptr = PyLong_FromUnsignedLong((unsigned long) value);
        } else {
            if (std::is_signed<T>::value)
                m_ptr = PyLong_FromLongLong((long long) value);
            else
                m_ptr = PyLong_FromUnsignedLongLong((unsigned long long) value);
        }
        if (!m_ptr) pybind11_fail("Could not allocate int object!");
    }

    template <typename T,
              detail::enable_if_t<std::is_integral<T>::value, int> = 0>
    operator T() const {
        return std::is_unsigned<T>::value
            ? detail::as_unsigned<T>(m_ptr)
            : sizeof(T) <= sizeof(long)
              ? (T) PyLong_AsLong(m_ptr)
              : (T) PYBIND11_LONG_AS_LONGLONG(m_ptr);
    }
};

class float_ : public object {
public:
    PYBIND11_OBJECT_CVT(float_, object, PyFloat_Check, PyNumber_Float)
    // Allow implicit conversion from float/double:
    float_(float value) : object(PyFloat_FromDouble((double) value), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate float object!");
    }
    float_(double value = .0) : object(PyFloat_FromDouble((double) value), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate float object!");
    }
    operator float() const { return (float) PyFloat_AsDouble(m_ptr); }
    operator double() const { return (double) PyFloat_AsDouble(m_ptr); }
};

class weakref : public object {
public:
    PYBIND11_OBJECT_DEFAULT(weakref, object, PyWeakref_Check)
    explicit weakref(handle obj, handle callback = {})
        : object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate weak reference!");
    }
};

class slice : public object {
public:
    PYBIND11_OBJECT_DEFAULT(slice, object, PySlice_Check)
    slice(ssize_t start_, ssize_t stop_, ssize_t step_) {
        int_ start(start_), stop(stop_), step(step_);
        m_ptr = PySlice_New(start.ptr(), stop.ptr(), step.ptr());
        if (!m_ptr) pybind11_fail("Could not allocate slice object!");
    }
    bool compute(size_t length, size_t *start, size_t *stop, size_t *step,
                 size_t *slicelength) const {
        return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr,
                                    (ssize_t) length, (ssize_t *) start,
                                    (ssize_t *) stop, (ssize_t *) step,
                                    (ssize_t *) slicelength) == 0;
    }
    bool compute(ssize_t length, ssize_t *start, ssize_t *stop, ssize_t *step,
                 ssize_t *slicelength) const {
        return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr,
                                    length, start,
                                    stop, step,
                                    slicelength) == 0;
    }
};

class capsule : public object {
public:
    PYBIND11_OBJECT_DEFAULT(capsule, object, PyCapsule_CheckExact)
    PYBIND11_DEPRECATED("Use reinterpret_borrow<capsule>() or reinterpret_steal<capsule>()")
    capsule(PyObject *ptr, bool is_borrowed) : object(is_borrowed ? object(ptr, borrowed_t{}) : object(ptr, stolen_t{})) { }

    explicit capsule(const void *value, const char *name = nullptr, void (*destructor)(PyObject *) = nullptr)
        : object(PyCapsule_New(const_cast<void *>(value), name, destructor), stolen_t{}) {
        if (!m_ptr)
            pybind11_fail("Could not allocate capsule object!");
    }

    PYBIND11_DEPRECATED("Please pass a destructor that takes a void pointer as input")
    capsule(const void *value, void (*destruct)(PyObject *))
        : object(PyCapsule_New(const_cast<void*>(value), nullptr, destruct), stolen_t{}) {
        if (!m_ptr)
            pybind11_fail("Could not allocate capsule object!");
    }

    capsule(const void *value, void (*destructor)(void *)) {
        m_ptr = PyCapsule_New(const_cast<void *>(value), nullptr, [](PyObject *o) {
            auto destructor = reinterpret_cast<void (*)(void *)>(PyCapsule_GetContext(o));
            void *ptr = PyCapsule_GetPointer(o, nullptr);
            destructor(ptr);
        });

        if (!m_ptr)
            pybind11_fail("Could not allocate capsule object!");

        if (PyCapsule_SetContext(m_ptr, (void *) destructor) != 0)
            pybind11_fail("Could not set capsule context!");
    }

    capsule(void (*destructor)()) {
        m_ptr = PyCapsule_New(reinterpret_cast<void *>(destructor), nullptr, [](PyObject *o) {
            auto destructor = reinterpret_cast<void (*)()>(PyCapsule_GetPointer(o, nullptr));
            destructor();
        });

        if (!m_ptr)
            pybind11_fail("Could not allocate capsule object!");
    }

    template <typename T> operator T *() const {
        auto name = this->name();
        T * result = static_cast<T *>(PyCapsule_GetPointer(m_ptr, name));
        if (!result) pybind11_fail("Unable to extract capsule contents!");
        return result;
    }

    const char *name() const { return PyCapsule_GetName(m_ptr); }
};

class tuple : public object {
public:
    PYBIND11_OBJECT_CVT(tuple, object, PyTuple_Check, PySequence_Tuple)
    explicit tuple(size_t size = 0) : object(PyTuple_New((ssize_t) size), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate tuple object!");
    }
    size_t size() const { return (size_t) PyTuple_Size(m_ptr); }
    bool empty() const { return size() == 0; }
    detail::tuple_accessor operator[](size_t index) const { return {*this, index}; }
    detail::item_accessor operator[](handle h) const { return object::operator[](h); }
    detail::tuple_iterator begin() const { return {*this, 0}; }
    detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; }
};

class dict : public object {
public:
    PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict)
    dict() : object(PyDict_New(), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate dict object!");
    }
    template <typename... Args,
              typename = detail::enable_if_t<detail::all_of<detail::is_keyword_or_ds<Args>...>::value>,
              // MSVC workaround: it can't compile an out-of-line definition, so defer the collector
              typename collector = detail::deferred_t<detail::unpacking_collector<>, Args...>>
    explicit dict(Args &&...args) : dict(collector(std::forward<Args>(args)...).kwargs()) { }

    size_t size() const { return (size_t) PyDict_Size(m_ptr); }
    bool empty() const { return size() == 0; }
    detail::dict_iterator begin() const { return {*this, 0}; }
    detail::dict_iterator end() const { return {}; }
    void clear() const { PyDict_Clear(ptr()); }
    template <typename T> bool contains(T &&key) const {
        return PyDict_Contains(m_ptr, detail::object_or_cast(std::forward<T>(key)).ptr()) == 1;
    }

private:
    /// Call the `dict` Python type -- always returns a new reference
    static PyObject *raw_dict(PyObject *op) {
        if (PyDict_Check(op))
            return handle(op).inc_ref().ptr();
        return PyObject_CallFunctionObjArgs((PyObject *) &PyDict_Type, op, nullptr);
    }
};

class sequence : public object {
public:
    PYBIND11_OBJECT_DEFAULT(sequence, object, PySequence_Check)
    size_t size() const {
        ssize_t result = PySequence_Size(m_ptr);
        if (result == -1)
            throw error_already_set();
        return (size_t) result;
    }
    bool empty() const { return size() == 0; }
    detail::sequence_accessor operator[](size_t index) const { return {*this, index}; }
    detail::item_accessor operator[](handle h) const { return object::operator[](h); }
    detail::sequence_iterator begin() const { return {*this, 0}; }
    detail::sequence_iterator end() const { return {*this, PySequence_Size(m_ptr)}; }
};

class list : public object {
public:
    PYBIND11_OBJECT_CVT(list, object, PyList_Check, PySequence_List)
    explicit list(size_t size = 0) : object(PyList_New((ssize_t) size), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate list object!");
    }
    size_t size() const { return (size_t) PyList_Size(m_ptr); }
    bool empty() const { return size() == 0; }
    detail::list_accessor operator[](size_t index) const { return {*this, index}; }
    detail::item_accessor operator[](handle h) const { return object::operator[](h); }
    detail::list_iterator begin() const { return {*this, 0}; }
    detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; }
    template <typename T> void append(T &&val) const {
        PyList_Append(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr());
    }
    template <typename T> void insert(size_t index, T &&val) const {
        PyList_Insert(m_ptr, static_cast<ssize_t>(index),
                      detail::object_or_cast(std::forward<T>(val)).ptr());
    }
};

class args : public tuple { PYBIND11_OBJECT_DEFAULT(args, tuple, PyTuple_Check) };
class kwargs : public dict { PYBIND11_OBJECT_DEFAULT(kwargs, dict, PyDict_Check) };

class set : public object {
public:
    PYBIND11_OBJECT_CVT(set, object, PySet_Check, PySet_New)
    set() : object(PySet_New(nullptr), stolen_t{}) {
        if (!m_ptr) pybind11_fail("Could not allocate set object!");
    }
    size_t size() const { return (size_t) PySet_Size(m_ptr); }
    bool empty() const { return size() == 0; }
    template <typename T> bool add(T &&val) const {
        return PySet_Add(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) == 0;
    }
    void clear() const { PySet_Clear(m_ptr); }
    template <typename T> bool contains(T &&val) const {
        return PySet_Contains(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) == 1;
    }
};

class function : public object {
public:
    PYBIND11_OBJECT_DEFAULT(function, object, PyCallable_Check)
    handle cpp_function() const {
        handle fun = detail::get_function(m_ptr);
        if (fun && PyCFunction_Check(fun.ptr()))
            return fun;
        return handle();
    }
    bool is_cpp_function() const { return (bool) cpp_function(); }
};

class staticmethod : public object {
public:
    PYBIND11_OBJECT_CVT(staticmethod, object, detail::PyStaticMethod_Check, PyStaticMethod_New)
};

class buffer : public object {
public:
    PYBIND11_OBJECT_DEFAULT(buffer, object, PyObject_CheckBuffer)

    buffer_info request(bool writable = false) const {
        int flags = PyBUF_STRIDES | PyBUF_FORMAT;
        if (writable) flags |= PyBUF_WRITABLE;
        Py_buffer *view = new Py_buffer();
        if (PyObject_GetBuffer(m_ptr, view, flags) != 0) {
            delete view;
            throw error_already_set();
        }
        return buffer_info(view);
    }
};

class memoryview : public object {
public:
    PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject)

    /** \rst
        Creates ``memoryview`` from ``buffer_info``.

        ``buffer_info`` must be created from ``buffer::request()``. Otherwise
        throws an exception.

        For creating a ``memoryview`` from objects that support buffer protocol,
        use ``memoryview(const object& obj)`` instead of this constructor.
    \endrst */
    explicit memoryview(const buffer_info& info) {
        if (!info.view())
            pybind11_fail("Prohibited to create memoryview without Py_buffer");
        // Note: PyMemoryView_FromBuffer never increments obj reference.
        m_ptr = (info.view()->obj) ?
            PyMemoryView_FromObject(info.view()->obj) :
            PyMemoryView_FromBuffer(info.view());
        if (!m_ptr)
            pybind11_fail("Unable to create memoryview from buffer descriptor");
    }

    /** \rst
        Creates ``memoryview`` from static buffer.

        This method is meant for providing a ``memoryview`` for C/C++ buffer not
        managed by Python. The caller is responsible for managing the lifetime
        of ``ptr`` and ``format``, which MUST outlive the memoryview constructed
        here.

        See also: Python C API documentation for `PyMemoryView_FromBuffer`_.

        .. _PyMemoryView_FromBuffer: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromBuffer

        :param ptr: Pointer to the buffer.
        :param itemsize: Byte size of an element.
        :param format: Pointer to the null-terminated format string. For
            homogeneous Buffers, this should be set to
            ``format_descriptor<T>::value``.
        :param shape: Shape of the tensor (1 entry per dimension).
        :param strides: Number of bytes between adjacent entries (for each
            per dimension).
        :param readonly: Flag to indicate if the underlying storage may be
            written to.
    \endrst */
    static memoryview from_buffer(
        void *ptr, ssize_t itemsize, const char *format,
        detail::any_container<ssize_t> shape,
        detail::any_container<ssize_t> strides, bool readonly = false);

    static memoryview from_buffer(
        const void *ptr, ssize_t itemsize, const char *format,
        detail::any_container<ssize_t> shape,
        detail::any_container<ssize_t> strides) {
        return memoryview::from_buffer(
            const_cast<void*>(ptr), itemsize, format, shape, strides, true);
    }

    template<typename T>
    static memoryview from_buffer(
        T *ptr, detail::any_container<ssize_t> shape,
        detail::any_container<ssize_t> strides, bool readonly = false) {
        return memoryview::from_buffer(
            reinterpret_cast<void*>(ptr), sizeof(T),
            format_descriptor<T>::value, shape, strides, readonly);
    }

    template<typename T>
    static memoryview from_buffer(
        const T *ptr, detail::any_container<ssize_t> shape,
        detail::any_container<ssize_t> strides) {
        return memoryview::from_buffer(
            const_cast<T*>(ptr), shape, strides, true);
    }

#if PY_MAJOR_VERSION >= 3
    /** \rst
        Creates ``memoryview`` from static memory.

        This method is meant for providing a ``memoryview`` for C/C++ buffer not
        managed by Python. The caller is responsible for managing the lifetime
        of ``mem``, which MUST outlive the memoryview constructed here.

        This method is not available in Python 2.

        See also: Python C API documentation for `PyMemoryView_FromBuffer`_.

        .. _PyMemoryView_FromMemory: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromMemory
    \endrst */
    static memoryview from_memory(void *mem, ssize_t size, bool readonly = false) {
        PyObject* ptr = PyMemoryView_FromMemory(
            reinterpret_cast<char*>(mem), size,
            (readonly) ? PyBUF_READ : PyBUF_WRITE);
        if (!ptr)
            pybind11_fail("Could not allocate memoryview object!");
        return memoryview(object(ptr, stolen_t{}));
    }

    static memoryview from_memory(const void *mem, ssize_t size) {
        return memoryview::from_memory(const_cast<void*>(mem), size, true);
    }
#endif
};

#ifndef DOXYGEN_SHOULD_SKIP_THIS
inline memoryview memoryview::from_buffer(
    void *ptr, ssize_t itemsize, const char* format,
    detail::any_container<ssize_t> shape,
    detail::any_container<ssize_t> strides, bool readonly) {
    size_t ndim = shape->size();
    if (ndim != strides->size())
        pybind11_fail("memoryview: shape length doesn't match strides length");
    ssize_t size = ndim ? 1 : 0;
    for (size_t i = 0; i < ndim; ++i)
        size *= (*shape)[i];
    Py_buffer view;
    view.buf = ptr;
    view.obj = nullptr;
    view.len = size * itemsize;
    view.readonly = static_cast<int>(readonly);
    view.itemsize = itemsize;
    view.format = const_cast<char*>(format);
    view.ndim = static_cast<int>(ndim);
    view.shape = shape->data();
    view.strides = strides->data();
    view.suboffsets = nullptr;
    view.internal = nullptr;
    PyObject* obj = PyMemoryView_FromBuffer(&view);
    if (!obj)
        throw error_already_set();
    return memoryview(object(obj, stolen_t{}));
}
#endif  // DOXYGEN_SHOULD_SKIP_THIS
/// @} pytypes

/// \addtogroup python_builtins
/// @{
inline size_t len(handle h) {
    ssize_t result = PyObject_Length(h.ptr());
    if (result < 0)
        pybind11_fail("Unable to compute length of object");
    return (size_t) result;
}

inline size_t len_hint(handle h) {
#if PY_VERSION_HEX >= 0x03040000
    ssize_t result = PyObject_LengthHint(h.ptr(), 0);
#else
    ssize_t result = PyObject_Length(h.ptr());
#endif
    if (result < 0) {
        // Sometimes a length can't be determined at all (eg generators)
        // In which case simply return 0
        PyErr_Clear();
        return 0;
    }
    return (size_t) result;
}

inline str repr(handle h) {
    PyObject *str_value = PyObject_Repr(h.ptr());
    if (!str_value) throw error_already_set();
#if PY_MAJOR_VERSION < 3
    PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr);
    Py_XDECREF(str_value); str_value = unicode;
    if (!str_value) throw error_already_set();
#endif
    return reinterpret_steal<str>(str_value);
}

inline iterator iter(handle obj) {
    PyObject *result = PyObject_GetIter(obj.ptr());
    if (!result) { throw error_already_set(); }
    return reinterpret_steal<iterator>(result);
}
/// @} python_builtins

PYBIND11_NAMESPACE_BEGIN(detail)
template <typename D> iterator object_api<D>::begin() const { return iter(derived()); }
template <typename D> iterator object_api<D>::end() const { return iterator::sentinel(); }
template <typename D> item_accessor object_api<D>::operator[](handle key) const {
    return {derived(), reinterpret_borrow<object>(key)};
}
template <typename D> item_accessor object_api<D>::operator[](const char *key) const {
    return {derived(), pybind11::str(key)};
}
template <typename D> obj_attr_accessor object_api<D>::attr(handle key) const {
    return {derived(), reinterpret_borrow<object>(key)};
}
template <typename D> str_attr_accessor object_api<D>::attr(const char *key) const {
    return {derived(), key};
}
template <typename D> args_proxy object_api<D>::operator*() const {
    return args_proxy(derived().ptr());
}
template <typename D> template <typename T> bool object_api<D>::contains(T &&item) const {
    return attr("__contains__")(std::forward<T>(item)).template cast<bool>();
}

template <typename D>
pybind11::str object_api<D>::str() const { return pybind11::str(derived()); }

template <typename D>
str_attr_accessor object_api<D>::doc() const { return attr("__doc__"); }

template <typename D>
handle object_api<D>::get_type() const { return (PyObject *) Py_TYPE(derived().ptr()); }

template <typename D>
bool object_api<D>::rich_compare(object_api const &other, int value) const {
    int rv = PyObject_RichCompareBool(derived().ptr(), other.derived().ptr(), value);
    if (rv == -1)
        throw error_already_set();
    return rv == 1;
}

#define PYBIND11_MATH_OPERATOR_UNARY(op, fn) \
    template <typename D> object object_api<D>::op() const { \
        object result = reinterpret_steal<object>(fn(derived().ptr())); \
        if (!result.ptr()) \
            throw error_already_set(); \
        return result; \
    }

#define PYBIND11_MATH_OPERATOR_BINARY(op, fn) \
    template <typename D> \
    object object_api<D>::op(object_api const &other) const { \
        object result = reinterpret_steal<object>( \
            fn(derived().ptr(), other.derived().ptr())); \
        if (!result.ptr()) \
            throw error_already_set(); \
        return result; \
    }

PYBIND11_MATH_OPERATOR_UNARY (operator~,   PyNumber_Invert)
PYBIND11_MATH_OPERATOR_UNARY (operator-,   PyNumber_Negative)
PYBIND11_MATH_OPERATOR_BINARY(operator+,   PyNumber_Add)
PYBIND11_MATH_OPERATOR_BINARY(operator+=,  PyNumber_InPlaceAdd)
PYBIND11_MATH_OPERATOR_BINARY(operator-,   PyNumber_Subtract)
PYBIND11_MATH_OPERATOR_BINARY(operator-=,  PyNumber_InPlaceSubtract)
PYBIND11_MATH_OPERATOR_BINARY(operator*,   PyNumber_Multiply)
PYBIND11_MATH_OPERATOR_BINARY(operator*=,  PyNumber_InPlaceMultiply)
PYBIND11_MATH_OPERATOR_BINARY(operator/,   PyNumber_TrueDivide)
PYBIND11_MATH_OPERATOR_BINARY(operator/=,  PyNumber_InPlaceTrueDivide)
PYBIND11_MATH_OPERATOR_BINARY(operator|,   PyNumber_Or)
PYBIND11_MATH_OPERATOR_BINARY(operator|=,  PyNumber_InPlaceOr)
PYBIND11_MATH_OPERATOR_BINARY(operator&,   PyNumber_And)
PYBIND11_MATH_OPERATOR_BINARY(operator&=,  PyNumber_InPlaceAnd)
PYBIND11_MATH_OPERATOR_BINARY(operator^,   PyNumber_Xor)
PYBIND11_MATH_OPERATOR_BINARY(operator^=,  PyNumber_InPlaceXor)
PYBIND11_MATH_OPERATOR_BINARY(operator<<,  PyNumber_Lshift)
PYBIND11_MATH_OPERATOR_BINARY(operator<<=, PyNumber_InPlaceLshift)
PYBIND11_MATH_OPERATOR_BINARY(operator>>,  PyNumber_Rshift)
PYBIND11_MATH_OPERATOR_BINARY(operator>>=, PyNumber_InPlaceRshift)

#undef PYBIND11_MATH_OPERATOR_UNARY
#undef PYBIND11_MATH_OPERATOR_BINARY

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
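The deleted header above defines the C++ wrappers (py::str, py::bytes, py::dict, py::list, py::memoryview, the free functions py::len/py::repr/py::iter, and so on) that mirror Python's built-in types. As a hedged illustration only, the following minimal sketch shows how that API is typically exercised from an embedding program; it assumes pybind11's embedding support (pybind11/embed.h) is available and linked against libpython, and it is not part of the deleted file.

#include <pybind11/embed.h>
#include <iostream>

namespace py = pybind11;

int main() {
    py::scoped_interpreter guard{};     // start the interpreter for this sketch

    py::dict d;                         // wraps PyDict_New()
    d["answer"] = 42;                   // item_accessor from object_api::operator[]

    py::list l;                         // wraps PyList_New(0)
    l.append(py::str("hello"));         // list::append -> PyList_Append
    l.append(d);

    // py::len / py::repr mirror the Python built-ins declared above.
    std::cout << py::len(l) << " items: " << std::string(py::repr(l)) << "\n";

    // memoryview::from_memory exposes a C buffer without copying; the buffer
    // must outlive the view, as the docstring above requires.
    static const char data[] = "raw bytes";
    py::memoryview view = py::memoryview::from_memory(data, sizeof(data) - 1);
    std::cout << py::len(view) << " bytes viewed\n";
    return 0;
}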
spaces/CVPR/LIVE/pybind11/tests/object.h
DELETED
@@ -1,175 +0,0 @@
#if !defined(__OBJECT_H)
#define __OBJECT_H

#include <atomic>
#include "constructor_stats.h"

/// Reference counted object base class
class Object {
public:
    /// Default constructor
    Object() { print_default_created(this); }

    /// Copy constructor
    Object(const Object &) : m_refCount(0) { print_copy_created(this); }

    /// Return the current reference count
    int getRefCount() const { return m_refCount; };

    /// Increase the object's reference count by one
    void incRef() const { ++m_refCount; }

    /** \brief Decrease the reference count of
     * the object and possibly deallocate it.
     *
     * The object will automatically be deallocated once
     * the reference count reaches zero.
     */
    void decRef(bool dealloc = true) const {
        --m_refCount;
        if (m_refCount == 0 && dealloc)
            delete this;
        else if (m_refCount < 0)
            throw std::runtime_error("Internal error: reference count < 0!");
    }

    virtual std::string toString() const = 0;
protected:
    /** \brief Virtual protected deconstructor.
     * (Will only be called by \ref ref)
     */
    virtual ~Object() { print_destroyed(this); }
private:
    mutable std::atomic<int> m_refCount { 0 };
};

// Tag class used to track constructions of ref objects.  When we track constructors, below, we
// track and print out the actual class (e.g. ref<MyObject>), and *also* add a fake tracker for
// ref_tag.  This lets us check that the total number of ref<Anything> constructors/destructors is
// correct without having to check each individual ref<Whatever> type individually.
class ref_tag {};

/**
 * \brief Reference counting helper
 *
 * The \a ref refeference template is a simple wrapper to store a
 * pointer to an object. It takes care of increasing and decreasing
 * the reference count of the object. When the last reference goes
 * out of scope, the associated object will be deallocated.
 *
 * \ingroup libcore
 */
template <typename T> class ref {
public:
    /// Create a nullptr reference
    ref() : m_ptr(nullptr) { print_default_created(this); track_default_created((ref_tag*) this); }

    /// Construct a reference from a pointer
    ref(T *ptr) : m_ptr(ptr) {
        if (m_ptr) ((Object *) m_ptr)->incRef();

        print_created(this, "from pointer", m_ptr); track_created((ref_tag*) this, "from pointer");

    }

    /// Copy constructor
    ref(const ref &r) : m_ptr(r.m_ptr) {
        if (m_ptr)
            ((Object *) m_ptr)->incRef();

        print_copy_created(this, "with pointer", m_ptr); track_copy_created((ref_tag*) this);
    }

    /// Move constructor
    ref(ref &&r) : m_ptr(r.m_ptr) {
        r.m_ptr = nullptr;

        print_move_created(this, "with pointer", m_ptr); track_move_created((ref_tag*) this);
    }

    /// Destroy this reference
    ~ref() {
        if (m_ptr)
            ((Object *) m_ptr)->decRef();

        print_destroyed(this); track_destroyed((ref_tag*) this);
    }

    /// Move another reference into the current one
    ref& operator=(ref&& r) {
        print_move_assigned(this, "pointer", r.m_ptr); track_move_assigned((ref_tag*) this);

        if (*this == r)
            return *this;
        if (m_ptr)
            ((Object *) m_ptr)->decRef();
        m_ptr = r.m_ptr;
        r.m_ptr = nullptr;
        return *this;
    }

    /// Overwrite this reference with another reference
    ref& operator=(const ref& r) {
        print_copy_assigned(this, "pointer", r.m_ptr); track_copy_assigned((ref_tag*) this);

        if (m_ptr == r.m_ptr)
            return *this;
        if (m_ptr)
            ((Object *) m_ptr)->decRef();
        m_ptr = r.m_ptr;
        if (m_ptr)
            ((Object *) m_ptr)->incRef();
        return *this;
    }

    /// Overwrite this reference with a pointer to another object
    ref& operator=(T *ptr) {
        print_values(this, "assigned pointer"); track_values((ref_tag*) this, "assigned pointer");

        if (m_ptr == ptr)
            return *this;
        if (m_ptr)
            ((Object *) m_ptr)->decRef();
        m_ptr = ptr;
        if (m_ptr)
            ((Object *) m_ptr)->incRef();
        return *this;
    }

    /// Compare this reference with another reference
    bool operator==(const ref &r) const { return m_ptr == r.m_ptr; }

    /// Compare this reference with another reference
    bool operator!=(const ref &r) const { return m_ptr != r.m_ptr; }

    /// Compare this reference with a pointer
    bool operator==(const T* ptr) const { return m_ptr == ptr; }

    /// Compare this reference with a pointer
    bool operator!=(const T* ptr) const { return m_ptr != ptr; }

    /// Access the object referenced by this reference
    T* operator->() { return m_ptr; }

    /// Access the object referenced by this reference
    const T* operator->() const { return m_ptr; }

    /// Return a C++ reference to the referenced object
    T& operator*() { return *m_ptr; }

    /// Return a const C++ reference to the referenced object
    const T& operator*() const { return *m_ptr; }

    /// Return a pointer to the referenced object
    operator T* () { return m_ptr; }

    /// Return a const pointer to the referenced object
    T* get_ptr() { return m_ptr; }

    /// Return a pointer to the referenced object
    const T* get_ptr() const { return m_ptr; }
private:
    T *m_ptr;
};

#endif /* __OBJECT_H */
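For orientation, here is a hedged sketch of how the ref<T>/Object helpers above are meant to be used. MyObject is a hypothetical subclass introduced only for this illustration, and the header depends on constructor_stats.h from the pybind11 test suite, so this is a sketch of intent under those assumptions rather than a standalone program.

#include "object.h"
#include <string>
#include <iostream>

// Hypothetical concrete subclass; Object itself is abstract (toString is pure virtual).
class MyObject : public Object {
public:
    explicit MyObject(int v) : m_value(v) { }
    std::string toString() const override { return "MyObject(" + std::to_string(m_value) + ")"; }
private:
    int m_value;
};

void demo() {
    ref<MyObject> a(new MyObject(1));   // ref(T*) bumps the count to 1
    {
        ref<MyObject> b = a;            // copy constructor -> count is 2
        std::cout << b->toString() << ", refs=" << b->getRefCount() << "\n";
    }                                   // b destroyed -> count back to 1
    a = new MyObject(2);                // assignment drops the old object to 0 and deletes it
    std::cout << a->toString() << "\n";
}                                       // a destroyed -> the second object is deleted too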
spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators.h
DELETED
@@ -1,25 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/detail/functional/operators/arithmetic_operators.h>
#include <thrust/detail/functional/operators/relational_operators.h>
#include <thrust/detail/functional/operators/logical_operators.h>
#include <thrust/detail/functional/operators/bitwise_operators.h>
#include <thrust/detail/functional/operators/compound_assignment_operators.h>
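This deleted header is an internal aggregator for the operator overloads behind Thrust's placeholder expressions. As a hedged illustration using only the public Thrust API (a CUDA/Thrust toolchain is assumed), the overloads it pulls in are what make expressions such as _1 * 2 + _2 usable as on-the-fly functors:

#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>   // brings in the placeholder machinery
#include <cstdio>

int main() {
    using namespace thrust::placeholders;
    thrust::device_vector<int> x(4, 10), y(4, 1), out(4);

    // out[i] = x[i] * 2 + y[i], expressed with the overloaded operators
    thrust::transform(x.begin(), x.end(), y.begin(), out.begin(), _1 * 2 + _2);

    for (int v : out) std::printf("%d ", v);   // prints: 21 21 21 21
    std::printf("\n");
    return 0;
}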
spaces/CVPR/WALT/mmdet/models/dense_heads/retina_head.py
DELETED
@@ -1,114 +0,0 @@
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init

from ..builder import HEADS
from .anchor_head import AnchorHead


@HEADS.register_module()
class RetinaHead(AnchorHead):
    r"""An anchor-based head used in `RetinaNet
    <https://arxiv.org/pdf/1708.02002.pdf>`_.

    The head contains two subnetworks. The first classifies anchor boxes and
    the second regresses deltas for the anchors.

    Example:
        >>> import torch
        >>> self = RetinaHead(11, 7)
        >>> x = torch.rand(1, 7, 32, 32)
        >>> cls_score, bbox_pred = self.forward_single(x)
        >>> # Each anchor predicts a score for each class except background
        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
        >>> assert cls_per_anchor == (self.num_classes)
        >>> assert box_per_anchor == 4
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     octave_base_scale=4,
                     scales_per_octave=3,
                     ratios=[0.5, 1.0, 2.0],
                     strides=[8, 16, 32, 64, 128]),
                 **kwargs):
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(RetinaHead, self).__init__(
            num_classes,
            in_channels,
            anchor_generator=anchor_generator,
            **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_anchors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_anchors * 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward_single(self, x):
        """Forward feature of a single scale level.

        Args:
            x (Tensor): Features of a single scale level.

        Returns:
            tuple:
                cls_score (Tensor): Cls scores for a single scale level
                    the channels number is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level, the channels number is num_anchors * 4.
        """
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)
        cls_score = self.retina_cls(cls_feat)
        bbox_pred = self.retina_reg(reg_feat)
        return cls_score, bbox_pred
spaces/CVPR/WALT/walt/datasets/coco.py
DELETED
@@ -1,519 +0,0 @@
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.

# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download
# both the COCO images and annotations in order to run the demo.

# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).

# The following API functions are defined:
#  COCO       - COCO api class that loads COCO annotation file and prepare data
#  structures.
#  decodeMask - Decode binary mask M encoded via run-length encoding.
#  encodeMask - Encode binary mask M using run-length encoding.
#  getAnnIds  - Get ann ids that satisfy given filter conditions.
#  getCatIds  - Get cat ids that satisfy given filter conditions.
#  getImgIds  - Get img ids that satisfy given filter conditions.
#  loadAnns   - Load anns with the specified ids.
#  loadCats   - Load cats with the specified ids.
#  loadImgs   - Load imgs with the specified ids.
#  annToMask  - Convert segmentation in an annotation to binary mask.
#  showAnns   - Display the specified annotations.
#  loadRes    - Load algorithm results and create API for accessing them.
#  download   - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".

# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns

# Microsoft COCO Toolbox.      version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]

import copy
import itertools
import json
import os
import time
from collections import defaultdict
from urllib.request import urlretrieve

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon

from . import mask as maskUtils


def _isArrayLike(obj):
    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')


class COCO:
    def __init__(self, annotation_file=None):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing
        annotations.
        :param annotation_file (str): location of annotation file
        :param image_folder (str): location to the folder that hosts images.
        :return:
        """
        # load dataset
        self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(
        ), dict()
        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
        if annotation_file is not None:
            print('loading annotations into memory...')
            tic = time.time()
            with open(annotation_file, 'r') as f:
                dataset = json.load(f)
            assert type(
                dataset
            ) == dict, 'annotation file format {} not supported'.format(
                type(dataset))
            print('Done (t={:0.2f}s)'.format(time.time() - tic))
            self.dataset = dataset
            self.createIndex()
        self.img_ann_map = self.imgToAnns
        self.cat_img_map = self.catToImgs

    def createIndex(self):
        # create index
        print('creating index...')
        anns, cats, imgs = {}, {}, {}
        imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
        if 'annotations' in self.dataset:
            for ann in self.dataset['annotations']:
                imgToAnns[ann['image_id']].append(ann)
                anns[ann['id']] = ann

        if 'images' in self.dataset:
            for img in self.dataset['images']:
                imgs[img['id']] = img

        if 'categories' in self.dataset:
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat

        if 'annotations' in self.dataset and 'categories' in self.dataset:
            for ann in self.dataset['annotations']:
                catToImgs[ann['category_id']].append(ann['image_id'])

        print('index created!')

        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def info(self):
        """
        Print information about the annotation file.
        :return:
        """
        for key, value in self.dataset['info'].items():
            print('{}: {}'.format(key, value))

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
        """
        Get ann ids that satisfy given filter conditions. default skips that
        filter
        :param imgIds (int array) : get anns for given imgs
               catIds (int array) : get anns for given cats
               areaRng (float array) : get anns for given area range
               (e.g. [0 inf])
               iscrowd (boolean) : get anns for given crowd label
               (False or True)
        :return: ids (int array) : integer array of ann ids
        """
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            anns = self.dataset['annotations']
        else:
            if not len(imgIds) == 0:
                lists = [
                    self.imgToAnns[imgId] for imgId in imgIds
                    if imgId in self.imgToAnns
                ]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.dataset['annotations']
            anns = anns if len(catIds) == 0 else [
                ann for ann in anns if ann['category_id'] in catIds
            ]
            anns = anns if len(areaRng) == 0 else [
                ann for ann in anns
                if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]
            ]
        if iscrowd is not None:
            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
        else:
            ids = [ann['id'] for ann in anns]
        return ids

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
        """
        filtering parameters. default skips that filter.
        :param catNms (str array)  : get cats for given cat names
        :param supNms (str array)  : get cats for given supercategory names
        :param catIds (int array)  : get cats for given cat ids
        :return: ids (int array)   : integer array of cat ids
        """
        catNms = catNms if _isArrayLike(catNms) else [catNms]
        supNms = supNms if _isArrayLike(supNms) else [supNms]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(catNms) == len(supNms) == len(catIds) == 0:
            cats = self.dataset['categories']
        else:
            cats = self.dataset['categories']
            cats = cats if len(catNms) == 0 else [
                cat for cat in cats if cat['name'] in catNms
            ]
            cats = cats if len(supNms) == 0 else [
                cat for cat in cats if cat['supercategory'] in supNms
            ]
            cats = cats if len(catIds) == 0 else [
                cat for cat in cats if cat['id'] in catIds
            ]
        ids = [cat['id'] for cat in cats]
        return ids

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array) : get imgs for given ids
        :param catIds (int array) : get imgs with all given cats
        :return: ids (int array)  : integer array of img ids
        '''
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            ids = set(imgIds)
            for i, catId in enumerate(catIds):
                if i == 0 and len(ids) == 0:
                    ids = set(self.catToImgs[catId])
                else:
                    ids &= set(self.catToImgs[catId])
        return list(ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        return self.getImgIds(img_ids, cat_ids)

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying anns
        :return: anns (object array) : loaded ann objects
        """
        if _isArrayLike(ids):
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    load_anns = loadAnns

    def loadCats(self, ids=[]):
        """
        Load cats with the specified ids.
        :param ids (int array)       : integer ids specifying cats
        :return: cats (object array) : loaded cat objects
        """
        if _isArrayLike(ids):
            return [self.cats[id] for id in ids]
        elif type(ids) == int:
            return [self.cats[ids]]

    load_cats = loadCats

    def loadImgs(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying img
        :return: imgs (object array) : loaded img objects
        """
        if _isArrayLike(ids):
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    load_imgs = loadImgs

    def showAnns(self, anns, draw_bbox=False):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            ax = plt.gca()
29 |
-
# loadCats - Load cats with the specified ids.
|
30 |
-
# loadImgs - Load imgs with the specified ids.
|
31 |
-
# annToMask - Convert segmentation in an annotation to binary mask.
|
32 |
-
# showAnns - Display the specified annotations.
|
33 |
-
# loadRes - Load algorithm results and create API for accessing them.
|
34 |
-
# download - Download COCO images from mscoco.org server.
|
35 |
-
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
|
36 |
-
# Help on each functions can be accessed by: "help COCO>function".
|
37 |
-
|
38 |
-
# See also COCO>decodeMask,
|
39 |
-
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
|
40 |
-
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
|
41 |
-
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
|
42 |
-
|
43 |
-
# Microsoft COCO Toolbox. version 2.0
|
44 |
-
# Data, paper, and tutorials available at: http://mscoco.org/
|
45 |
-
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
|
46 |
-
# Licensed under the Simplified BSD License [see bsd.txt]
|
47 |
-
|
48 |
-
import copy
|
49 |
-
import itertools
|
50 |
-
import json
|
51 |
-
import os
|
52 |
-
import time
|
53 |
-
from collections import defaultdict
|
54 |
-
from urllib.request import urlretrieve
|
55 |
-
|
56 |
-
import matplotlib.pyplot as plt
|
57 |
-
import numpy as np
|
58 |
-
from matplotlib.collections import PatchCollection
|
59 |
-
from matplotlib.patches import Polygon
|
60 |
-
|
61 |
-
from . import mask as maskUtils
|
62 |
-
|
63 |
-
|
64 |
-
def _isArrayLike(obj):
|
65 |
-
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
|
66 |
-
|
67 |
-
|
68 |
-
class COCO:
|
69 |
-
def __init__(self, annotation_file=None):
|
70 |
-
"""
|
71 |
-
Constructor of Microsoft COCO helper class for reading and visualizing
|
72 |
-
annotations.
|
73 |
-
:param annotation_file (str): location of annotation file
|
74 |
-
:param image_folder (str): location to the folder that hosts images.
|
75 |
-
:return:
|
76 |
-
"""
|
77 |
-
# load dataset
|
78 |
-
self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(
|
79 |
-
), dict()
|
80 |
-
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
|
81 |
-
if annotation_file is not None:
|
82 |
-
print('loading annotations into memory...')
|
83 |
-
tic = time.time()
|
84 |
-
with open(annotation_file, 'r') as f:
|
85 |
-
dataset = json.load(f)
|
86 |
-
assert type(
|
87 |
-
dataset
|
88 |
-
) == dict, 'annotation file format {} not supported'.format(
|
89 |
-
type(dataset))
|
90 |
-
print('Done (t={:0.2f}s)'.format(time.time() - tic))
|
91 |
-
self.dataset = dataset
|
92 |
-
self.createIndex()
|
93 |
-
self.img_ann_map = self.imgToAnns
|
94 |
-
self.cat_img_map = self.catToImgs
|
95 |
-
|
96 |
-
def createIndex(self):
|
97 |
-
# create index
|
98 |
-
print('creating index...')
|
99 |
-
        anns, cats, imgs = {}, {}, {}
        imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
        if 'annotations' in self.dataset:
            for ann in self.dataset['annotations']:
                imgToAnns[ann['image_id']].append(ann)
                anns[ann['id']] = ann

        if 'images' in self.dataset:
            for img in self.dataset['images']:
                imgs[img['id']] = img

        if 'categories' in self.dataset:
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat

        if 'annotations' in self.dataset and 'categories' in self.dataset:
            for ann in self.dataset['annotations']:
                catToImgs[ann['category_id']].append(ann['image_id'])

        print('index created!')

        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def info(self):
        """
        Print information about the annotation file.
        :return:
        """
        for key, value in self.dataset['info'].items():
            print('{}: {}'.format(key, value))

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
        """
        Get ann ids that satisfy given filter conditions. default skips that
        filter
        :param imgIds (int array) : get anns for given imgs
               catIds (int array) : get anns for given cats
               areaRng (float array) : get anns for given area range
                                       (e.g. [0 inf])
               iscrowd (boolean) : get anns for given crowd label
                                   (False or True)
        :return: ids (int array) : integer array of ann ids
        """
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            anns = self.dataset['annotations']
        else:
            if not len(imgIds) == 0:
                lists = [
                    self.imgToAnns[imgId] for imgId in imgIds
                    if imgId in self.imgToAnns
                ]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.dataset['annotations']
            anns = anns if len(catIds) == 0 else [
                ann for ann in anns if ann['category_id'] in catIds
            ]
            anns = anns if len(areaRng) == 0 else [
                ann for ann in anns
                if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]
            ]
        if iscrowd is not None:
            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
        else:
            ids = [ann['id'] for ann in anns]
        return ids

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
        """
        Get cat ids that satisfy the given filtering parameters. default skips
        that filter.
        :param catNms (str array) : get cats for given cat names
        :param supNms (str array) : get cats for given supercategory names
        :param catIds (int array) : get cats for given cat ids
        :return: ids (int array) : integer array of cat ids
        """
        catNms = catNms if _isArrayLike(catNms) else [catNms]
        supNms = supNms if _isArrayLike(supNms) else [supNms]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(catNms) == len(supNms) == len(catIds) == 0:
            cats = self.dataset['categories']
        else:
            cats = self.dataset['categories']
            cats = cats if len(catNms) == 0 else [
                cat for cat in cats if cat['name'] in catNms
            ]
            cats = cats if len(supNms) == 0 else [
                cat for cat in cats if cat['supercategory'] in supNms
            ]
            cats = cats if len(catIds) == 0 else [
                cat for cat in cats if cat['id'] in catIds
            ]
        ids = [cat['id'] for cat in cats]
        return ids

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array) : get imgs for given ids
        :param catIds (int array) : get imgs with all given cats
        :return: ids (int array) : integer array of img ids
        '''
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            ids = set(imgIds)
            for i, catId in enumerate(catIds):
                if i == 0 and len(ids) == 0:
                    ids = set(self.catToImgs[catId])
                else:
                    ids &= set(self.catToImgs[catId])
        return list(ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        return self.getImgIds(img_ids, cat_ids)

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array) : integer ids specifying anns
        :return: anns (object array) : loaded ann objects
        """
        if _isArrayLike(ids):
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    load_anns = loadAnns

    def loadCats(self, ids=[]):
        """
        Load cats with the specified ids.
        :param ids (int array) : integer ids specifying cats
        :return: cats (object array) : loaded cat objects
        """
        if _isArrayLike(ids):
            return [self.cats[id] for id in ids]
        elif type(ids) == int:
            return [self.cats[ids]]

    load_cats = loadCats

    def loadImgs(self, ids=[]):
        """
        Load imgs with the specified ids.
        :param ids (int array) : integer ids specifying img
        :return: imgs (object array) : loaded img objects
        """
        if _isArrayLike(ids):
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    load_imgs = loadImgs

    def showAnns(self, anns, draw_bbox=False):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
                if 'segmentation' in ann:
                    if type(ann['segmentation']) == list:
                        # polygon
                        for seg in ann['segmentation']:
                            poly = np.array(seg).reshape(
                                (int(len(seg) / 2), 2))
                            polygons.append(Polygon(poly))
                            color.append(c)
                    else:
                        # mask
                        t = self.imgs[ann['image_id']]
                        if type(ann['segmentation']['counts']) == list:
                            rle = maskUtils.frPyObjects([ann['segmentation']],
                                                        t['height'],
                                                        t['width'])
                        else:
                            rle = [ann['segmentation']]
                        m = maskUtils.decode(rle)
                        img = np.ones((m.shape[0], m.shape[1], 3))
                        if ann['iscrowd'] == 1:
                            color_mask = np.array([2.0, 166.0, 101.0]) / 255
                        if ann['iscrowd'] == 0:
                            color_mask = np.random.random((1, 3)).tolist()[0]
                        for i in range(3):
                            img[:, :, i] = color_mask[i]
                        ax.imshow(np.dstack((img, m * 0.5)))
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(
                        self.loadCats(ann['category_id'])[0]['skeleton']) - 1
                    kp = np.array(ann['keypoints'])
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        if np.all(v[sk] > 0):
                            plt.plot(x[sk], y[sk], linewidth=3, color=c)
                    plt.plot(x[v > 0],
                             y[v > 0],
                             'o',
                             markersize=8,
                             markerfacecolor=c,
                             markeredgecolor='k',
                             markeredgewidth=2)
                    plt.plot(x[v > 1],
                             y[v > 1],
                             'o',
                             markersize=8,
                             markerfacecolor=c,
                             markeredgecolor=c,
                             markeredgewidth=2)

                if draw_bbox:
                    [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']
                    poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h],
                            [bbox_x + bbox_w, bbox_y + bbox_h],
                            [bbox_x + bbox_w, bbox_y]]
                    np_poly = np.array(poly).reshape((4, 2))
                    polygons.append(Polygon(np_poly))
                    color.append(c)

            p = PatchCollection(polygons,
                                facecolor=color,
                                linewidths=0,
                                alpha=0.4)
            ax.add_collection(p)
            p = PatchCollection(polygons,
                                facecolor='none',
                                edgecolors=color,
                                linewidths=2)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print(ann['caption'])

    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param resFile (str) : file name of result file
        :return: res (obj) : result api object
        """
        res = COCO()
        res.dataset['images'] = [img for img in self.dataset['images']]

        print('Loading and preparing results...')
        tic = time.time()
        if type(resFile) == str:
            with open(resFile) as f:
                anns = json.load(f)
        elif type(resFile) == np.ndarray:
            anns = self.loadNumpyAnnotations(resFile)
        else:
            anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]
        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
            'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            imgIds = set([img['id'] for img in res.dataset['images']]) & set(
                [ann['image_id'] for ann in anns])
            res.dataset['images'] = [
                img for img in res.dataset['images'] if img['id'] in imgIds
            ]
            for id, ann in enumerate(anns):
                ann['id'] = id + 1
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            res.dataset['categories'] = copy.deepcopy(
                self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
                if 'segmentation' not in ann:
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2] * bb[3]
                ann['id'] = id + 1
                ann['iscrowd'] = 0
        elif 'segmentation' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(
                self.dataset['categories'])
            for id, ann in enumerate(anns):
                # now only support compressed RLE format as segmentation
                # results
                ann['area'] = maskUtils.area(ann['segmentation'])
                if 'bbox' not in ann:
                    ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
                ann['id'] = id + 1
                ann['iscrowd'] = 0
        elif 'keypoints' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(
                self.dataset['categories'])
            for id, ann in enumerate(anns):
                s = ann['keypoints']
                x = s[0::3]
                y = s[1::3]
                x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                ann['area'] = (x1 - x0) * (y1 - y0)
                ann['id'] = id + 1
                ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
        print('DONE (t={:0.2f}s)'.format(time.time() - tic))

        res.dataset['annotations'] = anns
        res.createIndex()
        return res

    def download(self, tarDir=None, imgIds=[]):
        '''
        Download COCO images from mscoco.org server.
        :param tarDir (str): COCO results directory name
               imgIds (list): images to be downloaded
        :return:
        '''
        if tarDir is None:
            print('Please specify target directory')
            return -1
        if len(imgIds) == 0:
            imgs = self.imgs.values()
        else:
            imgs = self.loadImgs(imgIds)
        N = len(imgs)
        if not os.path.exists(tarDir):
            os.makedirs(tarDir)
        for i, img in enumerate(imgs):
            tic = time.time()
            fname = os.path.join(tarDir, img['file_name'])
            if not os.path.exists(fname):
                urlretrieve(img['coco_url'], fname)
            print('downloaded {}/{} images (t={:0.1f}s)'.format(
                i, N,
                time.time() - tic))

    def loadNumpyAnnotations(self, data):
        """
        Convert result data from a numpy array [Nx7] where each row contains
        {imageID,x1,y1,w,h,score,class}
        :param data (numpy.ndarray)
        :return: annotations (python nested list)
        """
        print('Converting ndarray to lists...')
        assert (type(data) == np.ndarray)
        print(data.shape)
        assert (data.shape[1] == 7)
        N = data.shape[0]
        ann = []
        for i in range(N):
            if i % 1000000 == 0:
                print('{}/{}'.format(i, N))
            ann += [{
                'image_id': int(data[i, 0]),
                'bbox': [data[i, 1], data[i, 2], data[i, 3], data[i, 4]],
                'score': data[i, 5],
                'category_id': int(data[i, 6]),
            }]
        return ann

    def annToRLE(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: rle (run-length encoding of the annotation mask)
        """
        t = self.imgs[ann['image_id']]
        h, w = t['height'], t['width']
        segm = ann['segmentation']
        if type(segm) == list:
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(segm, h, w)
            rle = maskUtils.merge(rles)
        elif type(segm['counts']) == list:
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, h, w)
        else:
            # rle
            rle = ann['segmentation']
        return rle

    ann_to_rle = annToRLE

    def annToMask(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE, or RLE to
        binary mask.
        :return: binary mask (numpy 2D array)
        """
        rle = self.annToRLE(ann)
        m = maskUtils.decode(rle)
        return m

    ann_to_mask = annToMask
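The block above is the tail of a pycocotools-style COCO API class. For orientation, a minimal usage sketch follows; it assumes the standard pycocotools package, and the annotation path and category name are placeholders rather than values from this commit.

from pycocotools.coco import COCO

# Placeholder annotation file; any COCO-format instances JSON works here.
coco = COCO("annotations/instances_val2017.json")

# Category name -> id, then the images and annotations that use it.
cat_ids = coco.getCatIds(catNms=["person"])
img_ids = coco.getImgIds(catIds=cat_ids)
ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
anns = coco.loadAnns(ann_ids)

# Convert the first annotation (polygon or RLE) into a binary H x W mask.
mask = coco.annToMask(anns[0])
print(mask.shape, mask.sum())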
spaces/CVPR/drawings-to-human/frontend/src/app.html
DELETED
@@ -1,13 +0,0 @@
<!DOCTYPE html>
<html lang="en">
	<head>
		<meta charset="utf-8" />
		<link rel="icon" href="%sveltekit.assets%/favicon.png" />
		<meta name="viewport" content="width=device-width, initial-scale=1" />
		<script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
		%sveltekit.head%
	</head>
	<body class="dark:bg-[rgb(11,15,25)] bg-white dark:text-white text-black">
		%sveltekit.body%
	</body>
</html>
spaces/CVPR/regionclip-demo/detectron2/data/transforms/__init__.py
DELETED
@@ -1,14 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
from fvcore.transforms.transform import Transform, TransformList  # order them first
from fvcore.transforms.transform import *
from .transform import *
from .augmentation import *
from .augmentation_impl import *

__all__ = [k for k in globals().keys() if not k.startswith("_")]


from detectron2.utils.env import fixup_module_metadata

fixup_module_metadata(__name__, globals(), __all__)
del fixup_module_metadata
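This __init__.py only re-exports detectron2's transform and augmentation modules. As a rough sketch of how the re-exported API is usually driven (the augmentation classes and the AugInput call are assumed from upstream detectron2 rather than this vendored copy, and the image is a random placeholder):

import numpy as np
from detectron2.data import transforms as T

# Placeholder image; in practice this comes from the dataset mapper.
image = (np.random.rand(480, 640, 3) * 255).astype("uint8")

# Compose two of the re-exported augmentations.
augs = T.AugmentationList([
    T.ResizeShortestEdge(short_edge_length=256, max_size=512),
    T.RandomFlip(horizontal=True),
])

# AugInput holds the data to transform in place; the returned Transform
# can be reused on boxes, masks, etc.
aug_input = T.AugInput(image)
transform = augs(aug_input)
transformed_image = aug_input.image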
spaces/CVPR/regionclip-demo/detectron2/modeling/mmdet_wrapper.py
DELETED
@@ -1,277 +0,0 @@
# -*- coding: utf-8 -*-

import itertools
import logging
import numpy as np
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn

from detectron2.layers import ShapeSpec
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage

from .backbone import Backbone

logger = logging.getLogger(__name__)


def _to_container(cfg):
    """
    mmdet will assert the type of dict/list.
    So convert omegaconf objects to dict/list.
    """
    if isinstance(cfg, DictConfig):
        cfg = OmegaConf.to_container(cfg, resolve=True)
    from mmcv.utils import ConfigDict

    return ConfigDict(cfg)


class MMDetBackbone(Backbone):
    """
    Wrapper of mmdetection backbones to use in detectron2.

    mmdet backbones produce list/tuple of tensors, while detectron2 backbones
    produce a dict of tensors. This class wraps the given backbone to produce
    output in detectron2's convention, so it can be used in place of detectron2
    backbones.
    """

    def __init__(
        self,
        backbone: Union[nn.Module, Mapping],
        neck: Union[nn.Module, Mapping, None] = None,
        *,
        pretrained_backbone: Optional[str] = None,
        output_shapes: List[ShapeSpec],
        output_names: Optional[List[str]] = None,
    ):
        """
        Args:
            backbone: either a backbone module or a mmdet config dict that defines a
                backbone. The backbone takes a 4D image tensor and returns a
                sequence of tensors.
            neck: either a backbone module or a mmdet config dict that defines a
                neck. The neck takes outputs of backbone and returns a
                sequence of tensors. If None, no neck is used.
            pretrained_backbone: defines the backbone weights that can be loaded by
                mmdet, such as "torchvision://resnet50".
            output_shapes: shape for every output of the backbone (or neck, if given).
                stride and channels are often needed.
            output_names: names for every output of the backbone (or neck, if given).
                By default, will use "out0", "out1", ...
        """
        super().__init__()
        if isinstance(backbone, Mapping):
            from mmdet.models import build_backbone

            backbone = build_backbone(_to_container(backbone))
        self.backbone = backbone

        if isinstance(neck, Mapping):
            from mmdet.models import build_neck

            neck = build_neck(_to_container(neck))
        self.neck = neck

        # It's confusing that backbone weights are given as a separate argument,
        # but "neck" weights, if any, are part of neck itself. This is the interface
        # of mmdet so we follow it. Reference:
        # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
        logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...")
        self.backbone.init_weights(pretrained_backbone)
        # train() in mmdet modules is non-trivial, and has to be explicitly
        # called. Reference:
        # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
        self.backbone.train()
        if self.neck is not None:
            logger.info("Initializing mmdet neck weights ...")
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
            self.neck.train()

        self._output_shapes = output_shapes
        if not output_names:
            output_names = [f"out{i}" for i in range(len(output_shapes))]
        self._output_names = output_names

    def forward(self, x) -> Dict[str, Tensor]:
        outs = self.backbone(x)
        if self.neck is not None:
            outs = self.neck(outs)
        assert isinstance(
            outs, (list, tuple)
        ), "mmdet backbone should return a list/tuple of tensors!"
        if len(outs) != len(self._output_shapes):
            raise ValueError(
                "Length of output_shapes does not match outputs from the mmdet backbone: "
                f"{len(outs)} != {len(self._output_shapes)}"
            )
        return {k: v for k, v in zip(self._output_names, outs)}

    def output_shape(self) -> Dict[str, ShapeSpec]:
        return {k: v for k, v in zip(self._output_names, self._output_shapes)}


class MMDetDetector(nn.Module):
    """
    Wrapper of a mmdetection detector model, for detection and instance segmentation.
    Input/output formats of this class follow detectron2's convention, so a
    mmdetection model can be trained and evaluated in detectron2.
    """

    def __init__(
        self,
        detector: Union[nn.Module, Mapping],
        *,
        # Default is 32 regardless of model:
        # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
        size_divisibility=32,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
    ):
        """
        Args:
            detector: a mmdet detector, or a mmdet config dict that defines a detector.
            size_divisibility: pad input images to multiple of this number
            pixel_mean: per-channel mean to normalize input image
            pixel_std: per-channel stddev to normalize input image
        """
        super().__init__()
        if isinstance(detector, Mapping):
            from mmdet.models import build_detector

            detector = build_detector(_to_container(detector))
        self.detector = detector
        self.size_divisibility = size_divisibility

        self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
        assert (
            self.pixel_mean.shape == self.pixel_std.shape
        ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"

    def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
        metas = []
        rescale = {"height" in x for x in batched_inputs}
        if len(rescale) != 1:
            raise ValueError("Some inputs have original height/width, but some don't!")
        rescale = list(rescale)[0]
        output_shapes = []
        for input in batched_inputs:
            meta = {}
            c, h, w = input["image"].shape
            meta["img_shape"] = meta["ori_shape"] = (h, w, c)
            if rescale:
                scale_factor = np.array(
                    [w / input["width"], h / input["height"]] * 2, dtype="float32"
                )
                ori_shape = (input["height"], input["width"])
                output_shapes.append(ori_shape)
                meta["ori_shape"] = ori_shape + (c,)
            else:
                scale_factor = 1.0
                output_shapes.append((h, w))
            meta["scale_factor"] = scale_factor
            meta["flip"] = False
            padh, padw = images.shape[-2:]
            meta["pad_shape"] = (padh, padw, c)
            metas.append(meta)

        if self.training:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
            if gt_instances[0].has("gt_masks"):
                from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks

                def convert_mask(m, shape):
                    # mmdet mask format
                    if isinstance(m, BitMasks):
                        return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
                    else:
                        return mm_PolygonMasks(m.polygons, shape[0], shape[1])

                gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
                losses_and_metrics = self.detector.forward_train(
                    images,
                    metas,
                    [x.gt_boxes.tensor for x in gt_instances],
                    [x.gt_classes for x in gt_instances],
                    gt_masks=gt_masks,
                )
            else:
                losses_and_metrics = self.detector.forward_train(
                    images,
                    metas,
                    [x.gt_boxes.tensor for x in gt_instances],
                    [x.gt_classes for x in gt_instances],
                )
            return _parse_losses(losses_and_metrics)
        else:
            results = self.detector.simple_test(images, metas, rescale=rescale)
            results = [
                {"instances": _convert_mmdet_result(r, shape)}
                for r, shape in zip(results, output_shapes)
            ]
            return results

    @property
    def device(self):
        return self.pixel_mean.device


# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
    if isinstance(result, tuple):
        bbox_result, segm_result = result
        if isinstance(segm_result, tuple):
            segm_result = segm_result[0]
    else:
        bbox_result, segm_result = result, None

    bboxes = torch.from_numpy(np.vstack(bbox_result))  # Nx5
    bboxes, scores = bboxes[:, :4], bboxes[:, -1]
    labels = [
        torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)
    ]
    labels = torch.cat(labels)
    inst = Instances(shape)
    inst.pred_boxes = Boxes(bboxes)
    inst.scores = scores
    inst.pred_classes = labels

    if segm_result is not None and len(labels) > 0:
        segm_result = list(itertools.chain(*segm_result))
        segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result]
        segm_result = torch.stack(segm_result, dim=0)
        inst.pred_masks = segm_result
    return inst


# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]:
    log_vars = OrderedDict()
    for loss_name, loss_value in losses.items():
        if isinstance(loss_value, torch.Tensor):
            log_vars[loss_name] = loss_value.mean()
        elif isinstance(loss_value, list):
            log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
        else:
            raise TypeError(f"{loss_name} is not a tensor or list of tensors")

        if "loss" not in loss_name:
            # put metrics to storage; don't return them
            storage = get_event_storage()
            value = log_vars.pop(loss_name).cpu().item()
            storage.put_scalar(loss_name, value)
    return log_vars
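For orientation, a sketch of how the MMDetBackbone wrapper above is meant to be constructed. The mmdet config dicts, pretrained tag, and output shapes below are illustrative values for a typical ResNet-50 + FPN setup, not values taken from this repository, and running it requires mmdet/mmcv to be installed.

from detectron2.layers import ShapeSpec
from detectron2.modeling.mmdet_wrapper import MMDetBackbone

# Illustrative mmdet-style config dicts (standard ResNet-50 + FPN values).
backbone_cfg = dict(
    type="ResNet", depth=50, num_stages=4, out_indices=(0, 1, 2, 3),
    frozen_stages=1, norm_cfg=dict(type="BN", requires_grad=True),
    norm_eval=True, style="pytorch",
)
neck_cfg = dict(
    type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5,
)

backbone = MMDetBackbone(
    backbone=backbone_cfg,
    neck=neck_cfg,
    pretrained_backbone="torchvision://resnet50",
    # One ShapeSpec per FPN level; strides/channels must match the neck.
    output_shapes=[ShapeSpec(channels=256, stride=s) for s in (4, 8, 16, 32, 64)],
    output_names=["p2", "p3", "p4", "p5", "p6"],
)
# backbone(images) then returns {"p2": ..., ..., "p6": ...} in detectron2's convention.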
spaces/Candyraider/Proxy4/Dockerfile
DELETED
@@ -1,21 +0,0 @@
FROM node:18-bullseye-slim

RUN apt-get update && \
    apt-get install -y git

RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app

WORKDIR /app

RUN npm install

COPY Dockerfile greeting.md* .env* ./

RUN npm run build

EXPOSE 7860

ENV NODE_ENV=production

CMD [ "npm", "start" ]
spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/pinecone.py
DELETED
@@ -1,75 +0,0 @@
import pinecone
from colorama import Fore, Style

from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton


class PineconeMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        pinecone_api_key = cfg.pinecone_api_key
        pinecone_region = cfg.pinecone_region
        pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
        dimension = 1536
        metric = "cosine"
        pod_type = "p1"
        table_name = "auto-gpt"
        # this assumes we don't start with memory.
        # for now this works.
        # we'll need a more complicated and robust system if we want to start with
        # memory.
        self.vec_num = 0

        try:
            pinecone.whoami()
        except Exception as e:
            logger.typewriter_log(
                "FAILED TO CONNECT TO PINECONE",
                Fore.RED,
                Style.BRIGHT + str(e) + Style.RESET_ALL,
            )
            logger.double_check(
                "Please ensure you have setup and configured Pinecone properly for use."
                + f"You can check out {Fore.CYAN + Style.BRIGHT}"
                "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup"
                f"{Style.RESET_ALL} to ensure you've set up everything correctly."
            )
            exit(1)

        if table_name not in pinecone.list_indexes():
            pinecone.create_index(
                table_name, dimension=dimension, metric=metric, pod_type=pod_type
            )
        self.index = pinecone.Index(table_name)

    def add(self, data):
        vector = create_embedding_with_ada(data)
        # no metadata here. We may wish to change that long term.
        self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
        _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
        self.vec_num += 1
        return _text

    def get(self, data):
        return self.get_relevant(data, 1)

    def clear(self):
        self.index.delete(deleteAll=True)
        return "Obliviated"

    def get_relevant(self, data, num_relevant=5):
        """
        Returns all the data in the memory that is relevant to the given data.
        :param data: The data to compare to.
        :param num_relevant: The number of relevant data to return. Defaults to 5
        """
        query_embedding = create_embedding_with_ada(data)
        results = self.index.query(
            query_embedding, top_k=num_relevant, include_metadata=True
        )
        sorted_results = sorted(results.matches, key=lambda x: x.score)
        return [str(item["metadata"]["raw_text"]) for item in sorted_results]

    def get_stats(self):
        return self.index.describe_index_stats()
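A minimal sketch of how this memory backend is exercised; the config object below is a stand-in for Auto-GPT's real Config (which reads these values from the environment), and the key/region strings are placeholders.

from types import SimpleNamespace

from autogpt.memory.pinecone import PineconeMemory

# Stand-in config; a real run needs a valid Pinecone API key and region.
cfg = SimpleNamespace(
    pinecone_api_key="YOUR_PINECONE_API_KEY",
    pinecone_region="us-east1-gcp",
)

memory = PineconeMemory(cfg)  # connects and creates the "auto-gpt" index if missing
memory.add("The user prefers concise answers.")
print(memory.get_relevant("How should answers be phrased?", num_relevant=1))
print(memory.get_stats())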