Commit
·
5b07709
1
Parent(s):
5577e38
Update parquet files (step 76 of 249)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/0xHacked/zkProver/Dockerfile +0 -21
- spaces/1368565466ki/ZSTRD/attentions.py +0 -300
- spaces/17TheWord/RealESRGAN/tests/test_discriminator_arch.py +0 -19
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contoh Surat Rasmi Permohonan Tapak Jualan.md +0 -66
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cours archicad 15 gratuit Matrisez le logiciel de modlisation BIM.md +0 -75
- spaces/1gistliPinn/ChatGPT4/Examples/((FULL)) Xforce Keygen 64-bit Alias AutoStudio 2019 Activation.md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/Backstreet Boys Millennium Full Album Zip.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Cyberlink Powerdvd 14 Crack Serial Key.md +0 -64
- spaces/1gistliPinn/ChatGPT4/Examples/DWG TrueView 2012 (64bit) (Product Key And Xforce Keygen) .rar UPDATED.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Digifish Aqua Real 2 Version 1.04 Full With Serial.md +0 -7
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Indonesia on PC How to Download and Play with LDPlayer Emulator.md +0 -130
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre el secreto de Clash Royale Todo Infinito APK fcil y rpido.md +0 -106
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing Lite Mod APK with Unlimited Money and Unlocked Features.md +0 -91
- spaces/1phancelerku/anime-remove-background/Age of History APK - Download the Best Strategy Game for Android.md +0 -155
- spaces/1phancelerku/anime-remove-background/Download Tag After School APK for Android - ThaiAPK.md +0 -150
- spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_dpmsolver_singlestep.py +0 -592
- spaces/44ov41za8i/FreeVC/speaker_encoder/config.py +0 -45
- spaces/AB-TW/team-ai/agents/tools/smart_domain/persistent_layer_code_tool.py +0 -55
- spaces/AE-NV/sentiment-productreview/app.py +0 -20
- spaces/AIConsultant/MusicGen/model_cards/MUSICGEN_MODEL_CARD.md +0 -90
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/base_binarizer_emotion.py +0 -352
- spaces/AP123/Upside-Down-Diffusion/README.md +0 -14
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet18.py +0 -17
- spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/__init__.py +0 -615
- spaces/Abrish-Aadi/Chest-Xray-anomaly-detection/app.py +0 -40
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/FadeMethods.js +0 -86
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.d.ts +0 -5
- spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.cpp +0 -99
- spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/upfirdn2d.py +0 -206
- spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py +0 -8
- spaces/Andy1621/uniformer_image_detection/configs/scratch/README.md +0 -25
- spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/smooth_l1_loss.py +0 -139
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py +0 -2
- spaces/AnjaneyuluChinni/AnjiChinniGenAIAvatar/app.py +0 -34
- spaces/Arnaudding001/OpenAI_whisperLive/app-network.py +0 -3
- spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese.py +0 -585
- spaces/Artrajz/vits-simple-api/vits/text/vits_pinyin.py +0 -98
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/specifiers.py +0 -802
- spaces/Avkash/WebcamFaceProcessing/app.py +0 -316
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/boxes.py +0 -423
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/meta_arch/centernet_detector.py +0 -69
- spaces/Bart92/RVC_HF/i18n/locale_diff.py +0 -45
- spaces/Bart92/RVC_HF/infer/lib/infer_pack/commons.py +0 -167
- spaces/Benson/text-generation/Examples/Ai Chat Rpg Juego Mod Apk.md +0 -61
- spaces/Benson/text-generation/Examples/Arco Iris Seis Mvil Beta Apk.md +0 -75
- spaces/Benson/text-generation/Examples/Descargar Amor Emocional Rap Beat.md +0 -79
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/filesize.py +0 -89
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/terminal_theme.py +0 -153
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/typing_extensions.py +0 -2296
- spaces/BreadBytes1/SB-Dashboard/old_app.py +0 -327
spaces/0xHacked/zkProver/Dockerfile
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
FROM nvidia/cuda:12.1.1-devel-ubuntu20.04
|
2 |
-
ARG DEBIAN_FRONTEND=noninteractive
|
3 |
-
ENV TZ=Asia/Hong_Kong
|
4 |
-
RUN apt-get update && apt-get install --no-install-recommends -y tzdata python3.9 python3.9-dev python3.9-venv build-essential && \
|
5 |
-
apt-get clean && rm -rf /var/lib/apt/lists/*
|
6 |
-
|
7 |
-
RUN useradd -m -u 1000 user
|
8 |
-
USER user
|
9 |
-
|
10 |
-
ENV HOME=/home/user \
|
11 |
-
PATH=/home/user/.local/bin:$PATH
|
12 |
-
|
13 |
-
WORKDIR $HOME/app
|
14 |
-
COPY --chown=user . $HOME/app
|
15 |
-
|
16 |
-
RUN python3.9 -m venv $HOME/app/venv && $HOME/app/venv/bin/pip install --no-cache-dir --upgrade pip
|
17 |
-
RUN $HOME/app/venv/bin/pip install --no-cache-dir --upgrade -r requirements.txt
|
18 |
-
|
19 |
-
RUN cd $HOME/app && chmod +x $HOME/app/bin/*
|
20 |
-
|
21 |
-
CMD ["/home/user/app/venv/bin/python", "app.py"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1368565466ki/ZSTRD/attentions.py
DELETED
@@ -1,300 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import torch
|
3 |
-
from torch import nn
|
4 |
-
from torch.nn import functional as F
|
5 |
-
|
6 |
-
import commons
|
7 |
-
from modules import LayerNorm
|
8 |
-
|
9 |
-
|
10 |
-
class Encoder(nn.Module):
|
11 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
|
12 |
-
super().__init__()
|
13 |
-
self.hidden_channels = hidden_channels
|
14 |
-
self.filter_channels = filter_channels
|
15 |
-
self.n_heads = n_heads
|
16 |
-
self.n_layers = n_layers
|
17 |
-
self.kernel_size = kernel_size
|
18 |
-
self.p_dropout = p_dropout
|
19 |
-
self.window_size = window_size
|
20 |
-
|
21 |
-
self.drop = nn.Dropout(p_dropout)
|
22 |
-
self.attn_layers = nn.ModuleList()
|
23 |
-
self.norm_layers_1 = nn.ModuleList()
|
24 |
-
self.ffn_layers = nn.ModuleList()
|
25 |
-
self.norm_layers_2 = nn.ModuleList()
|
26 |
-
for i in range(self.n_layers):
|
27 |
-
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
|
28 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
29 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
|
30 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
31 |
-
|
32 |
-
def forward(self, x, x_mask):
|
33 |
-
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
34 |
-
x = x * x_mask
|
35 |
-
for i in range(self.n_layers):
|
36 |
-
y = self.attn_layers[i](x, x, attn_mask)
|
37 |
-
y = self.drop(y)
|
38 |
-
x = self.norm_layers_1[i](x + y)
|
39 |
-
|
40 |
-
y = self.ffn_layers[i](x, x_mask)
|
41 |
-
y = self.drop(y)
|
42 |
-
x = self.norm_layers_2[i](x + y)
|
43 |
-
x = x * x_mask
|
44 |
-
return x
|
45 |
-
|
46 |
-
|
47 |
-
class Decoder(nn.Module):
|
48 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
|
49 |
-
super().__init__()
|
50 |
-
self.hidden_channels = hidden_channels
|
51 |
-
self.filter_channels = filter_channels
|
52 |
-
self.n_heads = n_heads
|
53 |
-
self.n_layers = n_layers
|
54 |
-
self.kernel_size = kernel_size
|
55 |
-
self.p_dropout = p_dropout
|
56 |
-
self.proximal_bias = proximal_bias
|
57 |
-
self.proximal_init = proximal_init
|
58 |
-
|
59 |
-
self.drop = nn.Dropout(p_dropout)
|
60 |
-
self.self_attn_layers = nn.ModuleList()
|
61 |
-
self.norm_layers_0 = nn.ModuleList()
|
62 |
-
self.encdec_attn_layers = nn.ModuleList()
|
63 |
-
self.norm_layers_1 = nn.ModuleList()
|
64 |
-
self.ffn_layers = nn.ModuleList()
|
65 |
-
self.norm_layers_2 = nn.ModuleList()
|
66 |
-
for i in range(self.n_layers):
|
67 |
-
self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
|
68 |
-
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
69 |
-
self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
|
70 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
71 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
|
72 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
73 |
-
|
74 |
-
def forward(self, x, x_mask, h, h_mask):
|
75 |
-
"""
|
76 |
-
x: decoder input
|
77 |
-
h: encoder output
|
78 |
-
"""
|
79 |
-
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
|
80 |
-
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
81 |
-
x = x * x_mask
|
82 |
-
for i in range(self.n_layers):
|
83 |
-
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
84 |
-
y = self.drop(y)
|
85 |
-
x = self.norm_layers_0[i](x + y)
|
86 |
-
|
87 |
-
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
88 |
-
y = self.drop(y)
|
89 |
-
x = self.norm_layers_1[i](x + y)
|
90 |
-
|
91 |
-
y = self.ffn_layers[i](x, x_mask)
|
92 |
-
y = self.drop(y)
|
93 |
-
x = self.norm_layers_2[i](x + y)
|
94 |
-
x = x * x_mask
|
95 |
-
return x
|
96 |
-
|
97 |
-
|
98 |
-
class MultiHeadAttention(nn.Module):
|
99 |
-
def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
|
100 |
-
super().__init__()
|
101 |
-
assert channels % n_heads == 0
|
102 |
-
|
103 |
-
self.channels = channels
|
104 |
-
self.out_channels = out_channels
|
105 |
-
self.n_heads = n_heads
|
106 |
-
self.p_dropout = p_dropout
|
107 |
-
self.window_size = window_size
|
108 |
-
self.heads_share = heads_share
|
109 |
-
self.block_length = block_length
|
110 |
-
self.proximal_bias = proximal_bias
|
111 |
-
self.proximal_init = proximal_init
|
112 |
-
self.attn = None
|
113 |
-
|
114 |
-
self.k_channels = channels // n_heads
|
115 |
-
self.conv_q = nn.Conv1d(channels, channels, 1)
|
116 |
-
self.conv_k = nn.Conv1d(channels, channels, 1)
|
117 |
-
self.conv_v = nn.Conv1d(channels, channels, 1)
|
118 |
-
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
119 |
-
self.drop = nn.Dropout(p_dropout)
|
120 |
-
|
121 |
-
if window_size is not None:
|
122 |
-
n_heads_rel = 1 if heads_share else n_heads
|
123 |
-
rel_stddev = self.k_channels**-0.5
|
124 |
-
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
125 |
-
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
126 |
-
|
127 |
-
nn.init.xavier_uniform_(self.conv_q.weight)
|
128 |
-
nn.init.xavier_uniform_(self.conv_k.weight)
|
129 |
-
nn.init.xavier_uniform_(self.conv_v.weight)
|
130 |
-
if proximal_init:
|
131 |
-
with torch.no_grad():
|
132 |
-
self.conv_k.weight.copy_(self.conv_q.weight)
|
133 |
-
self.conv_k.bias.copy_(self.conv_q.bias)
|
134 |
-
|
135 |
-
def forward(self, x, c, attn_mask=None):
|
136 |
-
q = self.conv_q(x)
|
137 |
-
k = self.conv_k(c)
|
138 |
-
v = self.conv_v(c)
|
139 |
-
|
140 |
-
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
141 |
-
|
142 |
-
x = self.conv_o(x)
|
143 |
-
return x
|
144 |
-
|
145 |
-
def attention(self, query, key, value, mask=None):
|
146 |
-
# reshape [b, d, t] -> [b, n_h, t, d_k]
|
147 |
-
b, d, t_s, t_t = (*key.size(), query.size(2))
|
148 |
-
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
149 |
-
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
150 |
-
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
151 |
-
|
152 |
-
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
153 |
-
if self.window_size is not None:
|
154 |
-
assert t_s == t_t, "Relative attention is only available for self-attention."
|
155 |
-
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
|
156 |
-
rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
|
157 |
-
scores_local = self._relative_position_to_absolute_position(rel_logits)
|
158 |
-
scores = scores + scores_local
|
159 |
-
if self.proximal_bias:
|
160 |
-
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
161 |
-
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
|
162 |
-
if mask is not None:
|
163 |
-
scores = scores.masked_fill(mask == 0, -1e4)
|
164 |
-
if self.block_length is not None:
|
165 |
-
assert t_s == t_t, "Local attention is only available for self-attention."
|
166 |
-
block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
|
167 |
-
scores = scores.masked_fill(block_mask == 0, -1e4)
|
168 |
-
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
|
169 |
-
p_attn = self.drop(p_attn)
|
170 |
-
output = torch.matmul(p_attn, value)
|
171 |
-
if self.window_size is not None:
|
172 |
-
relative_weights = self._absolute_position_to_relative_position(p_attn)
|
173 |
-
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
|
174 |
-
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
|
175 |
-
output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
|
176 |
-
return output, p_attn
|
177 |
-
|
178 |
-
def _matmul_with_relative_values(self, x, y):
|
179 |
-
"""
|
180 |
-
x: [b, h, l, m]
|
181 |
-
y: [h or 1, m, d]
|
182 |
-
ret: [b, h, l, d]
|
183 |
-
"""
|
184 |
-
ret = torch.matmul(x, y.unsqueeze(0))
|
185 |
-
return ret
|
186 |
-
|
187 |
-
def _matmul_with_relative_keys(self, x, y):
|
188 |
-
"""
|
189 |
-
x: [b, h, l, d]
|
190 |
-
y: [h or 1, m, d]
|
191 |
-
ret: [b, h, l, m]
|
192 |
-
"""
|
193 |
-
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
194 |
-
return ret
|
195 |
-
|
196 |
-
def _get_relative_embeddings(self, relative_embeddings, length):
|
197 |
-
max_relative_position = 2 * self.window_size + 1
|
198 |
-
# Pad first before slice to avoid using cond ops.
|
199 |
-
pad_length = max(length - (self.window_size + 1), 0)
|
200 |
-
slice_start_position = max((self.window_size + 1) - length, 0)
|
201 |
-
slice_end_position = slice_start_position + 2 * length - 1
|
202 |
-
if pad_length > 0:
|
203 |
-
padded_relative_embeddings = F.pad(
|
204 |
-
relative_embeddings,
|
205 |
-
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
|
206 |
-
else:
|
207 |
-
padded_relative_embeddings = relative_embeddings
|
208 |
-
used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
|
209 |
-
return used_relative_embeddings
|
210 |
-
|
211 |
-
def _relative_position_to_absolute_position(self, x):
|
212 |
-
"""
|
213 |
-
x: [b, h, l, 2*l-1]
|
214 |
-
ret: [b, h, l, l]
|
215 |
-
"""
|
216 |
-
batch, heads, length, _ = x.size()
|
217 |
-
# Concat columns of pad to shift from relative to absolute indexing.
|
218 |
-
x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
|
219 |
-
|
220 |
-
# Concat extra elements so to add up to shape (len+1, 2*len-1).
|
221 |
-
x_flat = x.view([batch, heads, length * 2 * length])
|
222 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
|
223 |
-
|
224 |
-
# Reshape and slice out the padded elements.
|
225 |
-
x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
|
226 |
-
return x_final
|
227 |
-
|
228 |
-
def _absolute_position_to_relative_position(self, x):
|
229 |
-
"""
|
230 |
-
x: [b, h, l, l]
|
231 |
-
ret: [b, h, l, 2*l-1]
|
232 |
-
"""
|
233 |
-
batch, heads, length, _ = x.size()
|
234 |
-
# padd along column
|
235 |
-
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
|
236 |
-
x_flat = x.view([batch, heads, length**2 + length*(length -1)])
|
237 |
-
# add 0's in the beginning that will skew the elements after reshape
|
238 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
|
239 |
-
x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
|
240 |
-
return x_final
|
241 |
-
|
242 |
-
def _attention_bias_proximal(self, length):
|
243 |
-
"""Bias for self-attention to encourage attention to close positions.
|
244 |
-
Args:
|
245 |
-
length: an integer scalar.
|
246 |
-
Returns:
|
247 |
-
a Tensor with shape [1, 1, length, length]
|
248 |
-
"""
|
249 |
-
r = torch.arange(length, dtype=torch.float32)
|
250 |
-
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
251 |
-
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
252 |
-
|
253 |
-
|
254 |
-
class FFN(nn.Module):
|
255 |
-
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
|
256 |
-
super().__init__()
|
257 |
-
self.in_channels = in_channels
|
258 |
-
self.out_channels = out_channels
|
259 |
-
self.filter_channels = filter_channels
|
260 |
-
self.kernel_size = kernel_size
|
261 |
-
self.p_dropout = p_dropout
|
262 |
-
self.activation = activation
|
263 |
-
self.causal = causal
|
264 |
-
|
265 |
-
if causal:
|
266 |
-
self.padding = self._causal_padding
|
267 |
-
else:
|
268 |
-
self.padding = self._same_padding
|
269 |
-
|
270 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
|
271 |
-
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
|
272 |
-
self.drop = nn.Dropout(p_dropout)
|
273 |
-
|
274 |
-
def forward(self, x, x_mask):
|
275 |
-
x = self.conv_1(self.padding(x * x_mask))
|
276 |
-
if self.activation == "gelu":
|
277 |
-
x = x * torch.sigmoid(1.702 * x)
|
278 |
-
else:
|
279 |
-
x = torch.relu(x)
|
280 |
-
x = self.drop(x)
|
281 |
-
x = self.conv_2(self.padding(x * x_mask))
|
282 |
-
return x * x_mask
|
283 |
-
|
284 |
-
def _causal_padding(self, x):
|
285 |
-
if self.kernel_size == 1:
|
286 |
-
return x
|
287 |
-
pad_l = self.kernel_size - 1
|
288 |
-
pad_r = 0
|
289 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
290 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
291 |
-
return x
|
292 |
-
|
293 |
-
def _same_padding(self, x):
|
294 |
-
if self.kernel_size == 1:
|
295 |
-
return x
|
296 |
-
pad_l = (self.kernel_size - 1) // 2
|
297 |
-
pad_r = self.kernel_size // 2
|
298 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
299 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
300 |
-
return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/17TheWord/RealESRGAN/tests/test_discriminator_arch.py
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
|
3 |
-
from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN
|
4 |
-
|
5 |
-
|
6 |
-
def test_unetdiscriminatorsn():
|
7 |
-
"""Test arch: UNetDiscriminatorSN."""
|
8 |
-
|
9 |
-
# model init and forward (cpu)
|
10 |
-
net = UNetDiscriminatorSN(num_in_ch=3, num_feat=4, skip_connection=True)
|
11 |
-
img = torch.rand((1, 3, 32, 32), dtype=torch.float32)
|
12 |
-
output = net(img)
|
13 |
-
assert output.shape == (1, 1, 32, 32)
|
14 |
-
|
15 |
-
# model init and forward (gpu)
|
16 |
-
if torch.cuda.is_available():
|
17 |
-
net.cuda()
|
18 |
-
output = net(img.cuda())
|
19 |
-
assert output.shape == (1, 1, 32, 32)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contoh Surat Rasmi Permohonan Tapak Jualan.md
DELETED
@@ -1,66 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Contoh Surat Rasmi Permohonan Tapak Jualan</h1>
|
3 |
-
<p>Apakah anda ingin memohon tapak jualan untuk menjalankan perniagaan anda? Jika ya, anda perlu menulis surat rasmi permohonan tapak jualan yang betul dan lengkap. Surat rasmi permohonan tapak jualan adalah surat yang ditulis oleh pemohon kepada pihak berkuasa yang menguruskan tapak jualan, seperti majlis perbandaran, pejabat tanah, atau pihak swasta. Surat ini bertujuan untuk meminta kebenaran dan persetujuan untuk menyewa atau menggunakan tapak jualan yang diingini.</p>
|
4 |
-
<h2>Contoh Surat Rasmi Permohonan Tapak Jualan</h2><br /><p><b><b>DOWNLOAD</b> 🔗 <a href="https://byltly.com/2uKx6I">https://byltly.com/2uKx6I</a></b></p><br /><br />
|
5 |
-
<p>Surat rasmi permohonan tapak jualan harus mengandungi beberapa maklumat penting, seperti:</p>
|
6 |
-
<ul>
|
7 |
-
<li>Nama dan alamat pemohon</li>
|
8 |
-
<li>Nama dan alamat penerima surat</li>
|
9 |
-
<li>Tarikh surat</li>
|
10 |
-
<li>Perkara atau tajuk surat</li>
|
11 |
-
<li>Tujuan permohonan</li>
|
12 |
-
<li>Butir-butir tapak jualan yang dimohon, seperti lokasi, saiz, jenis perniagaan, masa operasi, dan sebagainya</li>
|
13 |
-
<li>Dokumen-dokumen sokongan yang berkaitan, seperti salinan kad pengenalan, sijil pendaftaran perniagaan, lesen perniagaan, pelan lokasi, gambar tapak jualan, dan sebagainya</li>
|
14 |
-
<li>Penutup surat yang sopan dan mengharapkan pertimbangan positif dari pihak penerima</li>
|
15 |
-
<li>Tandatangan dan nama pemohon</li>
|
16 |
-
</ul>
|
17 |
-
<p>Berikut adalah contoh surat rasmi permohonan tapak jualan yang boleh dijadikan rujukan:</p>
|
18 |
-
|
19 |
-
<pre><code>SI FULAN BIN SI FULAN
|
20 |
-
No. 100, Kampung Tiada Nama
|
21 |
-
58900 Kuala Tiada
|
22 |
-
Negeri Darul Ikhlas
|
23 |
-
|
24 |
-
Kepada,
|
25 |
-
Pihak Pengurusan Tapak Jualan
|
26 |
-
Majlis Perbandaran Kuala Tiada
|
27 |
-
58900 Kuala Tiada
|
28 |
-
Negeri Darul Ikhlas
|
29 |
-
|
30 |
-
12 April 2023
|
31 |
-
|
32 |
-
Tuan/Puan,
|
33 |
-
|
34 |
-
PERMOHONAN SEWA TAPAK JUALAN DI TAMAN REKREASI KUALA TIADA
|
35 |
-
|
36 |
-
Merujuk perkara di atas, saya Si Fulan Bin Si Fulan ingin memohon untuk menyewa satu tapak jualan di Taman Rekreasi Kuala Tiada. Tujuan saya memohon sewa tapak jualan ini adalah untuk menjalankan perniagaan saya iaitu menjual makanan ringan dan minuman sejuk.
|
37 |
-
|
38 |
-
Dibawah ini disertakan butir-butir perniagaan saya untuk rujukan pihak tuan/puan:
|
39 |
-
|
40 |
-
Nama: Si Fulan Bin Si Fulan
|
41 |
-
No. Kad Pengenalan: 830101-01-1234
|
42 |
-
No. Telefon: 012-3456789
|
43 |
-
Alamat Tetap: No. 100, Kampung Tiada Nama, 58900 Kuala Tiada, Negeri Darul Ikhlas
|
44 |
-
Pekerjaan Tetap: Guru Sekolah Menengah Kebangsaan Kuala Tiada
|
45 |
-
Jenis Perniagaan: Menjual makanan ringan dan minuman sejuk
|
46 |
-
Masa Perniagaan: Setiap hujung minggu dari jam 10 pagi hingga 6 petang
|
47 |
-
|
48 |
-
Disini saya sertakan sekali dokumen-dokumen sokongan saya, iaitu salinan kad pengenalan, sijil pendaftaran perniagaan (SSM), lesen perniagaan (MPK), pelan lokasi tapak jualan yang dikehendaki, dan gambar tapak jualan tersebut di bahagian lampiran.
|
49 |
-
|
50 |
-
Semoga permohonan saya ini dapat dipertimbangkan dengan sebaiknya oleh pihak tuan/puan. Saya amat berharap dapat
|
51 |
-
|
52 |
-
<pre><code>menyewa tapak jualan di Taman Rekreasi Kuala Tiada untuk menambah pendapatan saya dan memberi perkhidmatan yang baik kepada pengunjung taman.
|
53 |
-
|
54 |
-
Segala kerjasama dan bantuan dari pihak tuan/puan saya dahulukan dengan ribuan terima kasih. Sekiranya ada sebarang pertanyaan atau maklum balas, sila hubungi saya di nombor telefon yang diberikan.
|
55 |
-
|
56 |
-
Sekian, terima kasih.
|
57 |
-
|
58 |
-
Yang benar,
|
59 |
-
|
60 |
-
..................................
|
61 |
-
(SI FULAN BIN SI FULAN)
|
62 |
-
No. Telefon: 012-3456789
|
63 |
-
</code></pre></p>
|
64 |
-
<p></p> cec2833e83<br />
|
65 |
-
<br />
|
66 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cours archicad 15 gratuit Matrisez le logiciel de modlisation BIM.md
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
|
2 |
-
<br>- What are the benefits of learning Archicad 15<br>- How to access free courses and tutorials on Archicad 15 | | H2: Archicad basics | - How to install and set up Archicad 15<br>- How to use the interface and tools of Archicad 15<br>- How to create and edit 2D and 3D objects in Archicad 15 | | H2: Archicad advanced | - How to use graphic overrides and substitutions in Archicad 15<br>- How to create custom stairs, railings, roofs, and slabs in Archicad 15<br>- How to use libraries, attributes, and layers in Archicad 15 | | H2: Archicad projects | - How to create a floor plan, a section, a elevation, and a detail in Archicad 15<br>- How to generate photorealistic renderings and animations in Archicad 15<br>- How to print and export drawings and models in Archicad 15 | | H2: Archicad resources | - How to find and download free Archicad templates, objects, and materials<br>- How to access online courses, tutorials, and forums on Archicad 15<br>- How to get certified and improve your skills on Archicad 15 | | H1: Conclusion | - A summary of the main points of the article<br>- A call to action for the readers to start learning Archicad 15 | # Article with HTML formatting <h1>Introduction</h1>
|
3 |
-
<p>Are you an architect, a designer, or a student who wants to create stunning architectural projects with ease and efficiency? If so, you might want to learn how to use Archicad, one of the most popular and powerful software for building information modeling (BIM).</p>
|
4 |
-
<h2>Cours archicad 15 gratuit</h2><br /><p><b><b>Download</b> 🆗 <a href="https://byltly.com/2uKwi0">https://byltly.com/2uKwi0</a></b></p><br /><br />
|
5 |
-
<p>Archicad is a software that allows you to design, model, document, and visualize your projects in 2D and 3D. You can create realistic models of buildings, structures, interiors, landscapes, and more. You can also produce high-quality drawings, renderings, animations, and reports with Archicad.</p>
|
6 |
-
<p>But how can you learn how to use Archicad without spending a fortune on courses or books? The answer is simple: you can access free courses and tutorials on Archicad 15 online. In this article, we will show you how you can learn everything you need to know about Archicad 15 for free. We will cover the basics, the advanced features, the projects, and the resources that you can use to master Archicad 15.</p>
|
7 |
-
<h2>Archicad basics</h2>
|
8 |
-
<p>Before you start working on your projects with Archicad 15, you need to learn some basic concepts and skills. In this section, we will show you how to install and set up Archicad 15, how to use the interface and tools of Archicad 15, and how to create and edit 2D and 3D objects in Archicad 15.</p>
|
9 |
-
<h3>How to install and set up Archicad 15</h3>
|
10 |
-
<p>To install Archicad 15 on your computer, you need to download the installer from the official website of Graphisoft, the developer of Archicad. You can choose between Windows or Mac versions depending on your operating system. You can also select your preferred language from a list of options.</p>
|
11 |
-
<p>Formation archicad 15 en ligne gratuite<br />
|
12 |
-
Tutoriel archicad 15 pour débutants gratuit<br />
|
13 |
-
Apprendre archicad 15 pas à pas gratuitement<br />
|
14 |
-
Cours archicad 15 pdf télécharger gratuitement<br />
|
15 |
-
Vidéo cours archicad 15 complet gratuit<br />
|
16 |
-
Cours archicad 15 niveau avancé gratuit<br />
|
17 |
-
Cours archicad 15 en français gratuit<br />
|
18 |
-
Cours archicad 15 avec certificat gratuit<br />
|
19 |
-
Cours archicad 15 sur udemy gratuit<br />
|
20 |
-
Cours archicad 15 sur youtube gratuit<br />
|
21 |
-
Cours archicad 15 avec exercices pratiques gratuit<br />
|
22 |
-
Cours archicad 15 pour architectes gratuit<br />
|
23 |
-
Cours archicad 15 pour étudiants gratuit<br />
|
24 |
-
Cours archicad 15 pour professionnels gratuit<br />
|
25 |
-
Cours archicad 15 pour débutants gratuit<br />
|
26 |
-
Cours archicad 15 pour maîtriser le logiciel gratuit<br />
|
27 |
-
Cours archicad 15 pour apprendre les bases gratuit<br />
|
28 |
-
Cours archicad 15 pour créer des plans gratuits<br />
|
29 |
-
Cours archicad 15 pour réaliser des projets gratuits<br />
|
30 |
-
Cours archicad 15 pour dessiner en 3D gratuit<br />
|
31 |
-
Cours archicad 15 pour modéliser des bâtiments gratuits<br />
|
32 |
-
Cours archicad 15 pour concevoir des structures gratuites<br />
|
33 |
-
Cours archicad 15 pour optimiser des espaces gratuits<br />
|
34 |
-
Cours archicad 15 pour gérer des documents gratuits<br />
|
35 |
-
Cours archicad 15 pour collaborer avec d'autres utilisateurs gratuits<br />
|
36 |
-
Cours archicad 15 pour exporter des fichiers gratuits<br />
|
37 |
-
Cours archicad 15 pour importer des données gratuites<br />
|
38 |
-
Cours archicad 15 pour personnaliser des paramètres gratuits<br />
|
39 |
-
Cours archicad 15 pour utiliser des outils gratuits<br />
|
40 |
-
Cours archicad 15 pour appliquer des effets gratuits<br />
|
41 |
-
Cours archicad 15 pour animer des scènes gratuites<br />
|
42 |
-
Cours archicad 15 pour simuler des éclairages gratuits<br />
|
43 |
-
Cours archicad 15 pour calculer des coûts gratuits<br />
|
44 |
-
Cours archicad 15 pour respecter des normes gratuites<br />
|
45 |
-
Cours archicad 15 pour intégrer des éléments gratuits<br />
|
46 |
-
Cours archicad 15 pour ajouter des textures gratuites<br />
|
47 |
-
Cours archicad 15 pour modifier des couleurs gratuites<br />
|
48 |
-
Cours archicad 15 pour insérer des objets gratuits<br />
|
49 |
-
Cours archicad 15 pour composer des vues gratuites<br />
|
50 |
-
Cours archicad 15 pour générer des rendus gratuits<br />
|
51 |
-
Cours archicad 15 pour imprimer des plans gratuits<br />
|
52 |
-
Cours archicad 15 pour publier des rapports gratuits<br />
|
53 |
-
Cours archicad 15 pour partager des résultats gratuits<br />
|
54 |
-
Cours archicad 15 pour sauvegarder des travaux gratuits<br />
|
55 |
-
Cours archicad 15 pour restaurer des versions gratuites<br />
|
56 |
-
Cours archicad 15 pour corriger des erreurs gratuites<br />
|
57 |
-
Cours archicad 15 pour améliorer la qualité gratuite<br />
|
58 |
-
Cours archicad 15 pour augmenter la productivité gratuite</p>
|
59 |
-
<p>Once you have downloaded the installer, you need to run it and follow the instructions on the screen. You will need to accept the license agreement, choose a destination folder, and enter your serial number if you have one. If you don't have a serial number, you can use the trial version of Archicad 15 for 30 days.</p>
|
60 |
-
<p>After the installation is complete, you can launch Archicad 15 from your desktop or start menu. You will see a welcome screen that will guide you through some initial settings. You can choose your project type (residential or commercial), your measurement system (metric or imperial), your working environment (standard or customized), and your template file (default or user-defined).</p>
|
61 |
-
<h3>How to use the interface and tools of Archicad 15</h3>
|
62 |
-
<p>The interface of Archicad 15 consists of several elements that help you navigate and work on your projects. The main elements are:</p>
|
63 |
-
<ul>
|
64 |
-
<li>The menu bar: it contains various menus that give you access to different commands and options.</li>
|
65 |
-
<li>The toolbar: it contains icons that represent different tools that you can use to create and modify objects.</li>
|
66 |
-
<li>The toolbox: it contains icons that represent different object types that you can create with the tools.</li>
|
67 |
-
<li>The info box: it displays information about the selected tool or object such as parameters, settings, properties, etc.</li>
|
68 |
-
<li>The navigator: it shows the structure of your project in terms of stories, sections, elevations, layouts, etc.</li>
|
69 |
-
<li>The project map: it shows a graphical representation of your project in terms of views such as floor plans, perspectives, axonometries, etc.</li>
|
70 |
-
<li>The view map: it shows a list of saved views that you can recall at any time.</li>
|
71 |
-
<li>The layout book: it shows a list of layouts that contain drawings or models that you can print or export.</li>
|
72 |
-
<li>The publisher: it allows you to publish your project as PDF files, DWG files, images files, etc.</li>
|
73 |
-
<li>The organizer: it allows you to organize your project data in terms of attributes such as layers, pensets</p> 0a6ba089eb<br />
|
74 |
-
<br />
|
75 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/((FULL)) Xforce Keygen 64-bit Alias AutoStudio 2019 Activation.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p> Free Porn Movie Dirty Latino Trini Porni <br> Xforce Keygen HD2k3 Full Version Keys Free Download <br> bromance <br> One Piece Xforce Keygen Sony Playstation <br> Private Postings on WhiskeyX® 4 - 1.0.4.00.0 Offline Crack <br> Free Mobile Apps Search and Browser <br> Linux News, Recipes, And Tutorials <br> Mouse Mute - DEVBuild 2007 <br> XXX Movies HD Netlix <br> download film vk <br> </p>
|
3 |
-
<h2>xforce keygen 64-bit Alias AutoStudio 2019 activation</h2><br /><p><b><b>DOWNLOAD</b> ✵ <a href="https://imgfil.com/2uxXfh">https://imgfil.com/2uxXfh</a></b></p><br /><br />
|
4 |
-
<p> DXF Import Tutorial is a <br> Noob Friendly Xferxramen unetbootin lite download iran <br> The Orchard-cms.de lite - author Klaus Dehne <br> Earnest Journey 2011 English SDH ISO <br> Dance Central 3 Hack apk <br> AIDA32 128bit Download <br> Wifi Router: http://www.webfixer.eu/download.php?id=5e9e45ea4d94a2259e3a70a7ede14e91.pdf.l <br> Move Support for Exchange, Address Book and more..pptx <br> <br> India+2 hack patch/crack <br> Just a bunch of kink.mobile.10.55.0_CRL <br> <br> Cloud Septar - Apk.torrent <br> <br> xforce keygen 64-bit AutoCAD LT 2014 Crack <br> <br> Disney's Live-action Aladdin movie to hit UK cinemas on Wednesday! <br> <br> Freeware file converter and zip file creator!.exe <br> <br> Everything - Free Download for Mac<br> <br> Hitman 2 free download with trials keygen crack for play <br> <br> Anime Episode Fxu Cheat For Each Episode Download <br> <br> URL Hack No Survey Unlimited Money Best Site Newtonsoft.JSON <br> <br> bhengasi Pokkiri Raja - Full Hindi Movie HD.mp4 <br> <br> 11169442343798870042ULBG6HAL</p>
|
5 |
-
<p> Illustrator Tutorials and Tips Free Tutorial and Tips Videos <br> Free NTI Audio Editor 3.0.0 Crack with Keygen <br> Sony IS300 V2 For Free Download <br> Algorithm Design Manual PDF <br> Toughest archer ppd statistics for 1.5 <br> FARM ITALIA - LA SCUOLA DEI MIGLIORI AMATI ( www.teammiglioriamati.com ) <br> PEUGEOT 400 2009 MANUAL FREE DOWNLOAD <br> Exclusively for: Xfire <br> Xforce Keygen Activation WINDOWS-7 <br> Manually define the number of tile groups, and the appropriate WCF settings for them <br> </p> 899543212b<br />
|
6 |
-
<br />
|
7 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Backstreet Boys Millennium Full Album Zip.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Backstreet Boys, Millennium Full Album Zip</h2><br /><p><b><b>Download File</b> ✯✯✯ <a href="https://imgfil.com/2uxXly">https://imgfil.com/2uxXly</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
Backstreet Boys, Millennium Full Album Zip tinyurl.com/lttzqfo. 4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Cyberlink Powerdvd 14 Crack Serial Key.md
DELETED
@@ -1,64 +0,0 @@
|
|
1 |
-
<h2>Cyberlink powerdvd 14 crack serial key</h2><br /><p><b><b>Download</b> ⇔ <a href="https://imgfil.com/2uy0mX">https://imgfil.com/2uy0mX</a></b></p><br /><br />
|
2 |
-
|
3 |
-
1. Close this window and restart the software.
|
4 |
-
|
5 |
-
2. Launch the software and register.
|
6 |
-
|
7 |
-
3. Please select the license key and click on "Update".
|
8 |
-
|
9 |
-
4. Your new activation key will be displayed.
|
10 |
-
|
11 |
-
What If I Dislike the Product?
|
12 |
-
|
13 |
-
1. Use the product for 30 days (For Limited Period)
|
14 |
-
|
15 |
-
For users who have a good experience with the software, please give us a good review. In order to enhance the compatibility and performance of the product, we will take no charge for technical support for 90 days after your purchase. If you have any issue after the 90 days, please use the "Customer Service" page of the product page to ask for assistance.
|
16 |
-
|
17 |
-
1. Log into the product and open the activation tab.
|
18 |
-
|
19 |
-
2. Click "Customer Service" and then click "Activation Request".
|
20 |
-
|
21 |
-
3. Please indicate your purchase date and product name in the next blank spaces.
|
22 |
-
|
23 |
-
4. Please indicate your version, operating system and the reason for your request.
|
24 |
-
|
25 |
-
5. Your Customer Service will be received within 24 hours and your request will be handled within 3 days.
|
26 |
-
|
27 |
-
1. Go to
|
28 |
-
|
29 |
-
2. Please register the product and log in.
|
30 |
-
|
31 |
-
3. Click the "Customer Service" link and follow the instructions.
|
32 |
-
|
33 |
-
What If the Software I have Already Paid is Suboptimal?
|
34 |
-
|
35 |
-
1. Within 3 days from your purchase date, if you are dissatisfied with the software you have paid for, please contact Customer Support.
|
36 |
-
|
37 |
-
2. Please indicate the purchase date, product name, operating system and the reason for your request.
|
38 |
-
|
39 |
-
3. Please submit the "Requisition for Customer Service" form and include with your message your purchase date and product name.
|
40 |
-
|
41 |
-
What If the License Key I Have Entered Is Incorrect?
|
42 |
-
|
43 |
-
1. Please re-register.
|
44 |
-
|
45 |
-
2. Please follow the steps on the activation page.
|
46 |
-
|
47 |
-
What If I Have Forgotten the License Key?
|
48 |
-
|
49 |
-
1. Please follow the steps on the activation page.
|
50 |
-
|
51 |
-
3. Go to the "Settings" page.
|
52 |
-
|
53 |
-
4. Select the "License Keys" tab.
|
54 |
-
|
55 |
-
5. Please enter the activation code and save.
|
56 |
-
|
57 |
-
6. Your activation code will be displayed.
|
58 |
-
|
59 |
-
Is My License Key Valid?
|
60 |
-
|
61 |
-
1. Please visit 4fefd39f24<br />
|
62 |
-
<br />
|
63 |
-
<br />
|
64 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/DWG TrueView 2012 (64bit) (Product Key And Xforce Keygen) .rar UPDATED.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>DWG TrueView 2012 (64bit) (Product Key And Xforce Keygen) .rar</h2><br /><p><b><b>DOWNLOAD</b> ⭐ <a href="https://imgfil.com/2uy1f8">https://imgfil.com/2uy1f8</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
Autodesk DWG TrueView. 2012 64-bit - free AutoCAD DWG file .... DWG TrueView 2008 (64bit) (Product Key And Xforce Keygen) .rar ... 4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Digifish Aqua Real 2 Version 1.04 Full With Serial.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>this new version includes two improvements over the previous version 1.03: more species, updated sounds, stable and bug-free operation. added in this release: 1.04: new lighting effects, new background music engine, new fish. aquarium hd for xbox 360 is a screensaver that features a unique aquarium simulation as well as music playback! experience an exotic tropical aquarium in real time as thousands of colorful fish swim in a beautiful underwater world. the aquarium simulation includes light, sound, water and even real fish! play background music from your hard drive or from a networked pc! the music is not only for background, you can even add your own music to play while the aquarium is running. the music can be played from your hard drive or from a networked pc. choose between a quiet and a slightly more active light and sound settings.</p>
|
3 |
-
<h2>digifish aqua real 2 version 1.04 full with serial</h2><br /><p><b><b>Download File</b> »»» <a href="https://imgfil.com/2uxXpp">https://imgfil.com/2uxXpp</a></b></p><br /><br />
|
4 |
-
<p>this release was created for you, eager to use aqua real 2 full version v1.04 full and without any limitations. our intentions are not to harm aqua software company but to give the possibility to those who can not pay for any piece of software out there. this should be your intention too, as a user, to fully evaluate aqua real 2 full version v1.04 without restrictions and then decide.</p>
|
5 |
-
<p>digifish aqua real 2 for pc is a really nice fish-watching program. it eschews the traditional aquarium, instead placing your fish in the open ocean, complete with sharks. the free trial is pretty limited, but has nice animations, backgrounds, and fish. the interface is really clean and easy-to-use, as well. probably the top-of-the-line for fish screensavers. full version costs $20.</p> 899543212b<br />
|
6 |
-
<br />
|
7 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Indonesia on PC How to Download and Play with LDPlayer Emulator.md
DELETED
@@ -1,130 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download and Play Bus Simulator Indonesia on PC with LDPlayer</h1>
|
3 |
-
<p>Bus Simulator Indonesia is a popular and realistic game that lets you experience what it's like to be a bus driver in Indonesia. You can design your own livery, drive in authentic Indonesian cities and places, honk your horn with the iconic "Om Telolet Om" sound, and enjoy high-quality graphics and gameplay. But what if you want to play Bus Simulator Indonesia on a bigger screen, with better performance, and more control options? That's where LDPlayer comes in. LDPlayer is a free and powerful Android emulator that allows you to play Android games on your PC. In this article, we will show you how to download and play Bus Simulator Indonesia on PC with LDPlayer.</p>
|
4 |
-
<h2>ldplayer download bus simulator indonesia</h2><br /><p><b><b>Download Zip</b> ✵ <a href="https://urlin.us/2uSVbG">https://urlin.us/2uSVbG</a></b></p><br /><br />
|
5 |
-
<h2>What is Bus Simulator Indonesia?</h2>
|
6 |
-
<p>Bus Simulator Indonesia (aka BUSSID) is a simulation game developed by Maleo, an Indonesian game studio. It was released in 2017 and has been updated regularly with new features and improvements. According to the Google Play Store, it has over 100 million downloads and 4.2 stars rating.</p>
|
7 |
-
<h3>Features of Bus Simulator Indonesia</h3>
|
8 |
-
<p>Below are some of the top features of Bus Simulator Indonesia:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Design your own livery</li>
|
11 |
-
<li>Very easy and intuitive control</li>
|
12 |
-
<li>Authentic Indonesian cities and places</li>
|
13 |
-
<li>Indonesian Buses</li>
|
14 |
-
<li>Cool and fun honks</li>
|
15 |
-
<li>"Om Telolet Om!" (Uncle, honk your horn, uncle! )</li>
|
16 |
-
<li>High quality and detailed 3D graphics</li>
|
17 |
-
<li>No obstructive ads while driving</li>
|
18 |
-
<li>Leaderboard</li>
|
19 |
-
<li>Data saved online</li>
|
20 |
-
<li>Use your own 3D model using vehicle mod system</li>
|
21 |
-
<li>Online multiplayer convoy</li>
|
22 |
-
</ul>
|
23 |
-
<h3>Why play Bus Simulator Indonesia on PC?</h3>
|
24 |
-
<p>Playing Bus Simulator Indonesia on PC has many advantages over playing it on mobile devices. Here are some of them:</p>
|
25 |
-
<ul>
|
26 |
-
<li>You can enjoy a larger screen and better graphics quality.</li>
|
27 |
-
<li>You can use your keyboard and mouse for more precise and comfortable control.</li>
|
28 |
-
<li>You can customize your keymapping according to your preference.</li>
|
29 |
-
<li>You can run multiple instances of the game at the same time using LDPlayer's multi-instance feature.</li>
|
30 |
-
<li>You can avoid battery drain, overheating, and phone calls that interrupt your gameplay.</li>
|
31 |
-
</ul>
|
32 |
-
<h2>What is LDPlayer?</h2>
|
33 |
-
<p>LDPlayer is a free Android emulator for Windows PC that allows you to play Android games and apps on your computer. It is based on Android 9 kernel and supports both 64-bit and 32-bit apps. It has many features that make it one of the best emulators for gaming.</p>
|
34 |
-
<p>How to play Bus Simulator Indonesia on PC with LDPlayer<br />
|
35 |
-
Bus Simulator Indonesia PC version free download<br />
|
36 |
-
LDPlayer emulator for Bus Simulator Indonesia<br />
|
37 |
-
Bus Simulator Indonesia online multiplayer with LDPlayer<br />
|
38 |
-
Best settings for Bus Simulator Indonesia on LDPlayer<br />
|
39 |
-
Bus Simulator Indonesia mod apk download on LDPlayer<br />
|
40 |
-
LDPlayer Android emulator for Bus Simulator Indonesia Maleo<br />
|
41 |
-
Bus Simulator Indonesia graphics and FPS on LDPlayer<br />
|
42 |
-
How to install Bus Simulator Indonesia on PC using LDPlayer<br />
|
43 |
-
Bus Simulator Indonesia custom controls with LDPlayer<br />
|
44 |
-
Bus Simulator Indonesia livery design on PC with LDPlayer<br />
|
45 |
-
Bus Simulator Indonesia klakson om telolet om on LDPlayer<br />
|
46 |
-
Bus Simulator Indonesia leaderboards and data on LDPlayer<br />
|
47 |
-
How to update Bus Simulator Indonesia on PC with LDPlayer<br />
|
48 |
-
Bus Simulator Indonesia bug fixes and improvements on LDPlayer<br />
|
49 |
-
Bus Simulator Indonesia authentic Indonesian environment on PC with LDPlayer<br />
|
50 |
-
Bus Simulator Indonesia easy and intuitive control on LDPlayer<br />
|
51 |
-
Bus Simulator Indonesia high quality and detailed 3D graphics on PC with LDPlayer<br />
|
52 |
-
How to add music to Bus Simulator Indonesia on PC with LDPlayer<br />
|
53 |
-
Bus Simulator Indonesia vehicle mod system on PC with LDPlayer<br />
|
54 |
-
How to play Bus Simulator Indonesia offline on PC with LDPlayer<br />
|
55 |
-
Bus Simulator Indonesia tips and tricks on PC with LDPlayer<br />
|
56 |
-
How to use macros and scripts for Bus Simulator Indonesia on LDPlayer<br />
|
57 |
-
Bus Simulator Indonesia review and rating on PC with LDPlayer<br />
|
58 |
-
How to sync data between mobile and PC for Bus Simulator Indonesia with LDPlayer<br />
|
59 |
-
How to play Bus Simulator Indonesia in full screen mode on PC with LDPlayer<br />
|
60 |
-
How to record and share gameplay of Bus Simulator Indonesia on PC with LDPlayer<br />
|
61 |
-
How to use keyboard and mouse for Bus Simulator Indonesia on PC with LDPlayer<br />
|
62 |
-
How to customize interface and layout for Bus Simulator Indonesia on PC with LDPlayer<br />
|
63 |
-
How to run multiple instances of Bus Simulator Indonesia on PC with LDPlayer<br />
|
64 |
-
How to play Bus Simulator Indonesia in different languages on PC with LDPlayer<br />
|
65 |
-
How to change resolution and orientation for Bus Simulator Indonesia on PC with LDPlayer<br />
|
66 |
-
How to enable virtualization for better performance of Bus Simulator Indonesia on PC with LDPlayer<br />
|
67 |
-
How to fix lag and crash issues of Bus Simulator Indonesia on PC with LDPlayer<br />
|
68 |
-
How to access Google Play Store and Google Play Games for Bus Simulator Indonesia on PC with LDPlayer<br />
|
69 |
-
How to use gamepad or controller for Bus Simulator Indonesia on PC with LDPlayer<br />
|
70 |
-
How to enable smart keymapping for Bus Simulator Indonesia on PC with LDPlayer<br />
|
71 |
-
How to enable turbo mode for faster loading of Bus Simulator Indonesia on PC with LDPlayer<br />
|
72 |
-
How to enable root permission for advanced features of Bus Simulator Indonesia on PC with LDPlayer<br />
|
73 |
-
How to enable network bridge for better connectivity of Bus Simulator Indonesia on PC with LDPlayer<br />
|
74 |
-
How to enable eco mode for lower CPU usage of Bus Simulator Indonesia on PC with LDPlayer<br />
|
75 |
-
How to enable screenshot and screen recorder for capturing moments of Bus Simulator Indonesia on PC with LDPlayer<br />
|
76 |
-
How to enable operation recorder for automating tasks of Bus Simulator Indonesia on PC with LDPlayer<br />
|
77 |
-
How to enable sync settings for synchronizing preferences of Bus Simulator Indonesia across devices with LDPlayer<br />
|
78 |
-
How to enable game booster for optimizing performance of Bus Simulator Indonesia on PC with LDPlayer<br />
|
79 |
-
How to enable disk cleanup for freeing up space of Bus Simulator Indonesia on PC with LDPlayer<br />
|
80 |
-
How to enable app cloner for creating copies of Bus Simulator Indonesia on PC with LDPlayer<br />
|
81 |
-
How to enable app market for discovering more games like Bus Simulator Indonesia on PC with LDPlayer</p>
|
82 |
-
<h3>Features of LDPlayer</h3>
|
83 |
-
<p>Below are some of the top features of LDPlayer:</p>
|
84 |
-
<ul>
|
85 |
-
<li>High performance and stability</li>
|
86 |
-
<li>Low CPU and GPU consumption</li>
|
87 |
-
<li>Graphic quality optimization</li>
|
88 |
-
<li>Custom controls and keymapping tool</li>
|
89 |
-
<li>Multi-instance and multi-instance sync</li>
|
90 |
-
<li>Macros and scripts</li>
|
91 |
-
<li>Data encryption in transit</li>
|
92 |
-
<li>Data deletion request</li>
|
93 |
-
<li>No data shared with third parties </li>
|
94 |
-
<li>Compatible with Hyper-V </li>
|
95 |
-
<h3>Why use LDPlayer to play Bus Simulator Indonesia on PC?</h3>
|
96 |
-
<p>Using LDPlayer to play Bus Simulator Indonesia on PC has many benefits, such as:</p>
|
97 |
-
<ul>
|
98 |
-
<li>You can play the game smoothly and without lag, even on low-end PCs.</li>
|
99 |
-
<li>You can enjoy the game with high-resolution graphics and realistic sound effects.</li>
|
100 |
-
<li>You can customize your controls and keymapping to suit your play style and preferences.</li>
|
101 |
-
<li>You can use LDPlayer's features to enhance your gameplay, such as macros, scripts, multi-instance, and multi-instance sync.</li>
|
102 |
-
<li>You can play the game safely and securely, without worrying about data leakage or malware.</li>
|
103 |
-
</ul>
|
104 |
-
<h2>How to download and install LDPlayer and Bus Simulator Indonesia on PC?</h2>
|
105 |
-
<p>Downloading and installing LDPlayer and Bus Simulator Indonesia on PC is very easy and simple. Just follow these steps:</p>
|
106 |
-
<h3>Step 1: Download LDPlayer from the official website</h3>
|
107 |
-
<p>Go to the official website of LDPlayer () and click on the "Download" button. You will see a pop-up window asking you to save the LDPlayer installer file. Choose a location where you want to save the file and click "Save". The file size is about 500 MB, so it may take some time depending on your internet speed.</p>
|
108 |
-
<h3>Step 2: Install LDPlayer on your PC</h3>
|
109 |
-
<p>Once the download is complete, locate the LDPlayer installer file and double-click on it. You will see a window asking you to choose the installation language. Select your preferred language and click "OK". Then, follow the instructions on the screen to complete the installation process. It may take a few minutes depending on your PC specifications.</p>
|
110 |
-
<h3>Step 3: Launch LDPlayer and search for Bus Simulator Indonesia on the Play Store</h3>
|
111 |
-
<p>After the installation is done, launch LDPlayer from your desktop or start menu. You will see the LDPlayer home screen with various icons and options. Click on the "Play Store" icon to open the Google Play Store app. You will need to sign in with your Google account or create a new one if you don't have one. Then, type "Bus Simulator Indonesia" in the search bar and hit enter. You will see a list of results related to your search query.</p>
|
112 |
-
<h3>Step 4: Install Bus Simulator Indonesia and enjoy the game</h3>
|
113 |
-
<p>Find the Bus Simulator Indonesia app from the list of results and click on it. You will see a page with more information about the app, such as screenshots, ratings, reviews, and description. Click on the "Install" button to start downloading and installing the app on your PC. The app size is about 300 MB, so it may take some time depending on your internet speed. Once the installation is complete, you can click on the "Open" button to launch the game. Alternatively, you can also find the game icon on your LDPlayer home screen or app drawer and click on it to start playing.</p>
|
114 |
-
<h2>Conclusion</h2>
|
115 |
-
<p>Bus Simulator Indonesia is a fun and realistic game that lets you experience what it's like to be a bus driver in Indonesia. You can design your own livery, drive in authentic Indonesian cities and places, honk your horn with the iconic "Om Telolet Om" sound, and enjoy high-quality graphics and gameplay. However, playing Bus Simulator Indonesia on mobile devices may not give you the best gaming experience due to small screen size, limited control options, low performance, battery drain, overheating, phone calls, etc. That's why we recommend you to play Bus Simulator Indonesia on PC with LDPlayer, a free and powerful Android emulator that allows you to play Android games on your computer with larger screen size, better graphics quality, more control options, higher performance, and more features. In this article, we have shown you how to download and play Bus Simulator Indonesia on PC with LDPlayer in four easy steps. We hope you find this article helpful and enjoy playing Bus Simulator Indonesia on PC with LDPlayer.</p>
|
116 |
-
<h2>FAQs</h2>
|
117 |
-
<p>Here are some frequently asked questions about playing Bus Simulator Indonesia on PC with LDPlayer:</p>
|
118 |
-
<h4>Q: Is LDPlayer safe to use?</h4>
|
119 |
-
<p>A: Yes, LDPlayer is safe to use. It does not contain any malware or virus that can harm your PC or data. It also does not share your data with any third parties without your consent . You can use LDPlayer with confidence and peace of mind.</p>
|
120 |
-
<h4>Q: Is LDPlayer free to use?</h4>
|
121 |
-
<p>A: Yes, LDPlayer is free to use. You don't have to pay anything to download or use LDPlayer. However, some optional features may require payment or subscription, such as removing ads or unlocking premium features . You can choose whether to use these features or not according to your needs.</p <h4>Q: How can I update LDPlayer and Bus Simulator Indonesia on PC?</h4>
|
122 |
-
<p>A: To update LDPlayer, you can go to the LDPlayer settings and click on the "Check for updates" button. You will see a pop-up window telling you whether there is a new version available or not. If there is, you can click on the "Update" button to download and install the latest version of LDPlayer. To update Bus Simulator Indonesia, you can go to the Play Store app and search for Bus Simulator Indonesia. You will see a page with more information about the app, such as screenshots, ratings, reviews, and description. If there is an update available, you will see an "Update" button next to the "Open" button. You can click on the "Update" button to download and install the latest version of Bus Simulator Indonesia.</p>
|
123 |
-
<h4>Q: How can I use LDPlayer's features to enhance my gameplay of Bus Simulator Indonesia?</h4>
|
124 |
-
<p>A: LDPlayer has many features that can enhance your gameplay of Bus Simulator Indonesia, such as macros, scripts, multi-instance, and multi-instance sync. Macros and scripts allow you to automate certain actions or commands in the game, such as honking, braking, accelerating, etc. You can record your own macros or scripts using LDPlayer's built-in tool, or import them from other sources. Multi-instance and multi-instance sync allow you to run multiple instances of the game at the same time on your PC, and synchronize your actions across all instances. This way, you can play with multiple accounts or characters, or join online multiplayer convoys with yourself.</p>
|
125 |
-
<h4>Q: How can I contact LDPlayer's customer service if I have any questions or issues?</h4>
|
126 |
-
<p>A: If you have any questions or issues regarding LDPlayer or Bus Simulator Indonesia, you can contact LDPlayer's customer service through various channels, such as email, Facebook, Twitter, Discord, Reddit, YouTube, etc. You can find the contact information on the official website of LDPlayer () or on the LDPlayer app itself. You can also check out the FAQ section or the blog section on the website for more information and tips.</p>
|
127 |
-
<h4>Q: How can I share my feedback or suggestions about LDPlayer or Bus Simulator Indonesia?</h4>
|
128 |
-
<p>A: We appreciate your feedback and suggestions about LDPlayer or Bus Simulator Indonesia. You can share your thoughts with us through various channels, such as email, Facebook, Twitter, Discord, Reddit, YouTube, etc. You can also leave a comment or a review on the Play Store app or on the official website of LDPlayer (). Your feedback and suggestions will help us improve our products and services and provide you with a better gaming experience.</p> 197e85843d<br />
|
129 |
-
<br />
|
130 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre el secreto de Clash Royale Todo Infinito APK fcil y rpido.md
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Clash Royale Todo Infinito Apk: What Is It and How to Get It?</h1>
|
3 |
-
<p>If you are a fan of Clash Royale, you might have heard of Clash Royale Todo Infinito Apk. This is a modified version of the game that gives you unlimited resources, such as gems, gold, and cards. But what exactly is Clash Royale, and how can you get this apk? In this article, we will answer these questions and give you some tips and tricks for playing the game.</p>
|
4 |
-
<h2>What is Clash Royale?</h2>
|
5 |
-
<p>Clash Royale is a popular mobile game developed and published by Supercell, the same company behind Clash of Clans. It is a real-time multiplayer battle game that combines elements of collectible card games and strategy games. Here are some of the main features of the game:</p>
|
6 |
-
<h2>clash royale todo infinito apk</h2><br /><p><b><b>Download</b> ✔✔✔ <a href="https://urlin.us/2uT29L">https://urlin.us/2uT29L</a></b></p><br /><br />
|
7 |
-
<h3>A real-time multiplayer battle game</h3>
|
8 |
-
<p>In Clash Royale, you can challenge players from around the world in fast-paced duels. The goal is to destroy your opponent's three towers, or at least more towers than they do, before the time runs out. You can use a variety of units, spells, and buildings to attack and defend. Each match lasts for three minutes, or longer if there is a tie.</p>
|
9 |
-
<h3>A collectible card game</h3>
|
10 |
-
<p>Clash Royale features over 100 cards that represent different troops, spells, and buildings from the Clash universe. You can collect and upgrade these cards by winning battles, opening chests, or buying them with gems or gold. You can also create your own battle deck with eight cards that suit your play style and strategy.</p>
|
11 |
-
<h3>A strategic game</h3>
|
12 |
-
<p>Clash Royale is not just about spamming cards on the battlefield. You need to think carefully about when and where to place your cards, how to counter your opponent's moves, and how to manage your elixir. Elixir is the resource that you use to play cards, and it regenerates over time. You also need to consider the strengths and weaknesses of each card, as well as their synergies and interactions.</p>
|
13 |
-
<h2>What is Clash Royale Todo Infinito Apk?</h2>
|
14 |
-
<p>Clash Royale Todo Infinito Apk is a modified version of the game that gives you access to unlimited resources. This means that you can get as many gems, gold, and cards as you want without spending any money or time. You can also unlock all the arenas, chests, and features that are normally restricted by your level or progress. Here are some of the benefits and risks of using this apk:</p>
|
15 |
-
<h3>The benefits of using it</h3>
|
16 |
-
<ul>
|
17 |
-
<li>You can enjoy the game without any limitations or frustrations.</li>
|
18 |
-
<li>You can experiment with different cards and decks without worrying about wasting resources.</li>
|
19 |
-
<li>You can dominate your opponents with powerful cards and strategies.</li>
|
20 |
-
<li>You can have more fun and excitement in the game.</li>
|
21 |
-
</ul>
|
22 |
-
<h3>The risks of using it</h3>
|
23 |
-
<ul>
|
24 |
-
<li>You might lose the challenge and thrill of the game.</li>
|
25 |
-
<li>You might get bored or lose interest in the game.</li>
|
26 |
-
<li>You might face technical issues or errors in the game.</li>
|
27 |
-
<li>You might get banned or suspended by Supercell for violating their terms of service.</li>
|
28 |
-
</ul>
|
29 |
-
<h2>How to get Clash Royale Todo Infinito Apk?</h2>
|
30 |
-
<p>If you want to try Clash Royale Todo Infinito Apk, you need to follow these steps:</p>
|
31 |
-
<h3>Download from a reliable source</h3>
|
32 |
-
<p>There are many websites that claim to offer Clash Royale Todo Infinito Apk, but not all of them are safe or trustworthy. Some of them might contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and do some research before downloading anything. You can check the reviews, ratings, and comments of other users to see if the apk is reliable and working. You can also use antivirus software or scan the apk file before installing it.</p>
|
33 |
-
<h3>Install on your device</h3>
|
34 |
-
<p>Once you have downloaded the apk file, you need to install it on your device. However, you might encounter some issues or errors during the installation process. This is because Clash Royale Todo Infinito Apk is not an official version of the game, and it might not be compatible with your device or operating system. To fix this, you need to enable the unknown sources option on your device settings. This will allow you to install apps from sources other than the Google Play Store. You can also uninstall the original version of Clash Royale if you have it on your device, as it might cause conflicts or crashes with the apk.</p>
|
35 |
-
<p>clash royale mod apk unlimited gems and coins<br />
|
36 |
-
clash royale hack apk download for android<br />
|
37 |
-
clash royale private server apk with all cards unlocked<br />
|
38 |
-
clash royale apk mod menu with god mode<br />
|
39 |
-
clash royale latest version apk free download<br />
|
40 |
-
clash royale cheat apk no root no survey<br />
|
41 |
-
clash royale online generator apk without human verification<br />
|
42 |
-
clash royale modded apk by master royale infinity<br />
|
43 |
-
clash royale apk atualizado 2023 com tudo infinito<br />
|
44 |
-
clash royale apk mediafire link download 2023<br />
|
45 |
-
clash royale apk modificado com gemas e ouro infinito<br />
|
46 |
-
clash royale hackeado apk descargar gratis para android<br />
|
47 |
-
clash royale servidor privado apk con todos los personajes desbloqueados<br />
|
48 |
-
clash royale apk mod menu con modo dios<br />
|
49 |
-
clash royale ultima version apk descargar gratis<br />
|
50 |
-
clash royale truco apk sin root sin encuesta<br />
|
51 |
-
clash royale generador online apk sin verificacion humana<br />
|
52 |
-
clash royale modificado apk por master royale infinity<br />
|
53 |
-
clash royale descargar master royale infinity 3.2729 apk gratis para android<br />
|
54 |
-
clash royale novo apk mod do clash royale todo infinito youtube<br />
|
55 |
-
clash royale download the apk from uptodown for android<br />
|
56 |
-
clash royale unlimited resources apk for android and ios<br />
|
57 |
-
clash royale best private server apk with custom cards and skins<br />
|
58 |
-
clash royale hack tool apk no password no jailbreak<br />
|
59 |
-
clash royale working cheats apk 2023 updated daily<br />
|
60 |
-
clash royale free gems and gold apk without offers or surveys<br />
|
61 |
-
clash royale cracked apk by nulls royal latest version<br />
|
62 |
-
clash royale baixar master royale infinity 3.2729 apk gratis para android<br />
|
63 |
-
clash royale novo apk mod do clash royale tudo infinito mediafire<br />
|
64 |
-
clash royale baixar o apk do uptodown para android<br />
|
65 |
-
clash royale recursos ilimitados apk para android e ios<br />
|
66 |
-
clash royale melhor servidor privado apk com cartas e skins personalizados<br />
|
67 |
-
clash royale ferramenta de hackear apk sem senha sem jailbreak<br />
|
68 |
-
clash royale truques funcionando apk 2023 atualizado diariamente<br />
|
69 |
-
clash royale gemas e ouro gratis apk sem ofertas ou pesquisas<br />
|
70 |
-
clash royale apk rachado por nulls royal ultima versao<br />
|
71 |
-
descargar master royal infinity 3.2729 APK gratis para Android - Malavida.com/clashroyal/<br />
|
72 |
-
nuevo APK mod de Clash Royale todo infinito YouTube - YouTube.com/watch?v=EOCZMUdAql4 <br />
|
73 |
-
descargar el APK de Uptodown para Android - Clash-Royal.en.uptodown.com/android <br />
|
74 |
-
recursos ilimitados APK para Android y iOS - Clash-Royals.com/unlimited-resources-apk/ <br />
|
75 |
-
mejor servidor privado APK con cartas y skins personalizados - Clash-Royals.net/best-private-server-apk/ <br />
|
76 |
-
herramienta de hackear APK sin contraseña sin jailbreak - Clash-Royals.org/hack-tool-apk/ <br />
|
77 |
-
trucos funcionando APK 2023 actualizado diariamente - Clash-Royals.info/working-cheats-apk/ <br />
|
78 |
-
gemas y oro gratis APK sin ofertas o encuestas - Clash-Royals.co/free-gems-and-gold-apk/ <br />
|
79 |
-
APK agrietado por nulls royal última versión - Clash-Royals.io/cracked-apk-by-nulls/</p>
|
80 |
-
<h3>Enjoy the game</h3>
|
81 |
-
<p>After installing the apk, you can launch the game and enjoy the unlimited resources and features. You can create your own custom deck with any cards you want, unlock all the chests and arenas, and challenge anyone in the game. You can also join a clan and share cards with other players who use the same apk. However, you should be aware that using Clash Royale Todo Infinito Apk might affect your game experience and performance. You might face lag, glitches, or bugs in the game. You might also lose your progress or account if Supercell detects that you are using a modified version of the game.</p>
|
82 |
-
<h2>Tips and tricks for playing Clash Royale</h2>
|
83 |
-
<p>Whether you use Clash Royale Todo Infinito Apk or not, there are some tips and tricks that can help you improve your skills and win more battles in Clash Royale. Here are some of them:</p>
|
84 |
-
<h3>Join a clan and share cards</h3>
|
85 |
-
<p>One of the best ways to progress faster and get more cards in Clash Royale is to join a clan and share cards with other members. You can request and donate cards every day, which will give you gold and experience points. You can also chat with your clanmates, ask for advice, and practice with friendly battles. You can also participate in clan wars and clan chest events, which will give you more rewards and fun.</p>
|
86 |
-
<h3>Attack in pairs and use combos</h3>
|
87 |
-
<p>Another important tip for playing Clash Royale is to attack in pairs and use combos. This means that you should not play your cards one by one, but rather combine them to create powerful attacks and defenses. For example, you can pair a tank unit like a giant or a golem with a support unit like a wizard or a musketeer behind it. This will create a strong push that can deal a lot of damage to your opponent's towers. You can also use spells like fireball or zap to support your units or counter your opponent's units.</p>
|
88 |
-
<h3>Be patient and count elixir</h3>
|
89 |
-
<p>The last tip for playing Clash Royale is to be patient and count elixir. This means that you should not rush into attacking or defending without thinking first. You should wait for the right moment to play your cards, depending on the situation and your elixir advantage. Elixir advantage is the difference between your elixir and your opponent's elixir at any given time. You can gain elixir advantage by playing cheaper cards than your opponent, by making positive elixir trades (using less elixir to counter more elixir), or by letting your opponent waste their elixir on unnecessary moves.</p>
|
90 |
-
<h2>Conclusion</h2>
|
91 |
-
<p>Clash Royale is a fun and addictive game that combines real-time multiplayer battles, collectible card games, and strategy games. Clash Royale Todo Infinito Apk is a modified version of the game that gives you unlimited resources and features. However, it also comes with some risks and drawbacks that might affect your game experience and performance. If you want to try it, you need to download it from a reliable source, install it on your device, and enjoy the game. You can also follow some tips and tricks to improve your skills and win more battles in Clash Royale.</p>
|
92 |
-
<h2>FAQs</h2>
|
93 |
-
<ul>
|
94 |
-
<li><b>What is the difference between Clash Royale Todo Infinito Apk and Clash Royale Mod Apk?</b></li>
|
95 |
-
<li>A: Clash Royale Todo Infinito Apk and Clash Royale Mod Apk are both modified versions of Clash Royale that give you unlimited resources and features. However, they might have different sources, versions, or features. For example, some Clash Royale Mod Apks might have custom servers, private servers, or unlimited chests, while others might not. You should always check the details and specifications of the apk before downloading it.</li>
|
96 |
-
<li><b>Is Clash Royale Todo Infinito Apk safe to use?</b></li>
|
97 |
-
<li>A: Clash Royale Todo Infinito Apk is not an official version of the game, and it might not be safe to use. It might contain viruses, malware, or spyware that can harm your device or steal your personal information. It might also cause technical issues or errors in the game. It might also get you banned or suspended by Supercell for violating their terms of service. Therefore, you should use it at your own risk and discretion.</li>
|
98 |
-
<li><b>Can I play Clash Royale Todo Infinito Apk with my friends who use the original version of the game?</b></li>
|
99 |
-
<li>A: No, you cannot play Clash Royale Todo Infinito Apk with your friends who use the original version of the game. This is because Clash Royale Todo Infinito Apk uses a different server and database than the original version of the game. Therefore, you can only play with other players who use the same apk as you.</li>
|
100 |
-
<li><b>How can I update Clash Royale Todo Infinito Apk?</b></li>
|
101 |
-
<li>A: Clash Royale Todo Infinito Apk does not update automatically like the original version of the game. You need to manually download and install the latest version of the apk from a reliable source whenever there is a new update. However, you might lose your progress or account if you update the apk, as it might not be compatible with the previous version.</li>
|
102 |
-
<li><b>Where can I find more information about Clash Royale Todo Infinito Apk?</b></li>
|
103 |
-
<li>A: You can find more information about Clash Royale Todo Infinito Apk on various websites, blogs, forums, or social media platforms that are dedicated to Clash Royale or mobile gaming. You can also watch videos, tutorials, or reviews of the apk on YouTube or other streaming platforms. However, you should always verify the credibility and accuracy of the information before trusting it.</li>
|
104 |
-
</ul></p> 197e85843d<br />
|
105 |
-
<br />
|
106 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing Lite Mod APK with Unlimited Money and Unlocked Features.md
DELETED
@@ -1,91 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>CarX Drift Racing Lite Mod APK: A Fun and Free Racing Game</h1>
|
3 |
-
<p>If you are a fan of car racing games, you might have heard of CarX Drift Racing Lite. It is a popular game that lets you experience the thrill of drifting on different tracks with realistic physics and graphics. But what if you want to enjoy the game without any limitations or interruptions? That's where CarX Drift Racing Lite Mod APK comes in handy. In this article, we will tell you what CarX Drift Racing Lite is, how to download and install the mod apk version, and why you should use it.</p>
|
4 |
-
<h2>carx drift racing lite mod apk happymod</h2><br /><p><b><b>Download Zip</b> »»» <a href="https://urlin.us/2uSV3T">https://urlin.us/2uSV3T</a></b></p><br /><br />
|
5 |
-
<h2>What is CarX Drift Racing Lite?</h2>
|
6 |
-
<p>CarX Drift Racing Lite is a racing game developed by CarX Technologies. It is a lite version of the original CarX Drift Racing game, which means it has fewer features and content, but it is also more suitable for low-end devices. The game allows you to choose from different cars and tracks, customize your vehicle, and compete with other players online or offline. You can also earn coins by drifting and use them to upgrade your car or unlock new scenes.</p>
|
7 |
-
<h3>Features of CarX Drift Racing Lite</h3>
|
8 |
-
<p>CarX Drift Racing Lite has many features that make it an enjoyable and addictive game for racing enthusiasts. Here are some of them:</p>
|
9 |
-
<h4>Realistic physics and graphics</h4>
|
10 |
-
<p>The game uses a sophisticated car physics engine that simulates the behavior of real cars on different surfaces and conditions. You can feel the difference between asphalt, grass, sand, and snow, as well as the impact of speed, weight, and inertia on your car's performance. The game also has stunning graphics that create a realistic atmosphere for your racing experience. You can see the smoke, dust, sparks, and skid marks as you drift on the tracks.</p>
|
11 |
-
<h4>Customizable cars and tracks</h4>
|
12 |
-
<p>The game offers you a variety of cars to choose from, each with its own characteristics and specifications. You can also customize your car's appearance, color, wheels, engine, suspension, and more. You can also choose from different tracks that have different layouts, obstacles, and weather effects. You can even create your own tracks using the track editor feature.</p>
|
13 |
-
<h4>Leaderboards and achievements</h4>
|
14 |
-
<p>The game has a competitive mode that lets you challenge other players online or offline. You can see your ranking on the global or local leaderboards, as well as your personal statistics and records. You can also earn achievements by completing various tasks and challenges in the game.</p>
|
15 |
-
<p>carx drift racing lite unlimited money mod apk<br />
|
16 |
-
carx drift racing lite hack apk download<br />
|
17 |
-
carx drift racing lite mod apk latest version<br />
|
18 |
-
carx drift racing lite unlocked cars mod apk<br />
|
19 |
-
carx drift racing lite mod apk android 1<br />
|
20 |
-
carx drift racing lite mod apk revdl<br />
|
21 |
-
carx drift racing lite mod apk free download<br />
|
22 |
-
carx drift racing lite mod apk offline<br />
|
23 |
-
carx drift racing lite mod apk no root<br />
|
24 |
-
carx drift racing lite mod apk obb<br />
|
25 |
-
carx drift racing lite mod apk unlimited coins<br />
|
26 |
-
carx drift racing lite mod apk 1.1<br />
|
27 |
-
carx drift racing lite mod apk rexdl<br />
|
28 |
-
carx drift racing lite mod apk pure<br />
|
29 |
-
carx drift racing lite mod apk 2023<br />
|
30 |
-
carx drift racing lite mod apk happymod.com<br />
|
31 |
-
carx drift racing lite mod apk all cars unlocked<br />
|
32 |
-
carx drift racing lite mod apk unlimited everything<br />
|
33 |
-
carx drift racing lite mod apk for pc<br />
|
34 |
-
carx drift racing lite mod apk ios<br />
|
35 |
-
carx drift racing lite cheat codes mod apk<br />
|
36 |
-
carx drift racing lite hack tool mod apk<br />
|
37 |
-
carx drift racing lite premium mod apk<br />
|
38 |
-
carx drift racing lite pro mod apk<br />
|
39 |
-
carx drift racing lite full version mod apk<br />
|
40 |
-
carx drift racing lite mega mod apk<br />
|
41 |
-
carx drift racing lite vip mod apk<br />
|
42 |
-
carx drift racing lite gold mod apk<br />
|
43 |
-
carx drift racing lite cracked mod apk<br />
|
44 |
-
carx drift racing lite original mod apk<br />
|
45 |
-
download game carx drift racing lite mod apk<br />
|
46 |
-
how to install carx drift racing lite mod apk<br />
|
47 |
-
cara download carx drift racing lite mod apk<br />
|
48 |
-
descargar carx drift racing lite mod apk<br />
|
49 |
-
telecharger carx drift racing lite mod apk<br />
|
50 |
-
baixar carx drift racing lite mod apk<br />
|
51 |
-
indir carx drift racing lite mod apk<br />
|
52 |
-
scaricare carx drift racing lite mod apk<br />
|
53 |
-
unduh carx drift racing lite mod apk<br />
|
54 |
-
скачать carx drift racing lite мод апк</p>
|
55 |
-
<h3>How to download and install CarX Drift Racing Lite Mod APK?</h3>
|
56 |
-
<p>If you want to download and install CarX Drift Racing Lite Mod APK, you need to follow these simple steps:</p>
|
57 |
-
<h4>Download the mod apk file from a trusted source</h4>
|
58 |
-
<p>You can find many websites that offer CarX Drift Racing Lite Mod APK files for free download. However, not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful when choosing a source for downloading the mod apk file. One of the trusted sources that we recommend is HappyMod. It is a platform that provides modded versions of various games and apps with 3x speed download and verified by users.</p>
|
59 |
-
<h4>Enable unknown sources on your device</h4>
|
60 |
-
<p>Before you can install the mod apk file, you need to enable unknown sources on your device. This is a security setting that prevents the installation of apps from sources other than the official app store. To enable unknown sources, you need to go to your device's settings, then security, then toggle on the option that says "allow installation of apps from unknown sources". This may vary depending on your device model and Android version.</p>
|
61 |
-
<h4>Install the mod apk file and enjoy the game</h4>
|
62 |
-
<p>Once you have downloaded the mod apk file and enabled unknown sources, you can install the mod apk file by tapping on it and following the instructions. After the installation is complete, you can launch the game and enjoy the modded features. You don't need to uninstall the original game or create a new account. You can use your existing account and data with the mod apk version.</p>
|
63 |
-
<h2>Why use CarX Drift Racing Lite Mod APK?</h2>
|
64 |
-
<p>You might be wondering why you should use CarX Drift Racing Lite Mod APK instead of the original game. Well, there are many benefits of using the mod apk version that will enhance your gaming experience. Here are some of them:</p>
|
65 |
-
<h3>Benefits of using the mod apk version</h3>
|
66 |
-
<p>The mod apk version of CarX Drift Racing Lite has many advantages over the original game. Some of them are:</p>
|
67 |
-
<h4>Unlimited coins and unlocked scenes</h4>
|
68 |
-
<p>One of the main benefits of using the mod apk version is that you get unlimited coins and unlocked scenes in the game. Coins are the currency that you use to buy and upgrade cars, as well as unlock new tracks and scenes. Normally, you have to earn coins by drifting or watching ads, which can be time-consuming and annoying. But with the mod apk version, you get unlimited coins that you can spend as you wish. You also get access to all the scenes that are otherwise locked in the original game. You can enjoy different environments and challenges without any restrictions.</p>
|
69 |
-
<h4>No ads and no root required</h4>
|
70 |
-
<p>Another benefit of using the mod apk version is that you don't have to deal with any ads or root your device. Ads are annoying and distracting, especially when they pop up in the middle of your game. They can also consume your data and battery. But with the mod apk version, you don't have to worry about any ads interrupting your game. You can play smoothly and peacefully without any interruptions. You also don't need to root your device to use the mod apk version. Rooting is a process that gives you full control over your device, but it also voids your warranty and exposes your device to risks. But with the mod apk version, you don't need to root your device at all. You can use it safely and easily without any complications.</p>
|
71 |
-
<h4>Compatible with most devices and easy to use</h4>
|
72 |
-
<p>The last benefit of using the mod apk version is that it is compatible with most devices and easy to use. The mod apk version is optimized for low-end devices, which means it runs smoothly and efficiently on most Android devices. You don't need a high-end device or a lot of storage space to play the game. You also don't need any special skills or knowledge to use the mod apk version. You just need to follow the simple steps mentioned above and you are good to go.</p>
|
73 |
-
<h2>Conclusion</h2>
|
74 |
-
<p>CarX Drift Racing Lite is a fun and free racing game that lets you drift on different tracks with realistic physics and graphics. It has many features that make it an enjoyable and addictive game for racing enthusiasts. However, if you want to enjoy the game without any limitations or interruptions, you should use CarX Drift Racing Lite Mod APK. It is a modified version of the original game that gives you unlimited coins, unlocked scenes, no ads, no root required, and compatibility with most devices. You can download and install it easily from HappyMod and enjoy the game with more fun and freedom.</p>
|
75 |
-
<p>I hope this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
|
76 |
-
<h3>FAQs</h3>
|
77 |
-
<p>Here are some frequently asked questions about CarX Drift Racing Lite Mod APK:</p>
|
78 |
-
<ol>
|
79 |
-
<li><b>Is CarX Drift Racing Lite Mod APK safe?</b></li>
|
80 |
-
<p>Yes, CarX Drift Racing Lite Mod APK is safe to use as long as you download it from a trusted source like HappyMod. It does not contain any viruses, malware, or spyware that can harm your device or steal your personal information.</p>
|
81 |
-
<li><b>Is CarX Drift Racing Lite Mod APK legal?</b></li>
|
82 |
-
<p>Yes, CarX Drift Racing Lite Mod APK is legal to use as long as you don't use it for any illegal or unethical purposes, such as cheating, hacking, or pirating. You should also respect the rights and interests of the original developers and publishers of the game.</p>
|
83 |
-
<li><b>Can I update CarX Drift Racing Lite Mod APK?</b></li>
|
84 |
-
<p>Yes, you can update CarX Drift Racing Lite Mod APK whenever there is a new version available. However, you need to download and install the new mod apk file from the same source that you used before. You don't need to uninstall the previous mod apk file or lose your data. You can simply overwrite the old file with the new one and enjoy the updated features.</p>
|
85 |
-
<li><b>Can I play CarX Drift Racing Lite Mod APK online?</b></li>
|
86 |
-
<p>Yes, you can play CarX Drift Racing Lite Mod APK online with other players. However, you need to be careful when playing online, as some players may report you for using the mod apk version. This may result in your account being banned or suspended by the game's servers. Therefore, you should use the mod apk version at your own risk and discretion when playing online.</p>
|
87 |
-
<li><b>Can I use CarX Drift Racing Lite Mod APK on iOS devices?</b></li>
|
88 |
-
<p>No, CarX Drift Racing Lite Mod APK is only compatible with Android devices. It is not available for iOS devices, such as iPhones or iPads. If you want to play CarX Drift Racing Lite on iOS devices, you need to download and install the original game from the App Store.</p>
|
89 |
-
</ol></p> 197e85843d<br />
|
90 |
-
<br />
|
91 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Age of History APK - Download the Best Strategy Game for Android.md
DELETED
@@ -1,155 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Age of History APK Indir: A Grand Strategy Game for Android</h1>
|
3 |
-
<p>If you are a fan of strategy games and history, you might want to check out Age of History APK indir, a grand strategy wargame that is simple to learn yet hard to master. In this game, you can use military tactics and cunning diplomacy to either unify the world or conquer it. You can also create your own history using in-game editors and play with as many players as there are civilizations in a scenario. In this article, we will tell you everything you need to know about this game, including how to download and install it, how to play it, and how it compares to other strategy games.</p>
|
4 |
-
<h2>age of history apk indir</h2><br /><p><b><b>Download Zip</b> ✫ <a href="https://jinyurl.com/2uNLz0">https://jinyurl.com/2uNLz0</a></b></p><br /><br />
|
5 |
-
<h2>What is Age of History?</h2>
|
6 |
-
<p>Age of History is a turn-based strategy game that covers the whole history of humanity, from the dawn of civilization to the far future. You can play as many civilizations ranging from the largest empire to the smallest tribe, and lead your people to glory in a campaign spanning thousands of years. You can also choose from different ages, such as the Age of Civilizations, the Age of Feudalism, the Age of Industrialization, the Age of World War II, and more.</p>
|
7 |
-
<p>One of the most interesting features of this game is that it lets you create your own scenarios and civilizations using in-game editors. You can customize everything from the map, the provinces, the terrain types, the growth rate, the cities, the flags, the alliances, the diplomacy colors, and more. You can also share your creations with other players online and download their scenarios as well.</p>
|
8 |
-
<p>Another feature that makes this game unique is that it has two maps: Earth and Kepler-22b. Earth is a detailed map of the world with many historical borders and realistic geography. Kepler-22b is a fictional planet that has 404 provinces and different terrain types. You can explore both maps and see how different civilizations interact with each other.</p>
|
9 |
-
<h2>How to download and install Age of History APK?</h2>
|
10 |
-
<p>If you want to play this game on your Android device, you will need to download and install its APK file from a reliable source. One of the best sources for this is APKCombo, a website that offers free APK downloads for various apps and games. Here are the steps to download and install Age of History APK from APKCombo:</p>
|
11 |
-
<ol>
|
12 |
-
<li>Go to <a href="(^1^)">https://apkcombo.com/tr/age-of-history-ii/age.of.civilizations2.jakowski.lukasz.lite/</a> on your browser.</li>
|
13 |
-
<li>Click on the green "Download APK" button on the page.</li>
|
14 |
-
<li>Select your preferred version (the latest one is recommended) and click on "Download".</li>
|
15 |
-
<li>Wait for the download to finish and then open the APK file on your device.</li>
|
16 |
-
<li>Follow the instructions on your screen to install the game on your device.</li>
|
17 |
-
</ol>
|
18 |
-
<p>Some of the requirements and permissions for the game are:</p>
|
19 |
-
<ul>
|
20 |
-
<li>Android version: 4.4 or higher</li>
|
21 |
-
<li>Storage: 87 MB or more</li>
|
22 |
-
<li>Internet: required for online features</li>
|
23 |
-
<li>Other permissions: access network state, access wifi state, internet, read external storage, write external storage</li>
|
24 |
-
</ul>
|
25 |
-
<p>Some of the advantages of downloading the game from APKCombo are:</p>
|
26 |
-
<ul>
|
27 |
-
<li>You can get the latest version of the game without waiting for updates from Google Play Store.</li>
|
28 |
-
<li>You can download the game for free without any ads or in-app purchases.</li>
|
29 |
-
<li>You can download the game safely and securely without any viruses or malware.</li>
|
30 |
-
</ul>
|
31 |
-
<h2>How to play Age of History?</h2>
|
32 |
-
<p>Age of History is a game that requires both strategic thinking and historical knowledge. You will need to manage your economy, your military, your diplomacy, and your population as you expand your territory and influence. Here are some of the basics of the game that you should know before you start playing:</p>
|
33 |
-
<p>The game is divided into turns, each representing a certain amount of time depending on the age you are playing. Each turn, you can issue orders to your provinces using movement points (MP). You can move your troops, build buildings, recruit units, declare war, make peace, form alliances, and more. You can also use diplomacy points (DP) to influence other civilizations and improve your relations with them. You can also use gold to buy more MP or DP, or to bribe other leaders.</p>
|
34 |
-
<p>age of history apk download free<br />
|
35 |
-
age of history apk full version<br />
|
36 |
-
age of history apk mod unlimited money<br />
|
37 |
-
age of history apk android oyun club<br />
|
38 |
-
age of history apk latest version<br />
|
39 |
-
age of history apk hile indir<br />
|
40 |
-
age of history apk obb<br />
|
41 |
-
age of history apk revdl<br />
|
42 |
-
age of history apk hack<br />
|
43 |
-
age of history apk 1.1548a<br />
|
44 |
-
age of history apk uptodown<br />
|
45 |
-
age of history apk rexdl<br />
|
46 |
-
age of history apk pure<br />
|
47 |
-
age of history apk 2023<br />
|
48 |
-
age of history apk para hilesi<br />
|
49 |
-
age of history apk pc<br />
|
50 |
-
age of history apk tam sürüm indir<br />
|
51 |
-
age of history apk son sürüm indir<br />
|
52 |
-
age of history apk güncel indir<br />
|
53 |
-
age of history apk bedava indir<br />
|
54 |
-
age of history apk mega hile indir<br />
|
55 |
-
age of history apk altın hilesi indir<br />
|
56 |
-
age of history apk kurulumu<br />
|
57 |
-
age of history apk nasıl indirilir<br />
|
58 |
-
age of history apk nasıl yüklenir<br />
|
59 |
-
age of history lite apk indir<br />
|
60 |
-
age of history 2 apk indir<br />
|
61 |
-
age of history europe apk indir<br />
|
62 |
-
age of history world war 2 apk indir<br />
|
63 |
-
age of history world war 1 apk indir<br />
|
64 |
-
age of history civil war apk indir<br />
|
65 |
-
age of history asia apk indir<br />
|
66 |
-
age of history africa apk indir<br />
|
67 |
-
age of history america apk indir<br />
|
68 |
-
age of civilizations 2 apk indir<br />
|
69 |
-
age of civilizations 2 lite apk indir<br />
|
70 |
-
age of civilizations 2 europe apk indir<br />
|
71 |
-
age of civilizations 2 world war 2 apk indir<br />
|
72 |
-
age of civilizations 2 world war 1 apk indir<br />
|
73 |
-
age of civilizations 2 civil war apk indir<br />
|
74 |
-
age of civilizations 2 asia apk indir<br />
|
75 |
-
age of civilizations 2 africa apk indir<br />
|
76 |
-
age of civilizations 2 america apk indir<br />
|
77 |
-
download game age of history mod apk android 1.com <br />
|
78 |
-
download game android offline mod terbaik - Age Of History APK <br />
|
79 |
-
download game perang offline mod - Age Of History APK <br />
|
80 |
-
download game strategi offline mod - Age Of History APK <br />
|
81 |
-
download game sejarah offline mod - Age Of History APK</p>
|
82 |
-
<p>The combat system in the game is based on dice rolls and modifiers. Each unit has a certain attack and defense value, as well as a morale value that affects its performance. When two units clash, they roll dice to determine the outcome of the battle. The modifiers depend on factors such as terrain type, fortification level, technology level, and leader skill. The winner of the battle is the one who inflicts more casualties on the enemy or forces them to retreat. The loser may lose some units or provinces as a result.</p>
|
83 |
-
<p>Some of the tips and tricks for playing the game effectively are:</p>
|
84 |
-
<ul>
|
85 |
-
<li>Plan ahead and prioritize your goals. Do you want to conquer the world or unify your region? Do you want to focus on military or economic development? Do you want to ally with other civilizations or go solo?</li>
|
86 |
-
<li>Balance your budget and resources. Do not spend more than you earn or you will go into debt. Do not overextend your army or you will lose morale and supply. Do not neglect your population or you will face rebellions and unrest.</li>
|
87 |
-
<li>Research new technologies and upgrade your units. Technology is the key to progress and power in this game. You can research new technologies using research points (RP) that you gain from buildings, events, or achievements. You can also upgrade your units using gold or RP to improve their stats and abilities.</li>
|
88 |
-
<li>Explore the map and discover new lands and civilizations. The map is full of secrets and surprises that can benefit or harm you. You can find new resources, events, wonders, relics, and more. You can also encounter new civilizations that can be friendly or hostile to you.</li>
|
89 |
-
</ul>
|
90 |
-
<p>Some of the features and modes of the game are:</p>
|
91 |
-
<ul>
|
92 |
-
<li>Historical grand campaign: This is the main mode of the game where you can play as any civilization from any age and try to achieve your objectives.</li>
|
93 |
-
<li>Scenario editor: This is where you can create your own scenarios using in-game editors. You can customize everything from the map, the provinces, the terrain types, the growth rate, the cities, the flags, the alliances, the diplomacy colors, and more.</li>
|
94 |
-
<li>Civilization creator: This is where you can create your own civilizations using in-game editors. You can customize everything from the name, the flag, the color, the leader, the ideology, the government type, the religion, the culture, and more.</li>
|
95 |
-
<li>Sandbox mode: This is where you can play with as many players as there are civilizations in a scenario. You can set up your own rules and conditions for the game.</li>
|
96 |
-
<li>Online mode: This is where you can play with other players online using multiplayer servers. You can chat with them, cooperate with them, or compete with them.</li>
|
97 |
-
</ul>
|
98 |
-
<h2>How does Age of History compare to other strategy games?</h2>
|
99 |
-
<p>Age of History is a game that has many similarities and differences with other strategy games in terms of gameplay, graphics, sound, and content. Here are some of them:</p>
|
100 |
-
<h3>The similarities and differences between Age of History and Age of Empires</h3>
|
101 |
-
<p>Age of Empires is a real-time strategy game that covers different historical periods from the Stone Age to the Iron Age. You can play as different civilizations and build your empire by collecting resources, training units, constructing buildings, and fighting enemies. You can also advance through different ages and unlock new technologies and units.</p>
|
102 |
-
<p>Some of the similarities between Age of History and Age of Empires are:</p>
|
103 |
-
<ul>
|
104 |
-
<li>Both games are strategy games that involve historical civilizations and scenarios.</li>
|
105 |
-
<li>Both games have different ages and technologies that affect the gameplay and the units.</li>
|
106 |
-
<li>Both games have editors that allow you to create your own maps and scenarios.</li>
|
107 |
-
</ul>
|
108 |
-
<p>Some of the differences between Age of History and Age of Empires are:</p>
|
109 |
-
<ul>
|
110 |
-
<li>Age of History is a turn-based game while Age of Empires is a real-time game.</li>
|
111 |
-
<li>Age of History covers the whole history of humanity while Age of Empires covers only a few historical periods.</li>
|
112 |
-
<li>Age of History has more civilizations and provinces than Age of Empires.</li>
|
113 |
-
</ul>
|
114 |
-
<h3>The similarities and differences between Age of History and Hearts of Iron IV</h3>
|
115 |
-
<p>Hearts of Iron IV is a grand strategy game that focuses on the World War II era. You can play as any country in the world and lead it to victory or defeat in the global conflict. You can also customize your country's political, economic, military, and diplomatic aspects. You can also join or create factions, declare war, make peace, research new technologies, and more.</p>
|
116 |
-
<p>Some of the similarities between Age of History and Hearts of Iron IV are:</p>
|
117 |
-
<ul>
|
118 |
-
<li>Both games are grand strategy games that involve historical scenarios and events.</li>
|
119 |
-
<li>Both games have a detailed map of the world with many provinces and regions.</li>
|
120 |
-
<li>Both games have a complex combat system that involves dice rolls, modifiers, and morale.</li>
|
121 |
-
</ul>
|
122 |
-
<p>Some of the differences between Age of History and Hearts of Iron IV are:</p>
|
123 |
-
<ul>
|
124 |
-
<li>Age of History covers the whole history of humanity while Hearts of Iron IV covers only the World War II era.</li>
|
125 |
-
<li>Age of History has more civilizations and ages than Hearts of Iron IV.</li>
|
126 |
-
<li>Hearts of Iron IV has more features and mechanics than Age of History, such as air warfare, naval warfare, espionage, resistance, supply lines, etc.</li>
|
127 |
-
</ul>
|
128 |
-
<h3>The similarities and differences between Age of History and Civilization VI</h3>
|
129 |
-
<p>Civilization VI is a turn-based strategy game that lets you build your own civilization from scratch. You can choose from different leaders and civilizations, each with their own unique abilities and bonuses. You can also explore the map, found cities, develop districts, build wonders, research technologies, adopt policies, engage in diplomacy, wage war, and more. You can also win the game by achieving one of several victory conditions, such as science, culture, religion, domination, or diplomacy.</p>
|
130 |
-
<p>Some of the similarities between Age of History and Civilization VI are:</p>
|
131 |
-
<ul>
|
132 |
-
<li>Both games are turn-based strategy games that involve historical civilizations and leaders.</li>
|
133 |
-
<li>Both games have different ages and technologies that affect the gameplay and the units.</li>
|
134 |
-
<li>Both games have diplomacy points and options that allow you to interact with other civilizations.</li>
|
135 |
-
</ul>
|
136 |
-
<p>Some of the differences between Age of History and Civilization VI are:</p>
|
137 |
-
<ul>
|
138 |
-
<li>Age of History covers the whole history of humanity while Civilization VI covers only a few historical periods.</li>
|
139 |
-
<li>Civilization VI has more features and mechanics than Age of History, such as city management, district placement, wonder construction, policy cards, religion system, loyalty system, etc.</li>
|
140 |
-
<li>Civilization VI has different victory conditions while Age of History has only one: world domination.</li>
|
141 |
-
</ul>
|
142 |
-
<h2>Conclusion</h2>
|
143 |
-
<p>In conclusion, Age of History APK indir is a grand strategy game for Android that covers the whole history of humanity, from the dawn of civilization to the far future. You can play as any civilization and lead it to glory or ruin in a campaign spanning thousands of years. You can also create your own scenarios and civilizations using in-game editors and share them with other players online. You can also explore two maps: Earth and Kepler-22b, and see how different civilizations interact with each other. Age of History is a game that is simple to learn yet hard to master, and it will challenge your strategic thinking and historical knowledge. If you are looking for a game that combines history, strategy, and creativity, you should definitely give Age of History a try.</p>
|
144 |
-
<p>If you want to download and install Age of History APK on your Android device, you can do so easily and safely from APKCombo, a website that offers free APK downloads for various apps and games. You can get the latest version of the game without any ads or in-app purchases, and enjoy its features and modes without any hassle. You can also compare this game to other strategy games, such as Age of Empires, Hearts of Iron IV, and Civilization VI, and see how it differs from them in terms of gameplay, graphics, sound, and content.</p>
|
145 |
-
<p>So what are you waiting for? Download Age of History APK indir today and start creating your own history!</p>
|
146 |
-
<h3>Five unique FAQs about the game</h3>
|
147 |
-
<ol>
|
148 |
-
<li>Q: How many civilizations are there in Age of History? <br>A: There are over 250 civilizations in the game, each with their own flag, leader, ideology, government type, religion, culture, and more.</li>
|
149 |
-
<li>Q: How can I change the language of the game? <br>A: You can change the language of the game from the settings menu. The game supports 11 languages: English, Polish, French, German, Russian, Spanish, Portuguese, Turkish, Italian, Chinese, and Japanese.</li>
|
150 |
-
<li>Q: How can I play with other players online? <br>A: You can play with other players online using multiplayer servers. You can join or create a server from the online mode menu. You can also chat with other players using the chat feature.</li>
|
151 |
-
<li>Q: How can I save and load my game progress? <br>A: You can save and load your game progress from the pause menu. You can have up to 10 save slots for each scenario. You can also autosave your game every turn or every 10 turns.</li>
|
152 |
-
<li>Q: How can I get more gold, MP, DP, or RP in the game? <br>A: You can get more gold by collecting taxes from your provinces or by trading with other civilizations. You can get more MP by building roads or ports in your provinces or by researching new technologies. You can get more DP by improving your relations with other civilizations or by completing achievements. You can get more RP by building universities or libraries in your provinces or by researching new technologies.</li>
|
153 |
-
</ol></p> 197e85843d<br />
|
154 |
-
<br />
|
155 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Tag After School APK for Android - ThaiAPK.md
DELETED
@@ -1,150 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Tag After School APK: A Horror School Life Simulation Game</h1>
|
3 |
-
<p>If you are looking for a thrilling and exciting game that combines horror, romance, and mystery, then you should try Tag After School APK. This is a game developed by Genius Studio Japan Inc., a company that specializes in creating anime-style games for Android devices. In this game, you will play as Shota-Kun, a high school student who gets involved in a deadly game of tag with ghostly girls. You will have to make choices that will affect the outcome of the story and your relationships with the girls. Are you ready to face the horrors of Tag After School APK? Read on to find out more about this game.</p>
|
4 |
-
<h2>tag after school apk thaiapk</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://jinyurl.com/2uNK1K">https://jinyurl.com/2uNK1K</a></b></p><br /><br />
|
5 |
-
<h2>What is Tag After School APK?</h2>
|
6 |
-
<p>Tag After School APK is a horror school life simulation game that was released in January 2023. It is available for free download on various websites, such as ThaiAPK, APKCombo, and others. The game has an age rating of 18+, as it contains mature visuals and themes that are not suitable for younger audiences. The game has a file size of about 100 MB, and it requires Android 5.0 or higher to run smoothly.</p>
|
7 |
-
<h3>The story and the characters of Tag After School APK</h3>
|
8 |
-
<p>The game follows the story of Shota-Kun, a normal high school student who has a crush on his childhood friend, Yui-Chan. One day, he decides to confess his feelings to her after school, but he gets interrupted by a mysterious voice that invites him to play a game of tag. He soon realizes that he is trapped in a haunted school with four ghostly girls who are after him. He has to survive until dawn by hiding from them or fighting them back. However, he also discovers that each girl has a tragic backstory that explains why they became ghosts. He can choose to help them or ignore them, depending on his actions and decisions.</p>
|
9 |
-
<p>The four ghostly girls are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Ayumi-Chan: She is the first girl that Shota-Kun encounters in the game. She is a cheerful and energetic girl who loves sports and music. She died in a car accident while going to a concert with her friends.</li>
|
12 |
-
<li>Miyuki-Chan: She is the second girl that Shota-Kun meets in the game. She is a shy and timid girl who loves books and animals. She died from an illness that made her unable to breathe properly.</li>
|
13 |
-
<li>Sakura-Chan: She is the third girl that Shota-Kun runs into in the game. She is a sweet and kind girl who loves flowers and gardening. She died from a fire that burned down her house.</li>
|
14 |
-
<li>Rin-Chan: She is the fourth and final girl that Shota-Kun faces in the game. She is a cold and aloof girl who hates everyone and everything. She died from suicide after being bullied at school.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>The gameplay and the features of Tag After School APK</h3>
|
17 |
-
<p>The game is divided into several chapters, each focusing on one of the ghostly girls. The game has two modes: story mode and free mode. In story mode, you will follow the main plot and make choices that will affect the ending of each chapter. You will also have to interact with the girls by talking to them, giving them gifts, or fighting them. In free mode, you can replay any chapter you have completed and explore different outcomes.</p>
|
18 |
-
<p>The game also has several features that make it more enjoyable and challenging, such as a timer, a map, an inventory, and a status bar. You can use these tools to plan your strategy and manage your resources. You can also collect items and clues that will help you solve the mystery of the school and the girls. Some items can also be used as weapons or gifts for the girls. You can also unlock achievements and gallery images as you progress through the game.</p>
|
19 |
-
<p>tag after school android game thaiapk<br />
|
20 |
-
tag after school apk download thaiapk<br />
|
21 |
-
tag after school apk latest version thaiapk<br />
|
22 |
-
tag after school apk mod thaiapk<br />
|
23 |
-
tag after school apk offline thaiapk<br />
|
24 |
-
tag after school apk update thaiapk<br />
|
25 |
-
tag after school app thaiapk<br />
|
26 |
-
tag after school game thaiapk<br />
|
27 |
-
tag after school gameplay thaiapk<br />
|
28 |
-
tag after school guide thaiapk<br />
|
29 |
-
tag after school review thaiapk<br />
|
30 |
-
tag after school tips thaiapk<br />
|
31 |
-
tag after school walkthrough thaiapk<br />
|
32 |
-
thaiapk tag after school 18+<br />
|
33 |
-
thaiapk tag after school android<br />
|
34 |
-
thaiapk tag after school apk 2023<br />
|
35 |
-
thaiapk tag after school apk free<br />
|
36 |
-
thaiapk tag after school apk full<br />
|
37 |
-
thaiapk tag after school apk v5.0<br />
|
38 |
-
thaiapk tag after school cheats<br />
|
39 |
-
thaiapk tag after school codes<br />
|
40 |
-
thaiapk tag after school hack<br />
|
41 |
-
thaiapk tag after school how to play<br />
|
42 |
-
thaiapk tag after school install<br />
|
43 |
-
thaiapk tag after school link<br />
|
44 |
-
thaiapk tag after school online<br />
|
45 |
-
thaiapk tag after school video<br />
|
46 |
-
ดาวน์โหลด tag after school apk thaiapk<br />
|
47 |
-
วิธีเล่น tag after school apk thaiapk<br />
|
48 |
-
สอนเล่น tag after school apk thaiapk<br />
|
49 |
-
เกม tag after school apk thaiapk<br />
|
50 |
-
เกมส์ tag after school apk thaiapk<br />
|
51 |
-
เล่นเกมส์ tag after school apk thaiapk<br />
|
52 |
-
เวอร์ชั่นล่าสุดของเกมส์ tag after school apk thaiapk<br />
|
53 |
-
แจกเกมส์ tag after school apk thaiapk<br />
|
54 |
-
แนะนำเกมส์ tag after school apk thaiapk<br />
|
55 |
-
โปรเกมส์ tag after school apk thaiapk<br />
|
56 |
-
โหลดเกมส์ tag after school apk thaiapk<br />
|
57 |
-
ไทยแอปคอม เกมส์tagafter-school-apk-thai-apk <br />
|
58 |
-
ไทยแอปคอม เกมส์tag-after-school-apk-thai-apk <br />
|
59 |
-
ไทยแอปคอม เกมส์tagafterschool-apk-thai-apk <br />
|
60 |
-
ไทยแอปคอม เกมส์tagafterschoolandroid-thai-apk <br />
|
61 |
-
ไทยแอปคอม เกมส์tagafterschoolandroid-thai-apk <br />
|
62 |
-
ไทยแอปคอม เกมส์tagafterschoolandroid-thai-apk <br />
|
63 |
-
ไทยแอปคอม เกมส์tagafterschoolandroid-thai-apk</p>
|
64 |
-
<h3>The graphics and the sound of Tag After School APK</h3>
|
65 |
-
<p>The game has stunning graphics that create a realistic and immersive atmosphere. The game uses 3D models and animations for the characters and the environments. The game also has a dark and gloomy color scheme that enhances the horror vibe. The game also has a great sound design that adds to the tension and suspense. The game has voice acting for the main characters, as well as sound effects and background music that match the mood of each scene.</p>
|
66 |
-
<h2>How to download and install Tag After School APK on Android devices?</h2>
|
67 |
-
<p>If you want to play Tag After School APK on your Android device, you will need to download and install it from a third-party source, as it is not available on the Google Play Store. Here are the steps you need to follow:</p>
|
68 |
-
<h3>Requirements and compatibility of Tag After School APK</h3>
|
69 |
-
<p>Before you download and install Tag After School APK, you need to make sure that your device meets the following requirements:</p>
|
70 |
-
<ul>
|
71 |
-
<li>Your device must have Android 5.0 or higher.</li>
|
72 |
-
<li>Your device must have at least 1 GB of RAM and 200 MB of free storage space.</li>
|
73 |
-
<li>Your device must have a stable internet connection.</li>
|
74 |
-
<li>Your device must allow installation of apps from unknown sources. You can enable this option by going to Settings > Security > Unknown Sources.</li>
|
75 |
-
</ul>
|
76 |
-
<h3>Steps to download and install Tag After School APK</h3>
|
77 |
-
<p>Once you have checked the requirements and compatibility of Tag After School APK, you can follow these steps to download and install it:</p>
|
78 |
-
<ol>
|
79 |
-
<li>Go to a reliable website that offers Tag After School APK for free download, such as ThaiAPK, APKCombo, or others.</li>
|
80 |
-
<li>Click on the download button and wait for the file to be downloaded on your device.</li>
|
81 |
-
<li>Locate the downloaded file in your file manager and tap on it to start the installation process.</li>
|
82 |
-
<li>Follow the instructions on the screen and wait for the installation to be completed.</li>
|
83 |
-
<li>Launch the game from your app drawer and enjoy playing Tag After School APK.</li>
|
84 |
-
</ol>
|
85 |
-
<h3>Tips and tricks for playing Tag After School APK</h3>
|
86 |
-
<p>If you want to have a better gaming experience with Tag After School APK, you can use these tips and tricks:</p>
|
87 |
-
<ul>
|
88 |
-
<li>Save your game frequently, as you may encounter different endings depending on your choices.</li>
|
89 |
-
<li>Explore every corner of the school, as you may find hidden items and secrets that will help you in your quest.</li>
|
90 |
-
<li>Pay attention to the timer, as you only have until dawn to survive and escape from the school.</li>
|
91 |
-
<li>Use your map wisely, as it will show you where you are and where the girls are.</li>
|
92 |
-
<li>Use your inventory smartly, as it will store your items and clues. You can also use some items as weapons or gifts for the girls.</li>
|
93 |
-
<li>Use your status bar carefully, as it will show you your health and stamina. You need to keep them high by resting, eating, or drinking.</li>
|
94 |
-
<li>Talk to the girls whenever you can, as it will affect your relationship with them. You can also give them gifts to increase their affection towards you.</li>
|
95 |
-
<li>Fight back when necessary, as some girls may attack you if they catch you. You can use items or skills to defend yourself or escape from them.</li>
|
96 |
-
<li>Be careful with your decisions, as they will have consequences on the story and the ending. You can also replay any chapter in free mode to see different outcomes.</li>
|
97 |
-
</ul>
|
98 |
-
<h2>Why should you play Tag After School APK?</h2>
|
99 |
-
<p>If you are still wondering whether Tag After School APK is worth playing or not, here are some reasons why you should give it a try:</p>
|
100 |
-
<h3>The pros and cons of Tag After School APK</h3>
|
101 |
-
<p>Like any other game, Tag After School APK has its pros and cons. Here are some of them:</p>
|
102 |
-
<table style="border: 1px solid black;">
|
103 |
-
<tr style="border: 1px solid black;">
|
104 |
-
<th style="border: 1px solid black;">Pros</th><th style="border: 1px solid black;">Cons</th></tr>
|
105 |
-
<tr style="border: 1px solid black;">
|
106 |
-
<td style="border: 1px solid black;">It has a captivating and original story that will keep you hooked until the end.</td><td style="border: 1px solid black;">It has some mature and disturbing scenes that may not be suitable for everyone.</td></tr>
|
107 |
-
<tr style="border: 1px solid black;">
|
108 |
-
<td style="border: 1px solid black;">It has beautiful and realistic graphics that create a immersive atmosphere.</td><td style="border: 1px solid black;">It has a large file size that may take up a lot of storage space on your device.</td></tr>
|
109 |
-
<tr style="border: 1px solid black;">
|
110 |
-
<td style="border: 1px solid black;">It has a great sound design that adds to the tension and suspense.</td><td style="border: 1px solid black;">It has some bugs and glitches that may affect the gameplay and performance.</td></tr>
|
111 |
-
<tr style="border: 1px solid black;">
|
112 |
-
<td style="border: 1px solid black;">It has multiple endings and outcomes that depend on your choices and actions.</td><td style="border: 1px solid black;">It has some repetitive and tedious tasks that may bore you after a while.</td></tr>
|
113 |
-
<tr style="border: 1px solid black;">
|
114 |
-
<td style="border: 1px solid black;">It has a variety of items and clues that will help you in your quest.</td><td style="border: 1px solid black;">It has a limited inventory space that may force you to discard some items.</td></tr>
|
115 |
-
<tr style="border: 1px solid black;">
|
116 |
-
<td style="border: 1px solid black;">It has a fun and challenging gameplay that will test your skills and strategy.</td><td style="border: 1px solid black;">It has a difficult and unforgiving gameplay that may frustrate you at times.</td></tr>
|
117 |
-
</table>
|
118 |
-
<h3>The ratings and reviews of Tag After School APK</h3>
|
119 |
-
<p>Tag After School APK has received positive ratings and reviews from many players who have tried it. The game has an average rating of 4.5 out of 5 stars on ThaiAPK, based on more than 1000 votes. The game also has more than 500 comments from satisfied users who have praised the game for its story, graphics, sound, gameplay, and features. Here are some of the comments from the users:</p>
|
120 |
-
<blockquote>
|
121 |
-
<p>"This game is amazing! I love the story and the characters. It is so scary and exciting at the same time. I can't wait to see what happens next."</p>
|
122 |
-
<p>"This game is awesome! I love the graphics and the sound. It is so realistic and immersive. I feel like I am really in the haunted school."</p>
|
123 |
-
<p>"This game is fantastic! I love the gameplay and the features. It is so fun and challenging. I have to think carefully before making any decision."</p>
|
124 |
-
</blockquote>
|
125 |
-
<h3>The alternatives and similar games to Tag After School APK</h3>
|
126 |
-
<p>If you like Tag After School APK, you may also like these games that are similar to it in terms of genre, theme, or style:</p>
|
127 |
-
<ul>
|
128 |
-
<li>High School Simulator: This is a game developed by KUMA GAMES, a company that also creates anime-style games for Android devices. In this game, you will play as a high school student who can do whatever you want in a realistic school environment. You can interact with other students, teachers, or objects, as well as use weapons or vehicles. You can also customize your character and your school.</li>
|
129 |
-
<li>Horrorfield: This is a game developed by Skytec Games, Inc., a company that specializes in creating horror games for Android devices. In this game, you will play as either a survivor or a killer in a multiplayer mode. You will have to cooperate with other survivors or hunt them down as a killer in various maps. You can also upgrade your skills and equipment.</li>
|
130 |
-
<li>School Days: This is a game developed by MDickie, a company that produces simulation games for Android devices. In this game, you will play as a student who has to survive the drama and chaos of school life. You can interact with other students, teachers, or objects, as well as fight or romance them. You can also customize your character and your school.</li>
|
131 |
-
</ul>
|
132 |
-
<h2>Conclusion</h2>
|
133 |
-
<p>Tag After School APK is a horror school life simulation game that will give you a thrilling and exciting gaming experience. You will play as Shota-Kun, a high school student who gets trapped in a haunted school with four ghostly girls who are after him. You will have to make choices that will affect the story and the ending of each chapter. You will also have to interact with the girls by talking to them, giving them gifts, or fighting them. You will also have to use your skills and strategy to survive until dawn by hiding from them or escaping from them. The game has stunning graphics, great sound, multiple endings, and various features that make it more enjoyable and challenging. You can download and install Tag After School APK from a third-party source, as it is not available on the Google Play Store. You can also use some tips and tricks to have a better gaming experience with Tag After School APK. If you like this game, you may also like some alternatives and similar games that are also available for Android devices.</p>
|
134 |
-
<p>Tag After School APK is a game that will keep you on the edge of your seat and make you feel a range of emotions. It is a game that will make you laugh, cry, scream, and smile. It is a game that will make you think, feel, and act. It is a game that will make you love, hate, and fear. It is a game that will make you live a horror school life simulation.</p>
|
135 |
-
<h2>FAQs</h2>
|
136 |
-
<p>Here are some frequently asked questions about Tag After School APK:</p>
|
137 |
-
<ol>
|
138 |
-
<li>Q: Is Tag After School APK safe to download and install?</li>
|
139 |
-
<li>A: Yes, Tag After School APK is safe to download and install, as long as you use a reliable website that offers the original and virus-free file. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain malware or spyware that can harm your device or compromise your privacy.</li>
|
140 |
-
<li>Q: Is Tag After School APK free to play?</li>
|
141 |
-
<li>A: Yes, Tag After School APK is free to play, as it does not require any payment or subscription to access the full content of the game. However, the game may contain some ads or in-app purchases that can enhance your gaming experience or support the developers.</li>
|
142 |
-
<li>Q: How long does it take to finish Tag After School APK?</li>
|
143 |
-
<li>A: The length of Tag After School APK depends on your choices and actions, as well as the mode and the difficulty level you choose. However, on average, it may take you about 5 to 10 hours to complete the game.</li>
|
144 |
-
<li>Q: How many endings does Tag After School APK have?</li>
|
145 |
-
<li>A: Tag After School APK has multiple endings that vary depending on your choices and actions throughout the game. There are four main endings for each girl, as well as a true ending that reveals the whole truth behind the game of tag.</li>
|
146 |
-
<li>Q: How can I get the true ending of Tag After School APK?</li>
|
147 |
-
<li>A: To get the true ending of Tag After School APK, you need to complete all the chapters with all the girls and unlock all the achievements and gallery images. You also need to make the right choices that will lead you to the true ending.</li>
|
148 |
-
</ol></p> 401be4b1e0<br />
|
149 |
-
<br />
|
150 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_dpmsolver_singlestep.py
DELETED
@@ -1,592 +0,0 @@
|
|
1 |
-
# Copyright 2022 TSAIL Team and The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
|
16 |
-
|
17 |
-
import math
|
18 |
-
from typing import List, Optional, Tuple, Union
|
19 |
-
|
20 |
-
import numpy as np
|
21 |
-
import paddle
|
22 |
-
|
23 |
-
from ..configuration_utils import ConfigMixin, register_to_config
|
24 |
-
from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS
|
25 |
-
from .scheduling_utils import SchedulerMixin, SchedulerOutput
|
26 |
-
|
27 |
-
|
28 |
-
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
|
29 |
-
"""
|
30 |
-
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
31 |
-
(1-beta) over time from t = [0,1].
|
32 |
-
|
33 |
-
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
34 |
-
to that part of the diffusion process.
|
35 |
-
|
36 |
-
|
37 |
-
Args:
|
38 |
-
num_diffusion_timesteps (`int`): the number of betas to produce.
|
39 |
-
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
40 |
-
prevent singularities.
|
41 |
-
|
42 |
-
Returns:
|
43 |
-
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
44 |
-
"""
|
45 |
-
|
46 |
-
def alpha_bar(time_step):
|
47 |
-
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
|
48 |
-
|
49 |
-
betas = []
|
50 |
-
for i in range(num_diffusion_timesteps):
|
51 |
-
t1 = i / num_diffusion_timesteps
|
52 |
-
t2 = (i + 1) / num_diffusion_timesteps
|
53 |
-
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
|
54 |
-
return paddle.to_tensor(betas, dtype=paddle.float32)
|
55 |
-
|
56 |
-
|
57 |
-
class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
|
58 |
-
"""
|
59 |
-
DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with
|
60 |
-
the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality
|
61 |
-
samples, and it can generate quite good samples even in only 10 steps.
|
62 |
-
|
63 |
-
For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095
|
64 |
-
|
65 |
-
Currently, we support the singlestep DPM-Solver for both noise prediction models and data prediction models. We
|
66 |
-
recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling.
|
67 |
-
|
68 |
-
We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
|
69 |
-
diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic
|
70 |
-
thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as
|
71 |
-
stable-diffusion).
|
72 |
-
|
73 |
-
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
|
74 |
-
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
|
75 |
-
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
|
76 |
-
[`~SchedulerMixin.from_pretrained`] functions.
|
77 |
-
|
78 |
-
Args:
|
79 |
-
num_train_timesteps (`int`): number of diffusion steps used to train the model.
|
80 |
-
beta_start (`float`): the starting `beta` value of inference.
|
81 |
-
beta_end (`float`): the final `beta` value.
|
82 |
-
beta_schedule (`str`):
|
83 |
-
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
84 |
-
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
|
85 |
-
trained_betas (`np.ndarray`, optional):
|
86 |
-
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
|
87 |
-
solver_order (`int`, default `2`):
|
88 |
-
the order of DPM-Solver; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided
|
89 |
-
sampling, and `solver_order=3` for unconditional sampling.
|
90 |
-
prediction_type (`str`, default `epsilon`):
|
91 |
-
indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`,
|
92 |
-
or `v-prediction`.
|
93 |
-
thresholding (`bool`, default `False`):
|
94 |
-
whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
|
95 |
-
For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to
|
96 |
-
use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion
|
97 |
-
models (such as stable-diffusion).
|
98 |
-
dynamic_thresholding_ratio (`float`, default `0.995`):
|
99 |
-
the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
|
100 |
-
(https://arxiv.org/abs/2205.11487).
|
101 |
-
sample_max_value (`float`, default `1.0`):
|
102 |
-
the threshold value for dynamic thresholding. Valid only when `thresholding=True` and
|
103 |
-
`algorithm_type="dpmsolver++`.
|
104 |
-
algorithm_type (`str`, default `dpmsolver++`):
|
105 |
-
the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the
|
106 |
-
algorithms in https://arxiv.org/abs/2206.00927, and the `dpmsolver++` type implements the algorithms in
|
107 |
-
https://arxiv.org/abs/2211.01095. We recommend to use `dpmsolver++` with `solver_order=2` for guided
|
108 |
-
sampling (e.g. stable-diffusion).
|
109 |
-
solver_type (`str`, default `midpoint`):
|
110 |
-
the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects
|
111 |
-
the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are
|
112 |
-
slightly better, so we recommend to use the `midpoint` type.
|
113 |
-
lower_order_final (`bool`, default `True`):
|
114 |
-
whether to use lower-order solvers in the final steps. For singlestep schedulers, we recommend to enable
|
115 |
-
this to use up all the function evaluations.
|
116 |
-
|
117 |
-
"""
|
118 |
-
|
119 |
-
_compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
|
120 |
-
order = 1
|
121 |
-
|
122 |
-
@register_to_config
|
123 |
-
def __init__(
|
124 |
-
self,
|
125 |
-
num_train_timesteps: int = 1000,
|
126 |
-
beta_start: float = 0.0001,
|
127 |
-
beta_end: float = 0.02,
|
128 |
-
beta_schedule: str = "linear",
|
129 |
-
trained_betas: Optional[np.ndarray] = None,
|
130 |
-
solver_order: int = 2,
|
131 |
-
prediction_type: str = "epsilon",
|
132 |
-
thresholding: bool = False,
|
133 |
-
dynamic_thresholding_ratio: float = 0.995,
|
134 |
-
sample_max_value: float = 1.0,
|
135 |
-
algorithm_type: str = "dpmsolver++",
|
136 |
-
solver_type: str = "midpoint",
|
137 |
-
lower_order_final: bool = True,
|
138 |
-
):
|
139 |
-
if trained_betas is not None:
|
140 |
-
self.betas = paddle.to_tensor(trained_betas, dtype=paddle.float32)
|
141 |
-
elif beta_schedule == "linear":
|
142 |
-
self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype=paddle.float32)
|
143 |
-
elif beta_schedule == "scaled_linear":
|
144 |
-
# this schedule is very specific to the latent diffusion model.
|
145 |
-
self.betas = (
|
146 |
-
paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=paddle.float32) ** 2
|
147 |
-
)
|
148 |
-
elif beta_schedule == "squaredcos_cap_v2":
|
149 |
-
# Glide cosine schedule
|
150 |
-
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
151 |
-
else:
|
152 |
-
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
|
153 |
-
|
154 |
-
self.alphas = 1.0 - self.betas
|
155 |
-
self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
|
156 |
-
# Currently we only support VP-type noise schedule
|
157 |
-
self.alpha_t = paddle.sqrt(self.alphas_cumprod)
|
158 |
-
self.sigma_t = paddle.sqrt(1 - self.alphas_cumprod)
|
159 |
-
self.lambda_t = paddle.log(self.alpha_t) - paddle.log(self.sigma_t)
|
160 |
-
|
161 |
-
# standard deviation of the initial noise distribution
|
162 |
-
self.init_noise_sigma = 1.0
|
163 |
-
|
164 |
-
# settings for DPM-Solver
|
165 |
-
if algorithm_type not in ["dpmsolver", "dpmsolver++"]:
|
166 |
-
raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}")
|
167 |
-
if solver_type not in ["midpoint", "heun"]:
|
168 |
-
raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}")
|
169 |
-
|
170 |
-
# setable values
|
171 |
-
self.num_inference_steps = None
|
172 |
-
timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy()
|
173 |
-
self.timesteps = paddle.to_tensor(timesteps)
|
174 |
-
self.model_outputs = [None] * solver_order
|
175 |
-
self.sample = None
|
176 |
-
self.order_list = self.get_order_list(num_train_timesteps)
|
177 |
-
|
178 |
-
def get_order_list(self, num_inference_steps: int) -> List[int]:
|
179 |
-
"""
|
180 |
-
Computes the solver order at each time step.
|
181 |
-
|
182 |
-
Args:
|
183 |
-
num_inference_steps (`int`):
|
184 |
-
the number of diffusion steps used when generating samples with a pre-trained model.
|
185 |
-
"""
|
186 |
-
steps = num_inference_steps
|
187 |
-
order = self.solver_order
|
188 |
-
if self.lower_order_final:
|
189 |
-
if order == 3:
|
190 |
-
if steps % 3 == 0:
|
191 |
-
orders = [1, 2, 3] * (steps // 3 - 1) + [1, 2] + [1]
|
192 |
-
elif steps % 3 == 1:
|
193 |
-
orders = [1, 2, 3] * (steps // 3) + [1]
|
194 |
-
else:
|
195 |
-
orders = [1, 2, 3] * (steps // 3) + [1, 2]
|
196 |
-
elif order == 2:
|
197 |
-
if steps % 2 == 0:
|
198 |
-
orders = [1, 2] * (steps // 2)
|
199 |
-
else:
|
200 |
-
orders = [1, 2] * (steps // 2) + [1]
|
201 |
-
elif order == 1:
|
202 |
-
orders = [1] * steps
|
203 |
-
else:
|
204 |
-
if order == 3:
|
205 |
-
orders = [1, 2, 3] * (steps // 3)
|
206 |
-
elif order == 2:
|
207 |
-
orders = [1, 2] * (steps // 2)
|
208 |
-
elif order == 1:
|
209 |
-
orders = [1] * steps
|
210 |
-
return orders
|
211 |
-
|
212 |
-
def set_timesteps(self, num_inference_steps: int):
|
213 |
-
"""
|
214 |
-
Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
|
215 |
-
|
216 |
-
Args:
|
217 |
-
num_inference_steps (`int`):
|
218 |
-
the number of diffusion steps used when generating samples with a pre-trained model.
|
219 |
-
"""
|
220 |
-
self.num_inference_steps = num_inference_steps
|
221 |
-
timesteps = (
|
222 |
-
np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)
|
223 |
-
.round()[::-1][:-1]
|
224 |
-
.copy()
|
225 |
-
.astype(np.int64)
|
226 |
-
)
|
227 |
-
self.timesteps = paddle.to_tensor(timesteps)
|
228 |
-
self.model_outputs = [None] * self.config.solver_order
|
229 |
-
self.sample = None
|
230 |
-
self.orders = self.get_order_list(num_inference_steps)
|
231 |
-
|
232 |
-
def convert_model_output(self, model_output: paddle.Tensor, timestep: int, sample: paddle.Tensor) -> paddle.Tensor:
|
233 |
-
"""
|
234 |
-
Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs.
|
235 |
-
|
236 |
-
DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to
|
237 |
-
discretize an integral of the data prediction model. So we need to first convert the model output to the
|
238 |
-
corresponding type to match the algorithm.
|
239 |
-
|
240 |
-
Note that the algorithm type and the model type is decoupled. That is to say, we can use either DPM-Solver or
|
241 |
-
DPM-Solver++ for both noise prediction model and data prediction model.
|
242 |
-
|
243 |
-
Args:
|
244 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
245 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
246 |
-
sample (`paddle.Tensor`):
|
247 |
-
current instance of sample being created by diffusion process.
|
248 |
-
|
249 |
-
Returns:
|
250 |
-
`paddle.Tensor`: the converted model output.
|
251 |
-
"""
|
252 |
-
# DPM-Solver++ needs to solve an integral of the data prediction model.
|
253 |
-
if self.config.algorithm_type == "dpmsolver++":
|
254 |
-
if self.config.prediction_type == "epsilon":
|
255 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
256 |
-
x0_pred = (sample - sigma_t * model_output) / alpha_t
|
257 |
-
elif self.config.prediction_type == "sample":
|
258 |
-
x0_pred = model_output
|
259 |
-
elif self.config.prediction_type == "v_prediction":
|
260 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
261 |
-
x0_pred = alpha_t * sample - sigma_t * model_output
|
262 |
-
else:
|
263 |
-
raise ValueError(
|
264 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
|
265 |
-
" `v_prediction` for the DPMSolverSinglestepScheduler."
|
266 |
-
)
|
267 |
-
|
268 |
-
if self.config.thresholding:
|
269 |
-
# Dynamic thresholding in https://arxiv.org/abs/2205.11487
|
270 |
-
dtype = x0_pred.dtype
|
271 |
-
dynamic_max_val = paddle.quantile(
|
272 |
-
paddle.abs(x0_pred).reshape((x0_pred.shape[0], -1)).cast("float32"),
|
273 |
-
self.config.dynamic_thresholding_ratio,
|
274 |
-
axis=1,
|
275 |
-
)
|
276 |
-
dynamic_max_val = paddle.maximum(
|
277 |
-
dynamic_max_val,
|
278 |
-
self.config.sample_max_value * paddle.ones_like(dynamic_max_val),
|
279 |
-
)[(...,) + (None,) * (x0_pred.ndim - 1)]
|
280 |
-
x0_pred = paddle.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val
|
281 |
-
x0_pred = x0_pred.cast(dtype)
|
282 |
-
return x0_pred
|
283 |
-
# DPM-Solver needs to solve an integral of the noise prediction model.
|
284 |
-
elif self.config.algorithm_type == "dpmsolver":
|
285 |
-
if self.config.prediction_type == "epsilon":
|
286 |
-
return model_output
|
287 |
-
elif self.config.prediction_type == "sample":
|
288 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
289 |
-
epsilon = (sample - alpha_t * model_output) / sigma_t
|
290 |
-
return epsilon
|
291 |
-
elif self.config.prediction_type == "v_prediction":
|
292 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
293 |
-
epsilon = alpha_t * model_output + sigma_t * sample
|
294 |
-
return epsilon
|
295 |
-
else:
|
296 |
-
raise ValueError(
|
297 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
|
298 |
-
" `v_prediction` for the DPMSolverSinglestepScheduler."
|
299 |
-
)
|
300 |
-
|
301 |
-
def dpm_solver_first_order_update(
|
302 |
-
self,
|
303 |
-
model_output: paddle.Tensor,
|
304 |
-
timestep: int,
|
305 |
-
prev_timestep: int,
|
306 |
-
sample: paddle.Tensor,
|
307 |
-
) -> paddle.Tensor:
|
308 |
-
"""
|
309 |
-
One step for the first-order DPM-Solver (equivalent to DDIM).
|
310 |
-
|
311 |
-
See https://arxiv.org/abs/2206.00927 for the detailed derivation.
|
312 |
-
|
313 |
-
Args:
|
314 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
315 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
316 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
317 |
-
sample (`paddle.Tensor`):
|
318 |
-
current instance of sample being created by diffusion process.
|
319 |
-
|
320 |
-
Returns:
|
321 |
-
`paddle.Tensor`: the sample tensor at the previous timestep.
|
322 |
-
"""
|
323 |
-
lambda_t, lambda_s = self.lambda_t[prev_timestep], self.lambda_t[timestep]
|
324 |
-
alpha_t, alpha_s = self.alpha_t[prev_timestep], self.alpha_t[timestep]
|
325 |
-
sigma_t, sigma_s = self.sigma_t[prev_timestep], self.sigma_t[timestep]
|
326 |
-
h = lambda_t - lambda_s
|
327 |
-
if self.config.algorithm_type == "dpmsolver++":
|
328 |
-
x_t = (sigma_t / sigma_s) * sample - (alpha_t * (paddle.exp(-h) - 1.0)) * model_output
|
329 |
-
elif self.config.algorithm_type == "dpmsolver":
|
330 |
-
x_t = (alpha_t / alpha_s) * sample - (sigma_t * (paddle.exp(h) - 1.0)) * model_output
|
331 |
-
return x_t
|
332 |
-
|
333 |
-
def singlestep_dpm_solver_second_order_update(
|
334 |
-
self,
|
335 |
-
model_output_list: List[paddle.Tensor],
|
336 |
-
timestep_list: List[int],
|
337 |
-
prev_timestep: int,
|
338 |
-
sample: paddle.Tensor,
|
339 |
-
) -> paddle.Tensor:
|
340 |
-
"""
|
341 |
-
One step for the second-order singlestep DPM-Solver.
|
342 |
-
|
343 |
-
It computes the solution at time `prev_timestep` from the time `timestep_list[-2]`.
|
344 |
-
|
345 |
-
Args:
|
346 |
-
model_output_list (`List[paddle.Tensor]`):
|
347 |
-
direct outputs from learned diffusion model at current and latter timesteps.
|
348 |
-
timestep (`int`): current and latter discrete timestep in the diffusion chain.
|
349 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
350 |
-
sample (`paddle.Tensor`):
|
351 |
-
current instance of sample being created by diffusion process.
|
352 |
-
|
353 |
-
Returns:
|
354 |
-
`paddle.Tensor`: the sample tensor at the previous timestep.
|
355 |
-
"""
|
356 |
-
t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]
|
357 |
-
m0, m1 = model_output_list[-1], model_output_list[-2]
|
358 |
-
lambda_t, lambda_s0, lambda_s1 = self.lambda_t[t], self.lambda_t[s0], self.lambda_t[s1]
|
359 |
-
alpha_t, alpha_s1 = self.alpha_t[t], self.alpha_t[s1]
|
360 |
-
sigma_t, sigma_s1 = self.sigma_t[t], self.sigma_t[s1]
|
361 |
-
h, h_0 = lambda_t - lambda_s1, lambda_s0 - lambda_s1
|
362 |
-
r0 = h_0 / h
|
363 |
-
D0, D1 = m1, (1.0 / r0) * (m0 - m1)
|
364 |
-
if self.config.algorithm_type == "dpmsolver++":
|
365 |
-
# See https://arxiv.org/abs/2211.01095 for detailed derivations
|
366 |
-
if self.config.solver_type == "midpoint":
|
367 |
-
x_t = (
|
368 |
-
(sigma_t / sigma_s1) * sample
|
369 |
-
- (alpha_t * (paddle.exp(-h) - 1.0)) * D0
|
370 |
-
- 0.5 * (alpha_t * (paddle.exp(-h) - 1.0)) * D1
|
371 |
-
)
|
372 |
-
elif self.config.solver_type == "heun":
|
373 |
-
x_t = (
|
374 |
-
(sigma_t / sigma_s1) * sample
|
375 |
-
- (alpha_t * (paddle.exp(-h) - 1.0)) * D0
|
376 |
-
+ (alpha_t * ((paddle.exp(-h) - 1.0) / h + 1.0)) * D1
|
377 |
-
)
|
378 |
-
elif self.config.algorithm_type == "dpmsolver":
|
379 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
380 |
-
if self.config.solver_type == "midpoint":
|
381 |
-
x_t = (
|
382 |
-
(alpha_t / alpha_s1) * sample
|
383 |
-
- (sigma_t * (paddle.exp(h) - 1.0)) * D0
|
384 |
-
- 0.5 * (sigma_t * (paddle.exp(h) - 1.0)) * D1
|
385 |
-
)
|
386 |
-
elif self.config.solver_type == "heun":
|
387 |
-
x_t = (
|
388 |
-
(alpha_t / alpha_s1) * sample
|
389 |
-
- (sigma_t * (paddle.exp(h) - 1.0)) * D0
|
390 |
-
- (sigma_t * ((paddle.exp(h) - 1.0) / h - 1.0)) * D1
|
391 |
-
)
|
392 |
-
return x_t
|
393 |
-
|
394 |
-
def singlestep_dpm_solver_third_order_update(
|
395 |
-
self,
|
396 |
-
model_output_list: List[paddle.Tensor],
|
397 |
-
timestep_list: List[int],
|
398 |
-
prev_timestep: int,
|
399 |
-
sample: paddle.Tensor,
|
400 |
-
) -> paddle.Tensor:
|
401 |
-
"""
|
402 |
-
One step for the third-order singlestep DPM-Solver.
|
403 |
-
|
404 |
-
It computes the solution at time `prev_timestep` from the time `timestep_list[-3]`.
|
405 |
-
|
406 |
-
Args:
|
407 |
-
model_output_list (`List[paddle.Tensor]`):
|
408 |
-
direct outputs from learned diffusion model at current and latter timesteps.
|
409 |
-
timestep (`int`): current and latter discrete timestep in the diffusion chain.
|
410 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
411 |
-
sample (`paddle.Tensor`):
|
412 |
-
current instance of sample being created by diffusion process.
|
413 |
-
|
414 |
-
Returns:
|
415 |
-
`paddle.Tensor`: the sample tensor at the previous timestep.
|
416 |
-
"""
|
417 |
-
t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
|
418 |
-
m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
|
419 |
-
lambda_t, lambda_s0, lambda_s1, lambda_s2 = (
|
420 |
-
self.lambda_t[t],
|
421 |
-
self.lambda_t[s0],
|
422 |
-
self.lambda_t[s1],
|
423 |
-
self.lambda_t[s2],
|
424 |
-
)
|
425 |
-
alpha_t, alpha_s2 = self.alpha_t[t], self.alpha_t[s2]
|
426 |
-
sigma_t, sigma_s2 = self.sigma_t[t], self.sigma_t[s2]
|
427 |
-
h, h_0, h_1 = lambda_t - lambda_s2, lambda_s0 - lambda_s2, lambda_s1 - lambda_s2
|
428 |
-
r0, r1 = h_0 / h, h_1 / h
|
429 |
-
D0 = m2
|
430 |
-
D1_0, D1_1 = (1.0 / r1) * (m1 - m2), (1.0 / r0) * (m0 - m2)
|
431 |
-
D1 = (r0 * D1_0 - r1 * D1_1) / (r0 - r1)
|
432 |
-
D2 = 2.0 * (D1_1 - D1_0) / (r0 - r1)
|
433 |
-
if self.config.algorithm_type == "dpmsolver++":
|
434 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
435 |
-
if self.config.solver_type == "midpoint":
|
436 |
-
x_t = (
|
437 |
-
(sigma_t / sigma_s2) * sample
|
438 |
-
- (alpha_t * (paddle.exp(-h) - 1.0)) * D0
|
439 |
-
+ (alpha_t * ((paddle.exp(-h) - 1.0) / h + 1.0)) * D1_1
|
440 |
-
)
|
441 |
-
elif self.config.solver_type == "heun":
|
442 |
-
x_t = (
|
443 |
-
(sigma_t / sigma_s2) * sample
|
444 |
-
- (alpha_t * (paddle.exp(-h) - 1.0)) * D0
|
445 |
-
+ (alpha_t * ((paddle.exp(-h) - 1.0) / h + 1.0)) * D1
|
446 |
-
- (alpha_t * ((paddle.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2
|
447 |
-
)
|
448 |
-
elif self.config.algorithm_type == "dpmsolver":
|
449 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
450 |
-
if self.config.solver_type == "midpoint":
|
451 |
-
x_t = (
|
452 |
-
(alpha_t / alpha_s2) * sample
|
453 |
-
- (sigma_t * (paddle.exp(h) - 1.0)) * D0
|
454 |
-
- (sigma_t * ((paddle.exp(h) - 1.0) / h - 1.0)) * D1_1
|
455 |
-
)
|
456 |
-
elif self.config.solver_type == "heun":
|
457 |
-
x_t = (
|
458 |
-
(alpha_t / alpha_s2) * sample
|
459 |
-
- (sigma_t * (paddle.exp(h) - 1.0)) * D0
|
460 |
-
- (sigma_t * ((paddle.exp(h) - 1.0) / h - 1.0)) * D1
|
461 |
-
- (sigma_t * ((paddle.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
|
462 |
-
)
|
463 |
-
return x_t
|
464 |
-
|
465 |
-
def singlestep_dpm_solver_update(
|
466 |
-
self,
|
467 |
-
model_output_list: List[paddle.Tensor],
|
468 |
-
timestep_list: List[int],
|
469 |
-
prev_timestep: int,
|
470 |
-
sample: paddle.Tensor,
|
471 |
-
order: int,
|
472 |
-
) -> paddle.Tensor:
|
473 |
-
"""
|
474 |
-
One step for the singlestep DPM-Solver.
|
475 |
-
|
476 |
-
Args:
|
477 |
-
model_output_list (`List[paddle.Tensor]`):
|
478 |
-
direct outputs from learned diffusion model at current and latter timesteps.
|
479 |
-
timestep (`int`): current and latter discrete timestep in the diffusion chain.
|
480 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
481 |
-
sample (`paddle.Tensor`):
|
482 |
-
current instance of sample being created by diffusion process.
|
483 |
-
order (`int`):
|
484 |
-
the solver order at this step.
|
485 |
-
|
486 |
-
Returns:
|
487 |
-
`paddle.Tensor`: the sample tensor at the previous timestep.
|
488 |
-
"""
|
489 |
-
if order == 1:
|
490 |
-
return self.dpm_solver_first_order_update(model_output_list[-1], timestep_list[-1], prev_timestep, sample)
|
491 |
-
elif order == 2:
|
492 |
-
return self.singlestep_dpm_solver_second_order_update(
|
493 |
-
model_output_list, timestep_list, prev_timestep, sample
|
494 |
-
)
|
495 |
-
elif order == 3:
|
496 |
-
return self.singlestep_dpm_solver_third_order_update(
|
497 |
-
model_output_list, timestep_list, prev_timestep, sample
|
498 |
-
)
|
499 |
-
else:
|
500 |
-
raise ValueError(f"Order must be 1, 2, 3, got {order}")
|
501 |
-
|
502 |
-
def step(
|
503 |
-
self,
|
504 |
-
model_output: paddle.Tensor,
|
505 |
-
timestep: int,
|
506 |
-
sample: paddle.Tensor,
|
507 |
-
return_dict: bool = True,
|
508 |
-
) -> Union[SchedulerOutput, Tuple]:
|
509 |
-
"""
|
510 |
-
Step function propagating the sample with the singlestep DPM-Solver.
|
511 |
-
|
512 |
-
Args:
|
513 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
514 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
515 |
-
sample (`paddle.Tensor`):
|
516 |
-
current instance of sample being created by diffusion process.
|
517 |
-
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
|
518 |
-
|
519 |
-
Returns:
|
520 |
-
[`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
|
521 |
-
True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
|
522 |
-
|
523 |
-
"""
|
524 |
-
if self.num_inference_steps is None:
|
525 |
-
raise ValueError(
|
526 |
-
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
527 |
-
)
|
528 |
-
|
529 |
-
step_index = (self.timesteps == timestep).nonzero()
|
530 |
-
if len(step_index) == 0:
|
531 |
-
step_index = len(self.timesteps) - 1
|
532 |
-
else:
|
533 |
-
step_index = step_index.item()
|
534 |
-
prev_timestep = 0 if step_index == len(self.timesteps) - 1 else self.timesteps[step_index + 1]
|
535 |
-
|
536 |
-
model_output = self.convert_model_output(model_output, timestep, sample)
|
537 |
-
for i in range(self.config.solver_order - 1):
|
538 |
-
self.model_outputs[i] = self.model_outputs[i + 1]
|
539 |
-
self.model_outputs[-1] = model_output
|
540 |
-
|
541 |
-
order = self.order_list[step_index]
|
542 |
-
# For single-step solvers, we use the initial value at each time with order = 1.
|
543 |
-
if order == 1:
|
544 |
-
self.sample = sample
|
545 |
-
|
546 |
-
timestep_list = [self.timesteps[step_index - i] for i in range(order - 1, 0, -1)] + [timestep]
|
547 |
-
prev_sample = self.singlestep_dpm_solver_update(
|
548 |
-
self.model_outputs, timestep_list, prev_timestep, self.sample, order
|
549 |
-
)
|
550 |
-
|
551 |
-
if not return_dict:
|
552 |
-
return (prev_sample,)
|
553 |
-
|
554 |
-
return SchedulerOutput(prev_sample=prev_sample)
|
555 |
-
|
556 |
-
def scale_model_input(self, sample: paddle.Tensor, *args, **kwargs) -> paddle.Tensor:
|
557 |
-
"""
|
558 |
-
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
559 |
-
current timestep.
|
560 |
-
|
561 |
-
Args:
|
562 |
-
sample (`paddle.Tensor`): input sample
|
563 |
-
|
564 |
-
Returns:
|
565 |
-
`paddle.Tensor`: scaled input sample
|
566 |
-
"""
|
567 |
-
return sample
|
568 |
-
|
569 |
-
def add_noise(
|
570 |
-
self,
|
571 |
-
original_samples: paddle.Tensor,
|
572 |
-
noise: paddle.Tensor,
|
573 |
-
timesteps: paddle.Tensor,
|
574 |
-
) -> paddle.Tensor:
|
575 |
-
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
|
576 |
-
self.alphas_cumprod = self.alphas_cumprod.cast(original_samples.dtype)
|
577 |
-
|
578 |
-
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
|
579 |
-
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
580 |
-
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
|
581 |
-
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
582 |
-
|
583 |
-
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
|
584 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
585 |
-
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
|
586 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
587 |
-
|
588 |
-
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
|
589 |
-
return noisy_samples
|
590 |
-
|
591 |
-
def __len__(self):
|
592 |
-
return self.config.num_train_timesteps
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/44ov41za8i/FreeVC/speaker_encoder/config.py
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
librispeech_datasets = {
|
2 |
-
"train": {
|
3 |
-
"clean": ["LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360"],
|
4 |
-
"other": ["LibriSpeech/train-other-500"]
|
5 |
-
},
|
6 |
-
"test": {
|
7 |
-
"clean": ["LibriSpeech/test-clean"],
|
8 |
-
"other": ["LibriSpeech/test-other"]
|
9 |
-
},
|
10 |
-
"dev": {
|
11 |
-
"clean": ["LibriSpeech/dev-clean"],
|
12 |
-
"other": ["LibriSpeech/dev-other"]
|
13 |
-
},
|
14 |
-
}
|
15 |
-
libritts_datasets = {
|
16 |
-
"train": {
|
17 |
-
"clean": ["LibriTTS/train-clean-100", "LibriTTS/train-clean-360"],
|
18 |
-
"other": ["LibriTTS/train-other-500"]
|
19 |
-
},
|
20 |
-
"test": {
|
21 |
-
"clean": ["LibriTTS/test-clean"],
|
22 |
-
"other": ["LibriTTS/test-other"]
|
23 |
-
},
|
24 |
-
"dev": {
|
25 |
-
"clean": ["LibriTTS/dev-clean"],
|
26 |
-
"other": ["LibriTTS/dev-other"]
|
27 |
-
},
|
28 |
-
}
|
29 |
-
voxceleb_datasets = {
|
30 |
-
"voxceleb1" : {
|
31 |
-
"train": ["VoxCeleb1/wav"],
|
32 |
-
"test": ["VoxCeleb1/test_wav"]
|
33 |
-
},
|
34 |
-
"voxceleb2" : {
|
35 |
-
"train": ["VoxCeleb2/dev/aac"],
|
36 |
-
"test": ["VoxCeleb2/test_wav"]
|
37 |
-
}
|
38 |
-
}
|
39 |
-
|
40 |
-
other_datasets = [
|
41 |
-
"LJSpeech-1.1",
|
42 |
-
"VCTK-Corpus/wav48",
|
43 |
-
]
|
44 |
-
|
45 |
-
anglophone_nationalites = ["australia", "canada", "ireland", "uk", "usa"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AB-TW/team-ai/agents/tools/smart_domain/persistent_layer_code_tool.py
DELETED
@@ -1,55 +0,0 @@
|
|
1 |
-
from langchain import LLMChain, PromptTemplate
|
2 |
-
from langchain.agents import tool
|
3 |
-
|
4 |
-
from models import llm
|
5 |
-
from agents.tools.smart_domain.common import getPrefix
|
6 |
-
from agents.tools.smart_domain.db_entity_repository import db_entity_architecture, db_entity_test_strategy
|
7 |
-
from agents.tools.smart_domain.association_impl import association_impl_architecture, association_impl_test_strategy
|
8 |
-
|
9 |
-
|
10 |
-
persistent_task = """"Your task is to generate the persistent layer tests and product code."""
|
11 |
-
persistent_tech_stack = """Java17、reactor、lombok、Junit5、reactor test、Mockito、 Spring Data Reactive Couchbase、Testcontainers、Couchbase、WebClient"""
|
12 |
-
persistent_architecture = f"""the persistent layer inclue 3 componets:
|
13 |
-
{db_entity_architecture}
|
14 |
-
{association_impl_architecture}"""
|
15 |
-
|
16 |
-
persistent_test_strategy = f"""{db_entity_test_strategy}
|
17 |
-
{association_impl_test_strategy}"""
|
18 |
-
|
19 |
-
PERSISTENT_LAYER = getPrefix(persistent_task, persistent_tech_stack, persistent_architecture, persistent_test_strategy) + """
|
20 |
-
|
21 |
-
Use the following format:
|
22 |
-
request: the request that you need to fulfill include Entity and Association of domain layer
|
23 |
-
|
24 |
-
DBEntity:
|
25 |
-
```
|
26 |
-
the DBEntity code that you write to fulfill the request, follow TechStack and Architecture
|
27 |
-
```
|
28 |
-
|
29 |
-
Repository:
|
30 |
-
```
|
31 |
-
the Repository code that you write to fulfill the request, follow TechStack and Architecture
|
32 |
-
```
|
33 |
-
|
34 |
-
Association Impletation:
|
35 |
-
```
|
36 |
-
the Association Impletation code that you write to fulfill the request, follow TechStack and Architecture
|
37 |
-
```
|
38 |
-
|
39 |
-
Test:
|
40 |
-
```
|
41 |
-
the test code that you write to fulfill the request, follow TechStack Architecture and TestStrategy
|
42 |
-
```
|
43 |
-
|
44 |
-
request: {input}"""
|
45 |
-
|
46 |
-
PERSISTENT_LAYER_PROMPT = PromptTemplate(input_variables=["input"], template=PERSISTENT_LAYER,)
|
47 |
-
|
48 |
-
persistentChain = LLMChain(llm = llm(temperature=0.1), prompt=PERSISTENT_LAYER_PROMPT)
|
49 |
-
|
50 |
-
|
51 |
-
@tool("Generate Persistent Layer Code", return_direct=True)
|
52 |
-
def persistentLayerCodeGenerator(input: str) -> str:
|
53 |
-
'''useful for when you need to generate persistent layer code'''
|
54 |
-
response = persistentChain.run(input)
|
55 |
-
return response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AE-NV/sentiment-productreview/app.py
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
alias = "Sentiment Analysis on product reviews"
|
3 |
-
description = "Add a product review you can find on the internet. The model is trained on multiple languages so you can also test for that!"
|
4 |
-
name = "models/nlptown/bert-base-multilingual-uncased-sentiment"
|
5 |
-
examples = [
|
6 |
-
['''We vinden het aanbod heel lekker maar ...
|
7 |
-
We vinden het aanbod heel lekker.
|
8 |
-
Wat we wel heel erg spijtig vinden dat is dat er bij zoveel gerechten nog eens een supplement wordt gevraagd.
|
9 |
-
Jullie prijzen stijgen al regelmatig!
|
10 |
-
Jullie geven ook wel cadeaus maar nooit voor de gebruikers. Geef ons ook eens af en toe een bonus i.p.v. te proberen méér klanten te krijgen!
|
11 |
-
' '''],
|
12 |
-
['''Slechte kwaliteit
|
13 |
-
De maaltijden zijn veel te Nederlands getint, groenten zijn niet vers als ze geleverd worden, vlees is van slechte en goedkope kwaliteit, broodjes die bijgeleverd worden zijn niet lekker.. structuur van een spons..
|
14 |
-
Ik hoop dat ik zonder probleem het contract kan stopzetten…'''
|
15 |
-
],
|
16 |
-
]
|
17 |
-
gr.Interface.load(name=name,
|
18 |
-
alias=alias,
|
19 |
-
description=description,
|
20 |
-
examples=examples).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/model_cards/MUSICGEN_MODEL_CARD.md
DELETED
@@ -1,90 +0,0 @@
|
|
1 |
-
# MusicGen Model Card
|
2 |
-
|
3 |
-
## Model details
|
4 |
-
|
5 |
-
**Organization developing the model:** The FAIR team of Meta AI.
|
6 |
-
|
7 |
-
**Model date:** MusicGen was trained between April 2023 and May 2023.
|
8 |
-
|
9 |
-
**Model version:** This is the version 1 of the model.
|
10 |
-
|
11 |
-
**Model type:** MusicGen consists of an EnCodec model for audio tokenization, an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters ; and two variants: a model trained for text-to-music generation task and a model trained for melody-guided music generation.
|
12 |
-
|
13 |
-
**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv].
|
14 |
-
|
15 |
-
**Citation details:** See [our paper][arxiv]
|
16 |
-
|
17 |
-
**License:** Code is released under MIT, model weights are released under CC-BY-NC 4.0.
|
18 |
-
|
19 |
-
**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [GitHub repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue.
|
20 |
-
|
21 |
-
## Intended use
|
22 |
-
**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including:
|
23 |
-
|
24 |
-
- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science
|
25 |
-
- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs
|
26 |
-
|
27 |
-
**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateur seeking to better understand those models.
|
28 |
-
|
29 |
-
**Out-of-scope use cases:** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
|
30 |
-
|
31 |
-
## Metrics
|
32 |
-
|
33 |
-
**Models performance measures:** We used the following objective measure to evaluate the model on a standard music benchmark:
|
34 |
-
|
35 |
-
- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish)
|
36 |
-
- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST)
|
37 |
-
- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model
|
38 |
-
|
39 |
-
Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes:
|
40 |
-
|
41 |
-
- Overall quality of the music samples;
|
42 |
-
- Text relevance to the provided text input;
|
43 |
-
- Adherence to the melody for melody-guided music generation.
|
44 |
-
|
45 |
-
More details on performance measures and human studies can be found in the paper.
|
46 |
-
|
47 |
-
**Decision thresholds:** Not applicable.
|
48 |
-
|
49 |
-
## Evaluation datasets
|
50 |
-
|
51 |
-
The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set.
|
52 |
-
|
53 |
-
## Training datasets
|
54 |
-
|
55 |
-
The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing.
|
56 |
-
|
57 |
-
## Evaluation results
|
58 |
-
|
59 |
-
Below are the objective metrics obtained on MusicCaps with the released model. Note that for the publicly released models, we had all the datasets go through a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs), in order to keep only the instrumental part. This explains the difference in objective metrics with the models used in the paper.
|
60 |
-
|
61 |
-
| Model | Frechet Audio Distance | KLD | Text Consistency | Chroma Cosine Similarity |
|
62 |
-
|---|---|---|---|---|
|
63 |
-
| facebook/musicgen-small | 4.88 | 1.28 | 0.27 | - |
|
64 |
-
| facebook/musicgen-medium | 5.14 | 1.24 | 0.28 | - |
|
65 |
-
| facebook/musicgen-large | 5.48 | 1.22 | 0.28 | - |
|
66 |
-
| facebook/musicgen-melody | 4.93 | 1.26 | 0.27 | 0.44 |
|
67 |
-
|
68 |
-
More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Results section.
|
69 |
-
|
70 |
-
## Limitations and biases
|
71 |
-
|
72 |
-
**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data, we believe that scaling the model on larger datasets can further improve the performance of the model.
|
73 |
-
|
74 |
-
**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs).
|
75 |
-
|
76 |
-
**Limitations:**
|
77 |
-
|
78 |
-
- The model is not able to generate realistic vocals.
|
79 |
-
- The model has been trained with English descriptions and will not perform as well in other languages.
|
80 |
-
- The model does not perform equally well for all music styles and cultures.
|
81 |
-
- The model sometimes generates end of songs, collapsing to silence.
|
82 |
-
- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results.
|
83 |
-
|
84 |
-
**Biases:** The source of data is potentially lacking diversity and all music cultures are not equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exists. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive.
|
85 |
-
|
86 |
-
**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow to broaden the application to new and more representative data.
|
87 |
-
|
88 |
-
**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks.
|
89 |
-
|
90 |
-
[arxiv]: https://arxiv.org/abs/2306.05284
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/base_binarizer_emotion.py
DELETED
@@ -1,352 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
os.environ["OMP_NUM_THREADS"] = "1"
|
4 |
-
import torch
|
5 |
-
from collections import Counter
|
6 |
-
from utils.text_encoder import TokenTextEncoder
|
7 |
-
from data_gen.tts.emotion import inference as EmotionEncoder
|
8 |
-
from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance
|
9 |
-
from data_gen.tts.emotion.inference import preprocess_wav
|
10 |
-
from utils.multiprocess_utils import chunked_multiprocess_run
|
11 |
-
import random
|
12 |
-
import traceback
|
13 |
-
import json
|
14 |
-
from resemblyzer import VoiceEncoder
|
15 |
-
from tqdm import tqdm
|
16 |
-
from data_gen.tts.data_gen_utils import get_mel2ph, get_pitch, build_phone_encoder, is_sil_phoneme
|
17 |
-
from utils.hparams import hparams, set_hparams
|
18 |
-
import numpy as np
|
19 |
-
from utils.indexed_datasets import IndexedDatasetBuilder
|
20 |
-
from vocoders.base_vocoder import get_vocoder_cls
|
21 |
-
import pandas as pd
|
22 |
-
|
23 |
-
|
24 |
-
class BinarizationError(Exception):
|
25 |
-
pass
|
26 |
-
|
27 |
-
|
28 |
-
class EmotionBinarizer:
|
29 |
-
def __init__(self, processed_data_dir=None):
|
30 |
-
if processed_data_dir is None:
|
31 |
-
processed_data_dir = hparams['processed_data_dir']
|
32 |
-
self.processed_data_dirs = processed_data_dir.split(",")
|
33 |
-
self.binarization_args = hparams['binarization_args']
|
34 |
-
self.pre_align_args = hparams['pre_align_args']
|
35 |
-
self.item2txt = {}
|
36 |
-
self.item2ph = {}
|
37 |
-
self.item2wavfn = {}
|
38 |
-
self.item2tgfn = {}
|
39 |
-
self.item2spk = {}
|
40 |
-
self.item2emo = {}
|
41 |
-
|
42 |
-
def load_meta_data(self):
|
43 |
-
for ds_id, processed_data_dir in enumerate(self.processed_data_dirs):
|
44 |
-
self.meta_df = pd.read_csv(f"{processed_data_dir}/metadata_phone.csv", dtype=str)
|
45 |
-
for r_idx, r in tqdm(self.meta_df.iterrows(), desc='Loading meta data.'):
|
46 |
-
item_name = raw_item_name = r['item_name']
|
47 |
-
if len(self.processed_data_dirs) > 1:
|
48 |
-
item_name = f'ds{ds_id}_{item_name}'
|
49 |
-
self.item2txt[item_name] = r['txt']
|
50 |
-
self.item2ph[item_name] = r['ph']
|
51 |
-
self.item2wavfn[item_name] = r['wav_fn']
|
52 |
-
self.item2spk[item_name] = r.get('spk_name', 'SPK1') \
|
53 |
-
if self.binarization_args['with_spk_id'] else 'SPK1'
|
54 |
-
if len(self.processed_data_dirs) > 1:
|
55 |
-
self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}"
|
56 |
-
self.item2tgfn[item_name] = f"{processed_data_dir}/mfa_outputs/{raw_item_name}.TextGrid"
|
57 |
-
self.item2emo[item_name] = r.get('others', '"Neutral"')
|
58 |
-
self.item_names = sorted(list(self.item2txt.keys()))
|
59 |
-
if self.binarization_args['shuffle']:
|
60 |
-
random.seed(1234)
|
61 |
-
random.shuffle(self.item_names)
|
62 |
-
|
63 |
-
@property
|
64 |
-
def train_item_names(self):
|
65 |
-
return self.item_names[hparams['test_num']:]
|
66 |
-
|
67 |
-
@property
|
68 |
-
def valid_item_names(self):
|
69 |
-
return self.item_names[:hparams['test_num']]
|
70 |
-
|
71 |
-
@property
|
72 |
-
def test_item_names(self):
|
73 |
-
return self.valid_item_names
|
74 |
-
|
75 |
-
def build_spk_map(self):
|
76 |
-
spk_map = set()
|
77 |
-
for item_name in self.item_names:
|
78 |
-
spk_name = self.item2spk[item_name]
|
79 |
-
spk_map.add(spk_name)
|
80 |
-
spk_map = {x: i for i, x in enumerate(sorted(list(spk_map)))}
|
81 |
-
print("| #Spk: ", len(spk_map))
|
82 |
-
assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map)
|
83 |
-
return spk_map
|
84 |
-
|
85 |
-
def build_emo_map(self):
|
86 |
-
emo_map = set()
|
87 |
-
for item_name in self.item_names:
|
88 |
-
emo_name = self.item2emo[item_name]
|
89 |
-
emo_map.add(emo_name)
|
90 |
-
emo_map = {x: i for i, x in enumerate(sorted(list(emo_map)))}
|
91 |
-
print("| #Emo: ", len(emo_map))
|
92 |
-
return emo_map
|
93 |
-
|
94 |
-
def item_name2spk_id(self, item_name):
|
95 |
-
return self.spk_map[self.item2spk[item_name]]
|
96 |
-
|
97 |
-
def item_name2emo_id(self, item_name):
|
98 |
-
return self.emo_map[self.item2emo[item_name]]
|
99 |
-
|
100 |
-
def _phone_encoder(self):
|
101 |
-
ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json"
|
102 |
-
ph_set = []
|
103 |
-
if self.binarization_args['reset_phone_dict'] or not os.path.exists(ph_set_fn):
|
104 |
-
for ph_sent in self.item2ph.values():
|
105 |
-
ph_set += ph_sent.split(' ')
|
106 |
-
ph_set = sorted(set(ph_set))
|
107 |
-
json.dump(ph_set, open(ph_set_fn, 'w'))
|
108 |
-
print("| Build phone set: ", ph_set)
|
109 |
-
else:
|
110 |
-
ph_set = json.load(open(ph_set_fn, 'r'))
|
111 |
-
print("| Load phone set: ", ph_set)
|
112 |
-
return build_phone_encoder(hparams['binary_data_dir'])
|
113 |
-
|
114 |
-
def _word_encoder(self):
|
115 |
-
fn = f"{hparams['binary_data_dir']}/word_set.json"
|
116 |
-
word_set = []
|
117 |
-
if self.binarization_args['reset_word_dict']:
|
118 |
-
for word_sent in self.item2txt.values():
|
119 |
-
word_set += [x for x in word_sent.split(' ') if x != '']
|
120 |
-
word_set = Counter(word_set)
|
121 |
-
total_words = sum(word_set.values())
|
122 |
-
word_set = word_set.most_common(hparams['word_size'])
|
123 |
-
num_unk_words = total_words - sum([x[1] for x in word_set])
|
124 |
-
word_set = [x[0] for x in word_set]
|
125 |
-
json.dump(word_set, open(fn, 'w'))
|
126 |
-
print(f"| Build word set. Size: {len(word_set)}, #total words: {total_words},"
|
127 |
-
f" #unk_words: {num_unk_words}, word_set[:10]:, {word_set[:10]}.")
|
128 |
-
else:
|
129 |
-
word_set = json.load(open(fn, 'r'))
|
130 |
-
print("| Load word set. Size: ", len(word_set), word_set[:10])
|
131 |
-
return TokenTextEncoder(None, vocab_list=word_set, replace_oov='<UNK>')
|
132 |
-
|
133 |
-
def meta_data(self, prefix):
|
134 |
-
if prefix == 'valid':
|
135 |
-
item_names = self.valid_item_names
|
136 |
-
elif prefix == 'test':
|
137 |
-
item_names = self.test_item_names
|
138 |
-
else:
|
139 |
-
item_names = self.train_item_names
|
140 |
-
for item_name in item_names:
|
141 |
-
ph = self.item2ph[item_name]
|
142 |
-
txt = self.item2txt[item_name]
|
143 |
-
tg_fn = self.item2tgfn.get(item_name)
|
144 |
-
wav_fn = self.item2wavfn[item_name]
|
145 |
-
spk_id = self.item_name2spk_id(item_name)
|
146 |
-
emotion = self.item_name2emo_id(item_name)
|
147 |
-
yield item_name, ph, txt, tg_fn, wav_fn, spk_id, emotion
|
148 |
-
|
149 |
-
def process(self):
|
150 |
-
self.load_meta_data()
|
151 |
-
os.makedirs(hparams['binary_data_dir'], exist_ok=True)
|
152 |
-
self.spk_map = self.build_spk_map()
|
153 |
-
print("| spk_map: ", self.spk_map)
|
154 |
-
spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json"
|
155 |
-
json.dump(self.spk_map, open(spk_map_fn, 'w'))
|
156 |
-
|
157 |
-
self.emo_map = self.build_emo_map()
|
158 |
-
print("| emo_map: ", self.emo_map)
|
159 |
-
emo_map_fn = f"{hparams['binary_data_dir']}/emo_map.json"
|
160 |
-
json.dump(self.emo_map, open(emo_map_fn, 'w'))
|
161 |
-
|
162 |
-
self.phone_encoder = self._phone_encoder()
|
163 |
-
self.word_encoder = None
|
164 |
-
EmotionEncoder.load_model(hparams['emotion_encoder_path'])
|
165 |
-
|
166 |
-
if self.binarization_args['with_word']:
|
167 |
-
self.word_encoder = self._word_encoder()
|
168 |
-
self.process_data('valid')
|
169 |
-
self.process_data('test')
|
170 |
-
self.process_data('train')
|
171 |
-
|
172 |
-
def process_data(self, prefix):
|
173 |
-
data_dir = hparams['binary_data_dir']
|
174 |
-
args = []
|
175 |
-
builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}')
|
176 |
-
ph_lengths = []
|
177 |
-
mel_lengths = []
|
178 |
-
f0s = []
|
179 |
-
total_sec = 0
|
180 |
-
if self.binarization_args['with_spk_embed']:
|
181 |
-
voice_encoder = VoiceEncoder().cuda()
|
182 |
-
|
183 |
-
meta_data = list(self.meta_data(prefix))
|
184 |
-
for m in meta_data:
|
185 |
-
args.append(list(m) + [(self.phone_encoder, self.word_encoder), self.binarization_args])
|
186 |
-
num_workers = self.num_workers
|
187 |
-
for f_id, (_, item) in enumerate(
|
188 |
-
zip(tqdm(meta_data), chunked_multiprocess_run(self.process_item, args, num_workers=num_workers))):
|
189 |
-
if item is None:
|
190 |
-
continue
|
191 |
-
item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \
|
192 |
-
if self.binarization_args['with_spk_embed'] else None
|
193 |
-
processed_wav = preprocess_wav(item['wav_fn'])
|
194 |
-
item['emo_embed'] = Embed_utterance(processed_wav)
|
195 |
-
if not self.binarization_args['with_wav'] and 'wav' in item:
|
196 |
-
del item['wav']
|
197 |
-
builder.add_item(item)
|
198 |
-
mel_lengths.append(item['len'])
|
199 |
-
if 'ph_len' in item:
|
200 |
-
ph_lengths.append(item['ph_len'])
|
201 |
-
total_sec += item['sec']
|
202 |
-
if item.get('f0') is not None:
|
203 |
-
f0s.append(item['f0'])
|
204 |
-
builder.finalize()
|
205 |
-
np.save(f'{data_dir}/{prefix}_lengths.npy', mel_lengths)
|
206 |
-
if len(ph_lengths) > 0:
|
207 |
-
np.save(f'{data_dir}/{prefix}_ph_lengths.npy', ph_lengths)
|
208 |
-
if len(f0s) > 0:
|
209 |
-
f0s = np.concatenate(f0s, 0)
|
210 |
-
f0s = f0s[f0s != 0]
|
211 |
-
np.save(f'{data_dir}/{prefix}_f0s_mean_std.npy', [np.mean(f0s).item(), np.std(f0s).item()])
|
212 |
-
print(f"| {prefix} total duration: {total_sec:.3f}s")
|
213 |
-
|
214 |
-
@classmethod
|
215 |
-
def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, emotion, encoder, binarization_args):
|
216 |
-
res = {'item_name': item_name, 'txt': txt, 'ph': ph, 'wav_fn': wav_fn, 'spk_id': spk_id, 'emotion': emotion}
|
217 |
-
if binarization_args['with_linear']:
|
218 |
-
wav, mel, linear_stft = get_vocoder_cls(hparams).wav2spec(wav_fn) # , return_linear=True
|
219 |
-
res['linear'] = linear_stft
|
220 |
-
else:
|
221 |
-
wav, mel = get_vocoder_cls(hparams).wav2spec(wav_fn)
|
222 |
-
wav = wav.astype(np.float16)
|
223 |
-
res.update({'mel': mel, 'wav': wav,
|
224 |
-
'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0]})
|
225 |
-
try:
|
226 |
-
if binarization_args['with_f0']:
|
227 |
-
cls.get_pitch(res)
|
228 |
-
if binarization_args['with_f0cwt']:
|
229 |
-
cls.get_f0cwt(res)
|
230 |
-
if binarization_args['with_txt']:
|
231 |
-
ph_encoder, word_encoder = encoder
|
232 |
-
try:
|
233 |
-
res['phone'] = ph_encoder.encode(ph)
|
234 |
-
res['ph_len'] = len(res['phone'])
|
235 |
-
except:
|
236 |
-
traceback.print_exc()
|
237 |
-
raise BinarizationError(f"Empty phoneme")
|
238 |
-
if binarization_args['with_align']:
|
239 |
-
cls.get_align(tg_fn, res)
|
240 |
-
if binarization_args['trim_eos_bos']:
|
241 |
-
bos_dur = res['dur'][0]
|
242 |
-
eos_dur = res['dur'][-1]
|
243 |
-
res['mel'] = mel[bos_dur:-eos_dur]
|
244 |
-
res['f0'] = res['f0'][bos_dur:-eos_dur]
|
245 |
-
res['pitch'] = res['pitch'][bos_dur:-eos_dur]
|
246 |
-
res['mel2ph'] = res['mel2ph'][bos_dur:-eos_dur]
|
247 |
-
res['wav'] = wav[bos_dur * hparams['hop_size']:-eos_dur * hparams['hop_size']]
|
248 |
-
res['dur'] = res['dur'][1:-1]
|
249 |
-
res['len'] = res['mel'].shape[0]
|
250 |
-
if binarization_args['with_word']:
|
251 |
-
cls.get_word(res, word_encoder)
|
252 |
-
except BinarizationError as e:
|
253 |
-
print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
|
254 |
-
return None
|
255 |
-
except Exception as e:
|
256 |
-
traceback.print_exc()
|
257 |
-
print(f"| Skip item. item_name: {item_name}, wav_fn: {wav_fn}")
|
258 |
-
return None
|
259 |
-
return res
|
260 |
-
|
261 |
-
@staticmethod
|
262 |
-
def get_align(tg_fn, res):
|
263 |
-
ph = res['ph']
|
264 |
-
mel = res['mel']
|
265 |
-
phone_encoded = res['phone']
|
266 |
-
if tg_fn is not None and os.path.exists(tg_fn):
|
267 |
-
mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams)
|
268 |
-
else:
|
269 |
-
raise BinarizationError(f"Align not found")
|
270 |
-
if mel2ph.max() - 1 >= len(phone_encoded):
|
271 |
-
raise BinarizationError(
|
272 |
-
f"Align does not match: mel2ph.max() - 1: {mel2ph.max() - 1}, len(phone_encoded): {len(phone_encoded)}")
|
273 |
-
res['mel2ph'] = mel2ph
|
274 |
-
res['dur'] = dur
|
275 |
-
|
276 |
-
@staticmethod
|
277 |
-
def get_pitch(res):
|
278 |
-
wav, mel = res['wav'], res['mel']
|
279 |
-
f0, pitch_coarse = get_pitch(wav, mel, hparams)
|
280 |
-
if sum(f0) == 0:
|
281 |
-
raise BinarizationError("Empty f0")
|
282 |
-
res['f0'] = f0
|
283 |
-
res['pitch'] = pitch_coarse
|
284 |
-
|
285 |
-
@staticmethod
|
286 |
-
def get_f0cwt(res):
|
287 |
-
from utils.cwt import get_cont_lf0, get_lf0_cwt
|
288 |
-
f0 = res['f0']
|
289 |
-
uv, cont_lf0_lpf = get_cont_lf0(f0)
|
290 |
-
logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf)
|
291 |
-
cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org
|
292 |
-
Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm)
|
293 |
-
if np.any(np.isnan(Wavelet_lf0)):
|
294 |
-
raise BinarizationError("NaN CWT")
|
295 |
-
res['cwt_spec'] = Wavelet_lf0
|
296 |
-
res['cwt_scales'] = scales
|
297 |
-
res['f0_mean'] = logf0s_mean_org
|
298 |
-
res['f0_std'] = logf0s_std_org
|
299 |
-
|
300 |
-
@staticmethod
|
301 |
-
def get_word(res, word_encoder):
|
302 |
-
ph_split = res['ph'].split(" ")
|
303 |
-
# ph side mapping to word
|
304 |
-
ph_words = [] # ['<BOS>', 'N_AW1_', ',', 'AE1_Z_|', 'AO1_L_|', 'B_UH1_K_S_|', 'N_AA1_T_|', ....]
|
305 |
-
ph2word = np.zeros([len(ph_split)], dtype=int)
|
306 |
-
last_ph_idx_for_word = [] # [2, 11, ...]
|
307 |
-
for i, ph in enumerate(ph_split):
|
308 |
-
if ph == '|':
|
309 |
-
last_ph_idx_for_word.append(i)
|
310 |
-
elif not ph[0].isalnum():
|
311 |
-
if ph not in ['<BOS>']:
|
312 |
-
last_ph_idx_for_word.append(i - 1)
|
313 |
-
last_ph_idx_for_word.append(i)
|
314 |
-
start_ph_idx_for_word = [0] + [i + 1 for i in last_ph_idx_for_word[:-1]]
|
315 |
-
for i, (s_w, e_w) in enumerate(zip(start_ph_idx_for_word, last_ph_idx_for_word)):
|
316 |
-
ph_words.append(ph_split[s_w:e_w + 1])
|
317 |
-
ph2word[s_w:e_w + 1] = i
|
318 |
-
ph2word = ph2word.tolist()
|
319 |
-
ph_words = ["_".join(w) for w in ph_words]
|
320 |
-
|
321 |
-
# mel side mapping to word
|
322 |
-
mel2word = []
|
323 |
-
dur_word = [0 for _ in range(len(ph_words))]
|
324 |
-
for i, m2p in enumerate(res['mel2ph']):
|
325 |
-
word_idx = ph2word[m2p - 1]
|
326 |
-
mel2word.append(ph2word[m2p - 1])
|
327 |
-
dur_word[word_idx] += 1
|
328 |
-
ph2word = [x + 1 for x in ph2word] # 0预留给padding
|
329 |
-
mel2word = [x + 1 for x in mel2word] # 0预留给padding
|
330 |
-
res['ph_words'] = ph_words # [T_word]
|
331 |
-
res['ph2word'] = ph2word # [T_ph]
|
332 |
-
res['mel2word'] = mel2word # [T_mel]
|
333 |
-
res['dur_word'] = dur_word # [T_word]
|
334 |
-
words = [x for x in res['txt'].split(" ") if x != '']
|
335 |
-
while len(words) > 0 and is_sil_phoneme(words[0]):
|
336 |
-
words = words[1:]
|
337 |
-
while len(words) > 0 and is_sil_phoneme(words[-1]):
|
338 |
-
words = words[:-1]
|
339 |
-
words = ['<BOS>'] + words + ['<EOS>']
|
340 |
-
word_tokens = word_encoder.encode(" ".join(words))
|
341 |
-
res['words'] = words
|
342 |
-
res['word_tokens'] = word_tokens
|
343 |
-
assert len(words) == len(ph_words), [words, ph_words]
|
344 |
-
|
345 |
-
@property
|
346 |
-
def num_workers(self):
|
347 |
-
return int(os.getenv('N_PROC', hparams.get('N_PROC', os.cpu_count())))
|
348 |
-
|
349 |
-
|
350 |
-
if __name__ == "__main__":
|
351 |
-
set_hparams()
|
352 |
-
EmotionBinarizer().process()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AP123/Upside-Down-Diffusion/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Upside-Down-Diffusion
|
3 |
-
emoji: 🙃
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.44.4
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: openrail
|
11 |
-
hf_oauth: true
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet18.py
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
# model settings
|
2 |
-
model = dict(
|
3 |
-
type='ImageClassifier',
|
4 |
-
backbone=dict(
|
5 |
-
type='ResNet',
|
6 |
-
depth=18,
|
7 |
-
num_stages=4,
|
8 |
-
out_indices=(3, ),
|
9 |
-
style='pytorch'),
|
10 |
-
neck=dict(type='GlobalAveragePooling'),
|
11 |
-
head=dict(
|
12 |
-
type='LinearClsHead',
|
13 |
-
num_classes=1000,
|
14 |
-
in_channels=512,
|
15 |
-
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
|
16 |
-
topk=(1, 5),
|
17 |
-
))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/__init__.py
DELETED
@@ -1,615 +0,0 @@
|
|
1 |
-
import glob
|
2 |
-
import json
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
import sys
|
6 |
-
from pathlib import Path
|
7 |
-
|
8 |
-
logger = logging.getLogger(__name__)
|
9 |
-
|
10 |
-
FILE = Path(__file__).resolve()
|
11 |
-
ROOT = FILE.parents[3] # YOLOv5 root directory
|
12 |
-
if str(ROOT) not in sys.path:
|
13 |
-
sys.path.append(str(ROOT)) # add ROOT to PATH
|
14 |
-
|
15 |
-
try:
|
16 |
-
import comet_ml
|
17 |
-
|
18 |
-
# Project Configuration
|
19 |
-
config = comet_ml.config.get_config()
|
20 |
-
COMET_PROJECT_NAME = config.get_string(
|
21 |
-
os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5"
|
22 |
-
)
|
23 |
-
except (ModuleNotFoundError, ImportError):
|
24 |
-
comet_ml = None
|
25 |
-
COMET_PROJECT_NAME = None
|
26 |
-
|
27 |
-
import PIL
|
28 |
-
import torch
|
29 |
-
import torchvision.transforms as T
|
30 |
-
import yaml
|
31 |
-
|
32 |
-
from utils.dataloaders import img2label_paths
|
33 |
-
from utils.general import check_dataset, scale_boxes, xywh2xyxy
|
34 |
-
from utils.metrics import box_iou
|
35 |
-
|
36 |
-
COMET_PREFIX = "comet://"
|
37 |
-
|
38 |
-
COMET_MODE = os.getenv("COMET_MODE", "online")
|
39 |
-
|
40 |
-
# Model Saving Settings
|
41 |
-
COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
|
42 |
-
|
43 |
-
# Dataset Artifact Settings
|
44 |
-
COMET_UPLOAD_DATASET = (
|
45 |
-
os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true"
|
46 |
-
)
|
47 |
-
|
48 |
-
# Evaluation Settings
|
49 |
-
COMET_LOG_CONFUSION_MATRIX = (
|
50 |
-
os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true"
|
51 |
-
)
|
52 |
-
COMET_LOG_PREDICTIONS = (
|
53 |
-
os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true"
|
54 |
-
)
|
55 |
-
COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100))
|
56 |
-
|
57 |
-
# Confusion Matrix Settings
|
58 |
-
CONF_THRES = float(os.getenv("CONF_THRES", 0.001))
|
59 |
-
IOU_THRES = float(os.getenv("IOU_THRES", 0.6))
|
60 |
-
|
61 |
-
# Batch Logging Settings
|
62 |
-
COMET_LOG_BATCH_METRICS = (
|
63 |
-
os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true"
|
64 |
-
)
|
65 |
-
COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1)
|
66 |
-
COMET_PREDICTION_LOGGING_INTERVAL = os.getenv(
|
67 |
-
"COMET_PREDICTION_LOGGING_INTERVAL", 1
|
68 |
-
)
|
69 |
-
COMET_LOG_PER_CLASS_METRICS = (
|
70 |
-
os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true"
|
71 |
-
)
|
72 |
-
|
73 |
-
RANK = int(os.getenv("RANK", -1))
|
74 |
-
|
75 |
-
to_pil = T.ToPILImage()
|
76 |
-
|
77 |
-
|
78 |
-
class CometLogger:
|
79 |
-
"""Log metrics, parameters, source code, models and much more
|
80 |
-
with Comet
|
81 |
-
"""
|
82 |
-
|
83 |
-
def __init__(
|
84 |
-
self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs
|
85 |
-
) -> None:
|
86 |
-
self.job_type = job_type
|
87 |
-
self.opt = opt
|
88 |
-
self.hyp = hyp
|
89 |
-
|
90 |
-
# Comet Flags
|
91 |
-
self.comet_mode = COMET_MODE
|
92 |
-
|
93 |
-
self.save_model = opt.save_period > -1
|
94 |
-
self.model_name = COMET_MODEL_NAME
|
95 |
-
|
96 |
-
# Batch Logging Settings
|
97 |
-
self.log_batch_metrics = COMET_LOG_BATCH_METRICS
|
98 |
-
self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
|
99 |
-
|
100 |
-
# Dataset Artifact Settings
|
101 |
-
self.upload_dataset = (
|
102 |
-
self.opt.upload_dataset
|
103 |
-
if self.opt.upload_dataset
|
104 |
-
else COMET_UPLOAD_DATASET
|
105 |
-
)
|
106 |
-
self.resume = self.opt.resume
|
107 |
-
|
108 |
-
# Default parameters to pass to Experiment objects
|
109 |
-
self.default_experiment_kwargs = {
|
110 |
-
"log_code": False,
|
111 |
-
"log_env_gpu": True,
|
112 |
-
"log_env_cpu": True,
|
113 |
-
"project_name": COMET_PROJECT_NAME,
|
114 |
-
}
|
115 |
-
self.default_experiment_kwargs.update(experiment_kwargs)
|
116 |
-
self.experiment = self._get_experiment(self.comet_mode, run_id)
|
117 |
-
|
118 |
-
self.data_dict = self.check_dataset(self.opt.data)
|
119 |
-
self.class_names = self.data_dict["names"]
|
120 |
-
self.num_classes = self.data_dict["nc"]
|
121 |
-
|
122 |
-
self.logged_images_count = 0
|
123 |
-
self.max_images = COMET_MAX_IMAGE_UPLOADS
|
124 |
-
|
125 |
-
if run_id is None:
|
126 |
-
self.experiment.log_other("Created from", "YOLOv5")
|
127 |
-
if not isinstance(self.experiment, comet_ml.OfflineExperiment):
|
128 |
-
(
|
129 |
-
workspace,
|
130 |
-
project_name,
|
131 |
-
experiment_id,
|
132 |
-
) = self.experiment.url.split("/")[-3:]
|
133 |
-
self.experiment.log_other(
|
134 |
-
"Run Path",
|
135 |
-
f"{workspace}/{project_name}/{experiment_id}",
|
136 |
-
)
|
137 |
-
self.log_parameters(vars(opt))
|
138 |
-
self.log_parameters(self.opt.hyp)
|
139 |
-
self.log_asset_data(
|
140 |
-
self.opt.hyp,
|
141 |
-
name="hyperparameters.json",
|
142 |
-
metadata={"type": "hyp-config-file"},
|
143 |
-
)
|
144 |
-
self.log_asset(
|
145 |
-
f"{self.opt.save_dir}/opt.yaml",
|
146 |
-
metadata={"type": "opt-config-file"},
|
147 |
-
)
|
148 |
-
|
149 |
-
self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX
|
150 |
-
|
151 |
-
if hasattr(self.opt, "conf_thres"):
|
152 |
-
self.conf_thres = self.opt.conf_thres
|
153 |
-
else:
|
154 |
-
self.conf_thres = CONF_THRES
|
155 |
-
if hasattr(self.opt, "iou_thres"):
|
156 |
-
self.iou_thres = self.opt.iou_thres
|
157 |
-
else:
|
158 |
-
self.iou_thres = IOU_THRES
|
159 |
-
|
160 |
-
self.log_parameters(
|
161 |
-
{
|
162 |
-
"val_iou_threshold": self.iou_thres,
|
163 |
-
"val_conf_threshold": self.conf_thres,
|
164 |
-
}
|
165 |
-
)
|
166 |
-
|
167 |
-
self.comet_log_predictions = COMET_LOG_PREDICTIONS
|
168 |
-
if self.opt.bbox_interval == -1:
|
169 |
-
self.comet_log_prediction_interval = (
|
170 |
-
1 if self.opt.epochs < 10 else self.opt.epochs // 10
|
171 |
-
)
|
172 |
-
else:
|
173 |
-
self.comet_log_prediction_interval = self.opt.bbox_interval
|
174 |
-
|
175 |
-
if self.comet_log_predictions:
|
176 |
-
self.metadata_dict = {}
|
177 |
-
self.logged_image_names = []
|
178 |
-
|
179 |
-
self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS
|
180 |
-
|
181 |
-
self.experiment.log_others(
|
182 |
-
{
|
183 |
-
"comet_mode": COMET_MODE,
|
184 |
-
"comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS,
|
185 |
-
"comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS,
|
186 |
-
"comet_log_batch_metrics": COMET_LOG_BATCH_METRICS,
|
187 |
-
"comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX,
|
188 |
-
"comet_model_name": COMET_MODEL_NAME,
|
189 |
-
}
|
190 |
-
)
|
191 |
-
|
192 |
-
# Check if running the Experiment with the Comet Optimizer
|
193 |
-
if hasattr(self.opt, "comet_optimizer_id"):
|
194 |
-
self.experiment.log_other(
|
195 |
-
"optimizer_id", self.opt.comet_optimizer_id
|
196 |
-
)
|
197 |
-
self.experiment.log_other(
|
198 |
-
"optimizer_objective", self.opt.comet_optimizer_objective
|
199 |
-
)
|
200 |
-
self.experiment.log_other(
|
201 |
-
"optimizer_metric", self.opt.comet_optimizer_metric
|
202 |
-
)
|
203 |
-
self.experiment.log_other(
|
204 |
-
"optimizer_parameters", json.dumps(self.hyp)
|
205 |
-
)
|
206 |
-
|
207 |
-
def _get_experiment(self, mode, experiment_id=None):
|
208 |
-
if mode == "offline":
|
209 |
-
if experiment_id is not None:
|
210 |
-
return comet_ml.ExistingOfflineExperiment(
|
211 |
-
previous_experiment=experiment_id,
|
212 |
-
**self.default_experiment_kwargs,
|
213 |
-
)
|
214 |
-
|
215 |
-
return comet_ml.OfflineExperiment(
|
216 |
-
**self.default_experiment_kwargs,
|
217 |
-
)
|
218 |
-
|
219 |
-
else:
|
220 |
-
try:
|
221 |
-
if experiment_id is not None:
|
222 |
-
return comet_ml.ExistingExperiment(
|
223 |
-
previous_experiment=experiment_id,
|
224 |
-
**self.default_experiment_kwargs,
|
225 |
-
)
|
226 |
-
|
227 |
-
return comet_ml.Experiment(**self.default_experiment_kwargs)
|
228 |
-
|
229 |
-
except ValueError:
|
230 |
-
logger.warning(
|
231 |
-
"COMET WARNING: "
|
232 |
-
"Comet credentials have not been set. "
|
233 |
-
"Comet will default to offline logging. "
|
234 |
-
"Please set your credentials to enable online logging."
|
235 |
-
)
|
236 |
-
return self._get_experiment("offline", experiment_id)
|
237 |
-
|
238 |
-
return
|
239 |
-
|
240 |
-
def log_metrics(self, log_dict, **kwargs):
|
241 |
-
self.experiment.log_metrics(log_dict, **kwargs)
|
242 |
-
|
243 |
-
def log_parameters(self, log_dict, **kwargs):
|
244 |
-
self.experiment.log_parameters(log_dict, **kwargs)
|
245 |
-
|
246 |
-
def log_asset(self, asset_path, **kwargs):
|
247 |
-
self.experiment.log_asset(asset_path, **kwargs)
|
248 |
-
|
249 |
-
def log_asset_data(self, asset, **kwargs):
|
250 |
-
self.experiment.log_asset_data(asset, **kwargs)
|
251 |
-
|
252 |
-
def log_image(self, img, **kwargs):
|
253 |
-
self.experiment.log_image(img, **kwargs)
|
254 |
-
|
255 |
-
def log_model(self, path, opt, epoch, fitness_score, best_model=False):
|
256 |
-
if not self.save_model:
|
257 |
-
return
|
258 |
-
|
259 |
-
model_metadata = {
|
260 |
-
"fitness_score": fitness_score[-1],
|
261 |
-
"epochs_trained": epoch + 1,
|
262 |
-
"save_period": opt.save_period,
|
263 |
-
"total_epochs": opt.epochs,
|
264 |
-
}
|
265 |
-
|
266 |
-
model_files = glob.glob(f"{path}/*.pt")
|
267 |
-
for model_path in model_files:
|
268 |
-
name = Path(model_path).name
|
269 |
-
|
270 |
-
self.experiment.log_model(
|
271 |
-
self.model_name,
|
272 |
-
file_or_folder=model_path,
|
273 |
-
file_name=name,
|
274 |
-
metadata=model_metadata,
|
275 |
-
overwrite=True,
|
276 |
-
)
|
277 |
-
|
278 |
-
def check_dataset(self, data_file):
|
279 |
-
with open(data_file) as f:
|
280 |
-
data_config = yaml.safe_load(f)
|
281 |
-
|
282 |
-
if data_config["path"].startswith(COMET_PREFIX):
|
283 |
-
path = data_config["path"].replace(COMET_PREFIX, "")
|
284 |
-
data_dict = self.download_dataset_artifact(path)
|
285 |
-
|
286 |
-
return data_dict
|
287 |
-
|
288 |
-
self.log_asset(self.opt.data, metadata={"type": "data-config-file"})
|
289 |
-
|
290 |
-
return check_dataset(data_file)
|
291 |
-
|
292 |
-
def log_predictions(self, image, labelsn, path, shape, predn):
|
293 |
-
if self.logged_images_count >= self.max_images:
|
294 |
-
return
|
295 |
-
detections = predn[predn[:, 4] > self.conf_thres]
|
296 |
-
iou = box_iou(labelsn[:, 1:], detections[:, :4])
|
297 |
-
mask, _ = torch.where(iou > self.iou_thres)
|
298 |
-
if len(mask) == 0:
|
299 |
-
return
|
300 |
-
|
301 |
-
filtered_detections = detections[mask]
|
302 |
-
filtered_labels = labelsn[mask]
|
303 |
-
|
304 |
-
image_id = path.split("/")[-1].split(".")[0]
|
305 |
-
image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}"
|
306 |
-
if image_name not in self.logged_image_names:
|
307 |
-
native_scale_image = PIL.Image.open(path)
|
308 |
-
self.log_image(native_scale_image, name=image_name)
|
309 |
-
self.logged_image_names.append(image_name)
|
310 |
-
|
311 |
-
metadata = []
|
312 |
-
for cls, *xyxy in filtered_labels.tolist():
|
313 |
-
metadata.append(
|
314 |
-
{
|
315 |
-
"label": f"{self.class_names[int(cls)]}-gt",
|
316 |
-
"score": 100,
|
317 |
-
"box": {
|
318 |
-
"x": xyxy[0],
|
319 |
-
"y": xyxy[1],
|
320 |
-
"x2": xyxy[2],
|
321 |
-
"y2": xyxy[3],
|
322 |
-
},
|
323 |
-
}
|
324 |
-
)
|
325 |
-
for *xyxy, conf, cls in filtered_detections.tolist():
|
326 |
-
metadata.append(
|
327 |
-
{
|
328 |
-
"label": f"{self.class_names[int(cls)]}",
|
329 |
-
"score": conf * 100,
|
330 |
-
"box": {
|
331 |
-
"x": xyxy[0],
|
332 |
-
"y": xyxy[1],
|
333 |
-
"x2": xyxy[2],
|
334 |
-
"y2": xyxy[3],
|
335 |
-
},
|
336 |
-
}
|
337 |
-
)
|
338 |
-
|
339 |
-
self.metadata_dict[image_name] = metadata
|
340 |
-
self.logged_images_count += 1
|
341 |
-
|
342 |
-
return
|
343 |
-
|
344 |
-
def preprocess_prediction(self, image, labels, shape, pred):
|
345 |
-
nl, _ = labels.shape[0], pred.shape[0]
|
346 |
-
|
347 |
-
# Predictions
|
348 |
-
if self.opt.single_cls:
|
349 |
-
pred[:, 5] = 0
|
350 |
-
|
351 |
-
predn = pred.clone()
|
352 |
-
scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])
|
353 |
-
|
354 |
-
labelsn = None
|
355 |
-
if nl:
|
356 |
-
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
|
357 |
-
scale_boxes(
|
358 |
-
image.shape[1:], tbox, shape[0], shape[1]
|
359 |
-
) # native-space labels
|
360 |
-
labelsn = torch.cat(
|
361 |
-
(labels[:, 0:1], tbox), 1
|
362 |
-
) # native-space labels
|
363 |
-
scale_boxes(
|
364 |
-
image.shape[1:], predn[:, :4], shape[0], shape[1]
|
365 |
-
) # native-space pred
|
366 |
-
|
367 |
-
return predn, labelsn
|
368 |
-
|
369 |
-
def add_assets_to_artifact(self, artifact, path, asset_path, split):
|
370 |
-
img_paths = sorted(glob.glob(f"{asset_path}/*"))
|
371 |
-
label_paths = img2label_paths(img_paths)
|
372 |
-
|
373 |
-
for image_file, label_file in zip(img_paths, label_paths):
|
374 |
-
image_logical_path, label_logical_path = map(
|
375 |
-
lambda x: os.path.relpath(x, path), [image_file, label_file]
|
376 |
-
)
|
377 |
-
|
378 |
-
try:
|
379 |
-
artifact.add(
|
380 |
-
image_file,
|
381 |
-
logical_path=image_logical_path,
|
382 |
-
metadata={"split": split},
|
383 |
-
)
|
384 |
-
artifact.add(
|
385 |
-
label_file,
|
386 |
-
logical_path=label_logical_path,
|
387 |
-
metadata={"split": split},
|
388 |
-
)
|
389 |
-
except ValueError as e:
|
390 |
-
logger.error(
|
391 |
-
"COMET ERROR: Error adding file to Artifact. Skipping file."
|
392 |
-
)
|
393 |
-
logger.error(f"COMET ERROR: {e}")
|
394 |
-
continue
|
395 |
-
|
396 |
-
return artifact
|
397 |
-
|
398 |
-
def upload_dataset_artifact(self):
|
399 |
-
dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset")
|
400 |
-
path = str((ROOT / Path(self.data_dict["path"])).resolve())
|
401 |
-
|
402 |
-
metadata = self.data_dict.copy()
|
403 |
-
for key in ["train", "val", "test"]:
|
404 |
-
split_path = metadata.get(key)
|
405 |
-
if split_path is not None:
|
406 |
-
metadata[key] = split_path.replace(path, "")
|
407 |
-
|
408 |
-
artifact = comet_ml.Artifact(
|
409 |
-
name=dataset_name, artifact_type="dataset", metadata=metadata
|
410 |
-
)
|
411 |
-
for key in metadata.keys():
|
412 |
-
if key in ["train", "val", "test"]:
|
413 |
-
if isinstance(self.upload_dataset, str) and (
|
414 |
-
key != self.upload_dataset
|
415 |
-
):
|
416 |
-
continue
|
417 |
-
|
418 |
-
asset_path = self.data_dict.get(key)
|
419 |
-
if asset_path is not None:
|
420 |
-
artifact = self.add_assets_to_artifact(
|
421 |
-
artifact, path, asset_path, key
|
422 |
-
)
|
423 |
-
|
424 |
-
self.experiment.log_artifact(artifact)
|
425 |
-
|
426 |
-
return
|
427 |
-
|
428 |
-
def download_dataset_artifact(self, artifact_path):
|
429 |
-
logged_artifact = self.experiment.get_artifact(artifact_path)
|
430 |
-
artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)
|
431 |
-
logged_artifact.download(artifact_save_dir)
|
432 |
-
|
433 |
-
metadata = logged_artifact.metadata
|
434 |
-
data_dict = metadata.copy()
|
435 |
-
data_dict["path"] = artifact_save_dir
|
436 |
-
|
437 |
-
metadata_names = metadata.get("names")
|
438 |
-
if type(metadata_names) == dict:
|
439 |
-
data_dict["names"] = {
|
440 |
-
int(k): v for k, v in metadata.get("names").items()
|
441 |
-
}
|
442 |
-
elif type(metadata_names) == list:
|
443 |
-
data_dict["names"] = {
|
444 |
-
int(k): v
|
445 |
-
for k, v in zip(range(len(metadata_names)), metadata_names)
|
446 |
-
}
|
447 |
-
else:
|
448 |
-
raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"
|
449 |
-
|
450 |
-
data_dict = self.update_data_paths(data_dict)
|
451 |
-
return data_dict
|
452 |
-
|
453 |
-
def update_data_paths(self, data_dict):
|
454 |
-
path = data_dict.get("path", "")
|
455 |
-
|
456 |
-
for split in ["train", "val", "test"]:
|
457 |
-
if data_dict.get(split):
|
458 |
-
split_path = data_dict.get(split)
|
459 |
-
data_dict[split] = (
|
460 |
-
f"{path}/{split_path}"
|
461 |
-
if isinstance(split, str)
|
462 |
-
else [f"{path}/{x}" for x in split_path]
|
463 |
-
)
|
464 |
-
|
465 |
-
return data_dict
|
466 |
-
|
467 |
-
def on_pretrain_routine_end(self, paths):
|
468 |
-
if self.opt.resume:
|
469 |
-
return
|
470 |
-
|
471 |
-
for path in paths:
|
472 |
-
self.log_asset(str(path))
|
473 |
-
|
474 |
-
if self.upload_dataset:
|
475 |
-
if not self.resume:
|
476 |
-
self.upload_dataset_artifact()
|
477 |
-
|
478 |
-
return
|
479 |
-
|
480 |
-
def on_train_start(self):
|
481 |
-
self.log_parameters(self.hyp)
|
482 |
-
|
483 |
-
def on_train_epoch_start(self):
|
484 |
-
return
|
485 |
-
|
486 |
-
def on_train_epoch_end(self, epoch):
|
487 |
-
self.experiment.curr_epoch = epoch
|
488 |
-
|
489 |
-
return
|
490 |
-
|
491 |
-
def on_train_batch_start(self):
|
492 |
-
return
|
493 |
-
|
494 |
-
def on_train_batch_end(self, log_dict, step):
|
495 |
-
self.experiment.curr_step = step
|
496 |
-
if self.log_batch_metrics and (
|
497 |
-
step % self.comet_log_batch_interval == 0
|
498 |
-
):
|
499 |
-
self.log_metrics(log_dict, step=step)
|
500 |
-
|
501 |
-
return
|
502 |
-
|
503 |
-
def on_train_end(self, files, save_dir, last, best, epoch, results):
|
504 |
-
if self.comet_log_predictions:
|
505 |
-
curr_epoch = self.experiment.curr_epoch
|
506 |
-
self.experiment.log_asset_data(
|
507 |
-
self.metadata_dict, "image-metadata.json", epoch=curr_epoch
|
508 |
-
)
|
509 |
-
|
510 |
-
for f in files:
|
511 |
-
self.log_asset(f, metadata={"epoch": epoch})
|
512 |
-
self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch})
|
513 |
-
|
514 |
-
if not self.opt.evolve:
|
515 |
-
model_path = str(best if best.exists() else last)
|
516 |
-
name = Path(model_path).name
|
517 |
-
if self.save_model:
|
518 |
-
self.experiment.log_model(
|
519 |
-
self.model_name,
|
520 |
-
file_or_folder=model_path,
|
521 |
-
file_name=name,
|
522 |
-
overwrite=True,
|
523 |
-
)
|
524 |
-
|
525 |
-
# Check if running Experiment with Comet Optimizer
|
526 |
-
if hasattr(self.opt, "comet_optimizer_id"):
|
527 |
-
metric = results.get(self.opt.comet_optimizer_metric)
|
528 |
-
self.experiment.log_other("optimizer_metric_value", metric)
|
529 |
-
|
530 |
-
self.finish_run()
|
531 |
-
|
532 |
-
def on_val_start(self):
|
533 |
-
return
|
534 |
-
|
535 |
-
def on_val_batch_start(self):
|
536 |
-
return
|
537 |
-
|
538 |
-
def on_val_batch_end(
|
539 |
-
self, batch_i, images, targets, paths, shapes, outputs
|
540 |
-
):
|
541 |
-
if not (
|
542 |
-
self.comet_log_predictions
|
543 |
-
and ((batch_i + 1) % self.comet_log_prediction_interval == 0)
|
544 |
-
):
|
545 |
-
return
|
546 |
-
|
547 |
-
for si, pred in enumerate(outputs):
|
548 |
-
if len(pred) == 0:
|
549 |
-
continue
|
550 |
-
|
551 |
-
image = images[si]
|
552 |
-
labels = targets[targets[:, 0] == si, 1:]
|
553 |
-
shape = shapes[si]
|
554 |
-
path = paths[si]
|
555 |
-
predn, labelsn = self.preprocess_prediction(
|
556 |
-
image, labels, shape, pred
|
557 |
-
)
|
558 |
-
if labelsn is not None:
|
559 |
-
self.log_predictions(image, labelsn, path, shape, predn)
|
560 |
-
|
561 |
-
return
|
562 |
-
|
563 |
-
def on_val_end(
|
564 |
-
self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix
|
565 |
-
):
|
566 |
-
if self.comet_log_per_class_metrics:
|
567 |
-
if self.num_classes > 1:
|
568 |
-
for i, c in enumerate(ap_class):
|
569 |
-
class_name = self.class_names[c]
|
570 |
-
self.experiment.log_metrics(
|
571 |
-
{
|
572 |
-
"[email protected]": ap50[i],
|
573 |
-
"[email protected]:.95": ap[i],
|
574 |
-
"precision": p[i],
|
575 |
-
"recall": r[i],
|
576 |
-
"f1": f1[i],
|
577 |
-
"true_positives": tp[i],
|
578 |
-
"false_positives": fp[i],
|
579 |
-
"support": nt[c],
|
580 |
-
},
|
581 |
-
prefix=class_name,
|
582 |
-
)
|
583 |
-
|
584 |
-
if self.comet_log_confusion_matrix:
|
585 |
-
epoch = self.experiment.curr_epoch
|
586 |
-
class_names = list(self.class_names.values())
|
587 |
-
class_names.append("background")
|
588 |
-
num_classes = len(class_names)
|
589 |
-
|
590 |
-
self.experiment.log_confusion_matrix(
|
591 |
-
matrix=confusion_matrix.matrix,
|
592 |
-
max_categories=num_classes,
|
593 |
-
labels=class_names,
|
594 |
-
epoch=epoch,
|
595 |
-
column_label="Actual Category",
|
596 |
-
row_label="Predicted Category",
|
597 |
-
file_name=f"confusion-matrix-epoch-{epoch}.json",
|
598 |
-
)
|
599 |
-
|
600 |
-
def on_fit_epoch_end(self, result, epoch):
|
601 |
-
self.log_metrics(result, epoch=epoch)
|
602 |
-
|
603 |
-
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
|
604 |
-
if (
|
605 |
-
(epoch + 1) % self.opt.save_period == 0 and not final_epoch
|
606 |
-
) and self.opt.save_period != -1:
|
607 |
-
self.log_model(
|
608 |
-
last.parent, self.opt, epoch, fi, best_model=best_fitness == fi
|
609 |
-
)
|
610 |
-
|
611 |
-
def on_params_update(self, params):
|
612 |
-
self.log_parameters(params)
|
613 |
-
|
614 |
-
def finish_run(self):
|
615 |
-
self.experiment.end()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abrish-Aadi/Chest-Xray-anomaly-detection/app.py
DELETED
@@ -1,40 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import tensorflow as tf
|
3 |
-
import os
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
model=tf.keras.models.load_model('model.h5')
|
7 |
-
|
8 |
-
LABELS = ['NORMAL', 'TUBERCULOSIS', 'PNEUMONIA', 'COVID19']
|
9 |
-
|
10 |
-
def predict_input_image(img):
|
11 |
-
img_4d=img.reshape(-1,128,128,3)/255.0
|
12 |
-
print(img_4d.min())
|
13 |
-
print(img_4d.max())
|
14 |
-
prediction=model.predict(img_4d)[0]
|
15 |
-
return {LABELS[i]: float(prediction[i]) for i in range(4)}
|
16 |
-
|
17 |
-
def k():
|
18 |
-
return gr.update(value=None)
|
19 |
-
|
20 |
-
with gr.Blocks(title="Chest X-Ray Anomaly Detection", css="") as demo:
|
21 |
-
with gr.Row():
|
22 |
-
textmd = gr.Markdown('''
|
23 |
-
# Chest X-Ray Anomaly Detection
|
24 |
-
''')
|
25 |
-
with gr.Row():
|
26 |
-
with gr.Column(scale=1, min_width=600):
|
27 |
-
image = gr.inputs.Image(shape=(128,128))
|
28 |
-
with gr.Row():
|
29 |
-
clear_btn = gr.Button("Clear")
|
30 |
-
submit_btn = gr.Button("Submit", elem_id="warningk", variant='primary')
|
31 |
-
examples = gr.Examples(examples=["COVID19(573).jpg",
|
32 |
-
"NORMAL2-IM-1345-0001-0002.jpeg",
|
33 |
-
"person1946_bacteria_4875.jpeg",
|
34 |
-
"Tuberculosis-658.png"], inputs=image)
|
35 |
-
label = gr.outputs.Label(num_top_classes=4)
|
36 |
-
|
37 |
-
clear_btn.click(k, inputs=[], outputs=image)
|
38 |
-
submit_btn.click(predict_input_image, inputs=image, outputs=label)
|
39 |
-
|
40 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/FadeMethods.js
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
import { FadeIn, FadeOutDestroy } from '../fade/Fade.js';
|
2 |
-
import { WaitComplete } from '../utils/WaitEvent.js';
|
3 |
-
import GetParentSizerMethods from './GetParentSizerMethods.js';
|
4 |
-
|
5 |
-
const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
|
6 |
-
|
7 |
-
var OnInitFade = function (gameObject, fade) {
|
8 |
-
// Route 'complete' of fade to gameObject
|
9 |
-
fade.completeEventName = undefined;
|
10 |
-
fade.on('complete', function () {
|
11 |
-
if (fade.completeEventName) {
|
12 |
-
gameObject.emit(fade.completeEventName, gameObject);
|
13 |
-
fade.completeEventName = undefined;
|
14 |
-
}
|
15 |
-
})
|
16 |
-
|
17 |
-
// Update local state
|
18 |
-
fade.on('update', function () {
|
19 |
-
var parent = GetParentSizerMethods.getParentSizer(gameObject);
|
20 |
-
if (parent) {
|
21 |
-
parent.resetChildAlphaState(gameObject);
|
22 |
-
}
|
23 |
-
})
|
24 |
-
}
|
25 |
-
|
26 |
-
export default {
|
27 |
-
fadeIn(duration, alpha) {
|
28 |
-
if (IsPlainObject(duration)) {
|
29 |
-
var config = duration;
|
30 |
-
duration = config.duration;
|
31 |
-
alpha = config.alpha;
|
32 |
-
}
|
33 |
-
|
34 |
-
var isInit = (this._fade === undefined);
|
35 |
-
|
36 |
-
this._fade = FadeIn(this, duration, alpha, this._fade);
|
37 |
-
|
38 |
-
if (isInit) {
|
39 |
-
OnInitFade(this, this._fade);
|
40 |
-
}
|
41 |
-
|
42 |
-
this._fade.completeEventName = 'fadein.complete';
|
43 |
-
|
44 |
-
return this;
|
45 |
-
},
|
46 |
-
|
47 |
-
fadeInPromise(duration, alpha) {
|
48 |
-
this.fadeIn(duration, alpha);
|
49 |
-
return WaitComplete(this._fade);
|
50 |
-
},
|
51 |
-
|
52 |
-
fadeOutDestroy(duration, destroyMode) {
|
53 |
-
if (IsPlainObject(duration)) {
|
54 |
-
var config = duration;
|
55 |
-
duration = config.duration;
|
56 |
-
destroyMode = config.destroy;
|
57 |
-
}
|
58 |
-
|
59 |
-
var isInit = (this._fade === undefined);
|
60 |
-
|
61 |
-
this._fade = FadeOutDestroy(this, duration, destroyMode, this._fade);
|
62 |
-
|
63 |
-
if (isInit) {
|
64 |
-
OnInitFade(this, this._fade);
|
65 |
-
}
|
66 |
-
|
67 |
-
this._fade.completeEventName = 'fadeout.complete';
|
68 |
-
|
69 |
-
return this;
|
70 |
-
},
|
71 |
-
|
72 |
-
fadeOutDestroyPromise(duration, destroyMode) {
|
73 |
-
this.fadeOutDestroy(duration, destroyMode);
|
74 |
-
return WaitComplete(this._fade);
|
75 |
-
},
|
76 |
-
|
77 |
-
fadeOut(duration) {
|
78 |
-
this.fadeOutDestroy(duration, false);
|
79 |
-
return this;
|
80 |
-
},
|
81 |
-
|
82 |
-
fadeOutPromise(duration) {
|
83 |
-
this.fadeOut(duration);
|
84 |
-
return WaitComplete(this._fade);
|
85 |
-
}
|
86 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.d.ts
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
import ScrollBar from './ScrollBar';
|
2 |
-
|
3 |
-
export default function (
|
4 |
-
config?: ScrollBar.IConfig
|
5 |
-
): ScrollBar;
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.cpp
DELETED
@@ -1,99 +0,0 @@
|
|
1 |
-
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
//
|
3 |
-
// NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
// and proprietary rights in and to this software, related documentation
|
5 |
-
// and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
// distribution of this software and related documentation without an express
|
7 |
-
// license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
#include <torch/extension.h>
|
10 |
-
#include <ATen/cuda/CUDAContext.h>
|
11 |
-
#include <c10/cuda/CUDAGuard.h>
|
12 |
-
#include "bias_act.h"
|
13 |
-
|
14 |
-
//------------------------------------------------------------------------
|
15 |
-
|
16 |
-
static bool has_same_layout(torch::Tensor x, torch::Tensor y)
|
17 |
-
{
|
18 |
-
if (x.dim() != y.dim())
|
19 |
-
return false;
|
20 |
-
for (int64_t i = 0; i < x.dim(); i++)
|
21 |
-
{
|
22 |
-
if (x.size(i) != y.size(i))
|
23 |
-
return false;
|
24 |
-
if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
|
25 |
-
return false;
|
26 |
-
}
|
27 |
-
return true;
|
28 |
-
}
|
29 |
-
|
30 |
-
//------------------------------------------------------------------------
|
31 |
-
|
32 |
-
static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
|
33 |
-
{
|
34 |
-
// Validate arguments.
|
35 |
-
TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
|
36 |
-
TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
|
37 |
-
TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
|
38 |
-
TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
|
39 |
-
TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x");
|
40 |
-
TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
|
41 |
-
TORCH_CHECK(b.dim() == 1, "b must have rank 1");
|
42 |
-
TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
|
43 |
-
TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
|
44 |
-
TORCH_CHECK(grad >= 0, "grad must be non-negative");
|
45 |
-
|
46 |
-
// Validate layout.
|
47 |
-
TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
|
48 |
-
TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
|
49 |
-
TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
|
50 |
-
TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
|
51 |
-
TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");
|
52 |
-
|
53 |
-
// Create output tensor.
|
54 |
-
const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
|
55 |
-
torch::Tensor y = torch::empty_like(x);
|
56 |
-
TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");
|
57 |
-
|
58 |
-
// Initialize CUDA kernel parameters.
|
59 |
-
bias_act_kernel_params p;
|
60 |
-
p.x = x.data_ptr();
|
61 |
-
p.b = (b.numel()) ? b.data_ptr() : NULL;
|
62 |
-
p.xref = (xref.numel()) ? xref.data_ptr() : NULL;
|
63 |
-
p.yref = (yref.numel()) ? yref.data_ptr() : NULL;
|
64 |
-
p.dy = (dy.numel()) ? dy.data_ptr() : NULL;
|
65 |
-
p.y = y.data_ptr();
|
66 |
-
p.grad = grad;
|
67 |
-
p.act = act;
|
68 |
-
p.alpha = alpha;
|
69 |
-
p.gain = gain;
|
70 |
-
p.clamp = clamp;
|
71 |
-
p.sizeX = (int)x.numel();
|
72 |
-
p.sizeB = (int)b.numel();
|
73 |
-
p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;
|
74 |
-
|
75 |
-
// Choose CUDA kernel.
|
76 |
-
void* kernel;
|
77 |
-
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
|
78 |
-
{
|
79 |
-
kernel = choose_bias_act_kernel<scalar_t>(p);
|
80 |
-
});
|
81 |
-
TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");
|
82 |
-
|
83 |
-
// Launch CUDA kernel.
|
84 |
-
p.loopX = 4;
|
85 |
-
int blockSize = 4 * 32;
|
86 |
-
int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
|
87 |
-
void* args[] = {&p};
|
88 |
-
AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
|
89 |
-
return y;
|
90 |
-
}
|
91 |
-
|
92 |
-
//------------------------------------------------------------------------
|
93 |
-
|
94 |
-
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
|
95 |
-
{
|
96 |
-
m.def("bias_act", &bias_act);
|
97 |
-
}
|
98 |
-
|
99 |
-
//------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/upfirdn2d.py
DELETED
@@ -1,206 +0,0 @@
|
|
1 |
-
# Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
|
3 |
-
import os
|
4 |
-
|
5 |
-
import torch
|
6 |
-
from torch.nn import functional as F
|
7 |
-
from torch.autograd import Function
|
8 |
-
from torch.utils.cpp_extension import load
|
9 |
-
|
10 |
-
|
11 |
-
module_path = os.path.dirname(__file__)
|
12 |
-
upfirdn2d_op = load(
|
13 |
-
"upfirdn2d",
|
14 |
-
sources=[
|
15 |
-
os.path.join(module_path, "upfirdn2d.cpp"),
|
16 |
-
os.path.join(module_path, "upfirdn2d_kernel.cu"),
|
17 |
-
],
|
18 |
-
)
|
19 |
-
|
20 |
-
|
21 |
-
class UpFirDn2dBackward(Function):
|
22 |
-
@staticmethod
|
23 |
-
def forward(
|
24 |
-
ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
|
25 |
-
):
|
26 |
-
|
27 |
-
up_x, up_y = up
|
28 |
-
down_x, down_y = down
|
29 |
-
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
|
30 |
-
|
31 |
-
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
|
32 |
-
|
33 |
-
grad_input = upfirdn2d_op.upfirdn2d(
|
34 |
-
grad_output,
|
35 |
-
grad_kernel,
|
36 |
-
down_x,
|
37 |
-
down_y,
|
38 |
-
up_x,
|
39 |
-
up_y,
|
40 |
-
g_pad_x0,
|
41 |
-
g_pad_x1,
|
42 |
-
g_pad_y0,
|
43 |
-
g_pad_y1,
|
44 |
-
)
|
45 |
-
grad_input = grad_input.view(
|
46 |
-
in_size[0], in_size[1], in_size[2], in_size[3])
|
47 |
-
|
48 |
-
ctx.save_for_backward(kernel)
|
49 |
-
|
50 |
-
pad_x0, pad_x1, pad_y0, pad_y1 = pad
|
51 |
-
|
52 |
-
ctx.up_x = up_x
|
53 |
-
ctx.up_y = up_y
|
54 |
-
ctx.down_x = down_x
|
55 |
-
ctx.down_y = down_y
|
56 |
-
ctx.pad_x0 = pad_x0
|
57 |
-
ctx.pad_x1 = pad_x1
|
58 |
-
ctx.pad_y0 = pad_y0
|
59 |
-
ctx.pad_y1 = pad_y1
|
60 |
-
ctx.in_size = in_size
|
61 |
-
ctx.out_size = out_size
|
62 |
-
|
63 |
-
return grad_input
|
64 |
-
|
65 |
-
@staticmethod
|
66 |
-
def backward(ctx, gradgrad_input):
|
67 |
-
(kernel,) = ctx.saved_tensors
|
68 |
-
|
69 |
-
gradgrad_input = gradgrad_input.reshape(-1,
|
70 |
-
ctx.in_size[2], ctx.in_size[3], 1)
|
71 |
-
|
72 |
-
gradgrad_out = upfirdn2d_op.upfirdn2d(
|
73 |
-
gradgrad_input,
|
74 |
-
kernel,
|
75 |
-
ctx.up_x,
|
76 |
-
ctx.up_y,
|
77 |
-
ctx.down_x,
|
78 |
-
ctx.down_y,
|
79 |
-
ctx.pad_x0,
|
80 |
-
ctx.pad_x1,
|
81 |
-
ctx.pad_y0,
|
82 |
-
ctx.pad_y1,
|
83 |
-
)
|
84 |
-
# gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
|
85 |
-
gradgrad_out = gradgrad_out.view(
|
86 |
-
ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
|
87 |
-
)
|
88 |
-
|
89 |
-
return gradgrad_out, None, None, None, None, None, None, None, None
|
90 |
-
|
91 |
-
|
92 |
-
class UpFirDn2d(Function):
|
93 |
-
@staticmethod
|
94 |
-
def forward(ctx, input, kernel, up, down, pad):
|
95 |
-
up_x, up_y = up
|
96 |
-
down_x, down_y = down
|
97 |
-
pad_x0, pad_x1, pad_y0, pad_y1 = pad
|
98 |
-
|
99 |
-
kernel_h, kernel_w = kernel.shape
|
100 |
-
batch, channel, in_h, in_w = input.shape
|
101 |
-
ctx.in_size = input.shape
|
102 |
-
|
103 |
-
input = input.reshape(-1, in_h, in_w, 1)
|
104 |
-
|
105 |
-
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
|
106 |
-
|
107 |
-
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
|
108 |
-
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
|
109 |
-
ctx.out_size = (out_h, out_w)
|
110 |
-
|
111 |
-
ctx.up = (up_x, up_y)
|
112 |
-
ctx.down = (down_x, down_y)
|
113 |
-
ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
|
114 |
-
|
115 |
-
g_pad_x0 = kernel_w - pad_x0 - 1
|
116 |
-
g_pad_y0 = kernel_h - pad_y0 - 1
|
117 |
-
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
|
118 |
-
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
|
119 |
-
|
120 |
-
ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
|
121 |
-
|
122 |
-
out = upfirdn2d_op.upfirdn2d(
|
123 |
-
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
|
124 |
-
)
|
125 |
-
# out = out.view(major, out_h, out_w, minor)
|
126 |
-
out = out.view(-1, channel, out_h, out_w)
|
127 |
-
|
128 |
-
return out
|
129 |
-
|
130 |
-
@staticmethod
|
131 |
-
def backward(ctx, grad_output):
|
132 |
-
kernel, grad_kernel = ctx.saved_tensors
|
133 |
-
|
134 |
-
grad_input = UpFirDn2dBackward.apply(
|
135 |
-
grad_output,
|
136 |
-
kernel,
|
137 |
-
grad_kernel,
|
138 |
-
ctx.up,
|
139 |
-
ctx.down,
|
140 |
-
ctx.pad,
|
141 |
-
ctx.g_pad,
|
142 |
-
ctx.in_size,
|
143 |
-
ctx.out_size,
|
144 |
-
)
|
145 |
-
|
146 |
-
return grad_input, None, None, None, None
|
147 |
-
|
148 |
-
|
149 |
-
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
|
150 |
-
if input.device.type == "cpu":
|
151 |
-
out = upfirdn2d_native(
|
152 |
-
input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
|
153 |
-
)
|
154 |
-
|
155 |
-
else:
|
156 |
-
out = UpFirDn2d.apply(
|
157 |
-
input, kernel, (up, up), (down,
|
158 |
-
down), (pad[0], pad[1], pad[0], pad[1])
|
159 |
-
)
|
160 |
-
|
161 |
-
return out
|
162 |
-
|
163 |
-
|
164 |
-
def upfirdn2d_native(
|
165 |
-
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
|
166 |
-
):
|
167 |
-
_, channel, in_h, in_w = input.shape
|
168 |
-
input = input.reshape(-1, in_h, in_w, 1)
|
169 |
-
|
170 |
-
_, in_h, in_w, minor = input.shape
|
171 |
-
kernel_h, kernel_w = kernel.shape
|
172 |
-
|
173 |
-
out = input.view(-1, in_h, 1, in_w, 1, minor)
|
174 |
-
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
|
175 |
-
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
|
176 |
-
|
177 |
-
out = F.pad(
|
178 |
-
out, [0, 0, max(pad_x0, 0), max(pad_x1, 0),
|
179 |
-
max(pad_y0, 0), max(pad_y1, 0)]
|
180 |
-
)
|
181 |
-
out = out[
|
182 |
-
:,
|
183 |
-
max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0),
|
184 |
-
max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0),
|
185 |
-
:,
|
186 |
-
]
|
187 |
-
|
188 |
-
out = out.permute(0, 3, 1, 2)
|
189 |
-
out = out.reshape(
|
190 |
-
[-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
|
191 |
-
)
|
192 |
-
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
|
193 |
-
out = F.conv2d(out, w)
|
194 |
-
out = out.reshape(
|
195 |
-
-1,
|
196 |
-
minor,
|
197 |
-
in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
|
198 |
-
in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
|
199 |
-
)
|
200 |
-
out = out.permute(0, 2, 3, 1)
|
201 |
-
out = out[:, ::down_y, ::down_x, :]
|
202 |
-
|
203 |
-
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
|
204 |
-
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
|
205 |
-
|
206 |
-
return out.view(-1, channel, out_h, out_w)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
backbone=dict(plugins=[
|
4 |
-
dict(
|
5 |
-
cfg=dict(type='ContextBlock', ratio=1. / 16),
|
6 |
-
stages=(False, True, True, True),
|
7 |
-
position='after_conv3')
|
8 |
-
]))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/scratch/README.md
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
# Rethinking ImageNet Pre-training
|
2 |
-
|
3 |
-
## Introduction
|
4 |
-
|
5 |
-
[ALGORITHM]
|
6 |
-
|
7 |
-
```latex
|
8 |
-
@article{he2018rethinking,
|
9 |
-
title={Rethinking imagenet pre-training},
|
10 |
-
author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr},
|
11 |
-
journal={arXiv preprint arXiv:1811.08883},
|
12 |
-
year={2018}
|
13 |
-
}
|
14 |
-
```
|
15 |
-
|
16 |
-
## Results and Models
|
17 |
-
|
18 |
-
| Model | Backbone | Style | Lr schd | box AP | mask AP | Config | Download |
|
19 |
-
|:------------:|:---------:|:-------:|:-------:|:------:|:-------:|:------:|:--------:|
|
20 |
-
| Faster R-CNN | R-50-FPN | pytorch | 6x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_20200201_193013.log.json) |
|
21 |
-
| Mask R-CNN | R-50-FPN | pytorch | 6x | 41.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_20200201_193051.log.json) |
|
22 |
-
|
23 |
-
Note:
|
24 |
-
|
25 |
-
- The above models are trained with 16 GPUs.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/smooth_l1_loss.py
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
import mmcv
|
2 |
-
import torch
|
3 |
-
import torch.nn as nn
|
4 |
-
|
5 |
-
from ..builder import LOSSES
|
6 |
-
from .utils import weighted_loss
|
7 |
-
|
8 |
-
|
9 |
-
@mmcv.jit(derivate=True, coderize=True)
|
10 |
-
@weighted_loss
|
11 |
-
def smooth_l1_loss(pred, target, beta=1.0):
|
12 |
-
"""Smooth L1 loss.
|
13 |
-
|
14 |
-
Args:
|
15 |
-
pred (torch.Tensor): The prediction.
|
16 |
-
target (torch.Tensor): The learning target of the prediction.
|
17 |
-
beta (float, optional): The threshold in the piecewise function.
|
18 |
-
Defaults to 1.0.
|
19 |
-
|
20 |
-
Returns:
|
21 |
-
torch.Tensor: Calculated loss
|
22 |
-
"""
|
23 |
-
assert beta > 0
|
24 |
-
assert pred.size() == target.size() and target.numel() > 0
|
25 |
-
diff = torch.abs(pred - target)
|
26 |
-
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
|
27 |
-
diff - 0.5 * beta)
|
28 |
-
return loss
|
29 |
-
|
30 |
-
|
31 |
-
@mmcv.jit(derivate=True, coderize=True)
|
32 |
-
@weighted_loss
|
33 |
-
def l1_loss(pred, target):
|
34 |
-
"""L1 loss.
|
35 |
-
|
36 |
-
Args:
|
37 |
-
pred (torch.Tensor): The prediction.
|
38 |
-
target (torch.Tensor): The learning target of the prediction.
|
39 |
-
|
40 |
-
Returns:
|
41 |
-
torch.Tensor: Calculated loss
|
42 |
-
"""
|
43 |
-
assert pred.size() == target.size() and target.numel() > 0
|
44 |
-
loss = torch.abs(pred - target)
|
45 |
-
return loss
|
46 |
-
|
47 |
-
|
48 |
-
@LOSSES.register_module()
|
49 |
-
class SmoothL1Loss(nn.Module):
|
50 |
-
"""Smooth L1 loss.
|
51 |
-
|
52 |
-
Args:
|
53 |
-
beta (float, optional): The threshold in the piecewise function.
|
54 |
-
Defaults to 1.0.
|
55 |
-
reduction (str, optional): The method to reduce the loss.
|
56 |
-
Options are "none", "mean" and "sum". Defaults to "mean".
|
57 |
-
loss_weight (float, optional): The weight of loss.
|
58 |
-
"""
|
59 |
-
|
60 |
-
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
|
61 |
-
super(SmoothL1Loss, self).__init__()
|
62 |
-
self.beta = beta
|
63 |
-
self.reduction = reduction
|
64 |
-
self.loss_weight = loss_weight
|
65 |
-
|
66 |
-
def forward(self,
|
67 |
-
pred,
|
68 |
-
target,
|
69 |
-
weight=None,
|
70 |
-
avg_factor=None,
|
71 |
-
reduction_override=None,
|
72 |
-
**kwargs):
|
73 |
-
"""Forward function.
|
74 |
-
|
75 |
-
Args:
|
76 |
-
pred (torch.Tensor): The prediction.
|
77 |
-
target (torch.Tensor): The learning target of the prediction.
|
78 |
-
weight (torch.Tensor, optional): The weight of loss for each
|
79 |
-
prediction. Defaults to None.
|
80 |
-
avg_factor (int, optional): Average factor that is used to average
|
81 |
-
the loss. Defaults to None.
|
82 |
-
reduction_override (str, optional): The reduction method used to
|
83 |
-
override the original reduction method of the loss.
|
84 |
-
Defaults to None.
|
85 |
-
"""
|
86 |
-
assert reduction_override in (None, 'none', 'mean', 'sum')
|
87 |
-
reduction = (
|
88 |
-
reduction_override if reduction_override else self.reduction)
|
89 |
-
loss_bbox = self.loss_weight * smooth_l1_loss(
|
90 |
-
pred,
|
91 |
-
target,
|
92 |
-
weight,
|
93 |
-
beta=self.beta,
|
94 |
-
reduction=reduction,
|
95 |
-
avg_factor=avg_factor,
|
96 |
-
**kwargs)
|
97 |
-
return loss_bbox
|
98 |
-
|
99 |
-
|
100 |
-
@LOSSES.register_module()
|
101 |
-
class L1Loss(nn.Module):
|
102 |
-
"""L1 loss.
|
103 |
-
|
104 |
-
Args:
|
105 |
-
reduction (str, optional): The method to reduce the loss.
|
106 |
-
Options are "none", "mean" and "sum".
|
107 |
-
loss_weight (float, optional): The weight of loss.
|
108 |
-
"""
|
109 |
-
|
110 |
-
def __init__(self, reduction='mean', loss_weight=1.0):
|
111 |
-
super(L1Loss, self).__init__()
|
112 |
-
self.reduction = reduction
|
113 |
-
self.loss_weight = loss_weight
|
114 |
-
|
115 |
-
def forward(self,
|
116 |
-
pred,
|
117 |
-
target,
|
118 |
-
weight=None,
|
119 |
-
avg_factor=None,
|
120 |
-
reduction_override=None):
|
121 |
-
"""Forward function.
|
122 |
-
|
123 |
-
Args:
|
124 |
-
pred (torch.Tensor): The prediction.
|
125 |
-
target (torch.Tensor): The learning target of the prediction.
|
126 |
-
weight (torch.Tensor, optional): The weight of loss for each
|
127 |
-
prediction. Defaults to None.
|
128 |
-
avg_factor (int, optional): Average factor that is used to average
|
129 |
-
the loss. Defaults to None.
|
130 |
-
reduction_override (str, optional): The reduction method used to
|
131 |
-
override the original reduction method of the loss.
|
132 |
-
Defaults to None.
|
133 |
-
"""
|
134 |
-
assert reduction_override in (None, 'none', 'mean', 'sum')
|
135 |
-
reduction = (
|
136 |
-
reduction_override if reduction_override else self.reduction)
|
137 |
-
loss_bbox = self.loss_weight * l1_loss(
|
138 |
-
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
|
139 |
-
return loss_bbox
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './fcn_r50-d8_512x512_80k_ade20k.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/AnjaneyuluChinni/AnjiChinniGenAIAvatar/app.py
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import gradio as gr
|
3 |
-
from langchain.chat_models import ChatOpenAI
|
4 |
-
from langchain import LLMChain, PromptTemplate
|
5 |
-
from langchain.memory import ConversationBufferMemory
|
6 |
-
|
7 |
-
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
|
8 |
-
|
9 |
-
template = """You are a helpful assistant to answer all user queries.
|
10 |
-
{chat_history}
|
11 |
-
User: {user_message}
|
12 |
-
Chatbot:"""
|
13 |
-
|
14 |
-
prompt = PromptTemplate(
|
15 |
-
input_variables=["chat_history", "user_message"], template=template
|
16 |
-
)
|
17 |
-
|
18 |
-
memory = ConversationBufferMemory(memory_key="chat_history")
|
19 |
-
|
20 |
-
llm_chain = LLMChain(
|
21 |
-
llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
|
22 |
-
prompt=prompt,
|
23 |
-
verbose=True,
|
24 |
-
memory=memory,
|
25 |
-
)
|
26 |
-
|
27 |
-
def get_text_response(user_message,history):
|
28 |
-
response = llm_chain.predict(user_message = user_message)
|
29 |
-
return response
|
30 |
-
|
31 |
-
demo = gr.ChatInterface(get_text_response)
|
32 |
-
|
33 |
-
if __name__ == "__main__":
|
34 |
-
demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Arnaudding001/OpenAI_whisperLive/app-network.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
# Run the app with no audio file restrictions, and make it available on the network
|
2 |
-
from app import create_ui
|
3 |
-
create_ui(-1, server_name="0.0.0.0")
|
|
|
|
|
|
|
|
spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese.py
DELETED
@@ -1,585 +0,0 @@
|
|
1 |
-
# Convert Japanese text to phonemes which is
|
2 |
-
# compatible with Julius https://github.com/julius-speech/segmentation-kit
|
3 |
-
import re
|
4 |
-
import unicodedata
|
5 |
-
|
6 |
-
from transformers import AutoTokenizer
|
7 |
-
|
8 |
-
from bert_vits2.text.symbols import *
|
9 |
-
from bert_vits2.text.japanese_bert import tokenizer
|
10 |
-
|
11 |
-
try:
|
12 |
-
import MeCab
|
13 |
-
except ImportError as e:
|
14 |
-
raise ImportError("Japanese requires mecab-python3 and unidic-lite.") from e
|
15 |
-
from num2words import num2words
|
16 |
-
|
17 |
-
_CONVRULES = [
|
18 |
-
# Conversion of 2 letters
|
19 |
-
"アァ/ a a",
|
20 |
-
"イィ/ i i",
|
21 |
-
"イェ/ i e",
|
22 |
-
"イャ/ y a",
|
23 |
-
"ウゥ/ u:",
|
24 |
-
"エェ/ e e",
|
25 |
-
"オォ/ o:",
|
26 |
-
"カァ/ k a:",
|
27 |
-
"キィ/ k i:",
|
28 |
-
"クゥ/ k u:",
|
29 |
-
"クャ/ ky a",
|
30 |
-
"クュ/ ky u",
|
31 |
-
"クョ/ ky o",
|
32 |
-
"ケェ/ k e:",
|
33 |
-
"コォ/ k o:",
|
34 |
-
"ガァ/ g a:",
|
35 |
-
"ギィ/ g i:",
|
36 |
-
"グゥ/ g u:",
|
37 |
-
"グャ/ gy a",
|
38 |
-
"グュ/ gy u",
|
39 |
-
"グョ/ gy o",
|
40 |
-
"ゲェ/ g e:",
|
41 |
-
"ゴォ/ g o:",
|
42 |
-
"サァ/ s a:",
|
43 |
-
"シィ/ sh i:",
|
44 |
-
"スゥ/ s u:",
|
45 |
-
"スャ/ sh a",
|
46 |
-
"スュ/ sh u",
|
47 |
-
"スョ/ sh o",
|
48 |
-
"セェ/ s e:",
|
49 |
-
"ソォ/ s o:",
|
50 |
-
"ザァ/ z a:",
|
51 |
-
"ジィ/ j i:",
|
52 |
-
"ズゥ/ z u:",
|
53 |
-
"ズャ/ zy a",
|
54 |
-
"ズュ/ zy u",
|
55 |
-
"ズョ/ zy o",
|
56 |
-
"ゼェ/ z e:",
|
57 |
-
"ゾォ/ z o:",
|
58 |
-
"タァ/ t a:",
|
59 |
-
"チィ/ ch i:",
|
60 |
-
"ツァ/ ts a",
|
61 |
-
"ツィ/ ts i",
|
62 |
-
"ツゥ/ ts u:",
|
63 |
-
"ツャ/ ch a",
|
64 |
-
"ツュ/ ch u",
|
65 |
-
"ツョ/ ch o",
|
66 |
-
"ツェ/ ts e",
|
67 |
-
"ツォ/ ts o",
|
68 |
-
"テェ/ t e:",
|
69 |
-
"トォ/ t o:",
|
70 |
-
"ダァ/ d a:",
|
71 |
-
"ヂィ/ j i:",
|
72 |
-
"ヅゥ/ d u:",
|
73 |
-
"ヅャ/ zy a",
|
74 |
-
"ヅュ/ zy u",
|
75 |
-
"ヅョ/ zy o",
|
76 |
-
"デェ/ d e:",
|
77 |
-
"ドォ/ d o:",
|
78 |
-
"ナァ/ n a:",
|
79 |
-
"ニィ/ n i:",
|
80 |
-
"ヌゥ/ n u:",
|
81 |
-
"ヌャ/ ny a",
|
82 |
-
"ヌュ/ ny u",
|
83 |
-
"ヌョ/ ny o",
|
84 |
-
"ネェ/ n e:",
|
85 |
-
"ノォ/ n o:",
|
86 |
-
"ハァ/ h a:",
|
87 |
-
"ヒィ/ h i:",
|
88 |
-
"フゥ/ f u:",
|
89 |
-
"フャ/ hy a",
|
90 |
-
"フュ/ hy u",
|
91 |
-
"フョ/ hy o",
|
92 |
-
"ヘェ/ h e:",
|
93 |
-
"ホォ/ h o:",
|
94 |
-
"バァ/ b a:",
|
95 |
-
"ビィ/ b i:",
|
96 |
-
"ブゥ/ b u:",
|
97 |
-
"フャ/ hy a",
|
98 |
-
"ブュ/ by u",
|
99 |
-
"フョ/ hy o",
|
100 |
-
"ベェ/ b e:",
|
101 |
-
"ボォ/ b o:",
|
102 |
-
"パァ/ p a:",
|
103 |
-
"ピィ/ p i:",
|
104 |
-
"プゥ/ p u:",
|
105 |
-
"プャ/ py a",
|
106 |
-
"プュ/ py u",
|
107 |
-
"プョ/ py o",
|
108 |
-
"ペェ/ p e:",
|
109 |
-
"ポォ/ p o:",
|
110 |
-
"マァ/ m a:",
|
111 |
-
"ミィ/ m i:",
|
112 |
-
"ムゥ/ m u:",
|
113 |
-
"ムャ/ my a",
|
114 |
-
"ムュ/ my u",
|
115 |
-
"ムョ/ my o",
|
116 |
-
"メェ/ m e:",
|
117 |
-
"モォ/ m o:",
|
118 |
-
"ヤァ/ y a:",
|
119 |
-
"ユゥ/ y u:",
|
120 |
-
"ユャ/ y a:",
|
121 |
-
"ユュ/ y u:",
|
122 |
-
"ユョ/ y o:",
|
123 |
-
"ヨォ/ y o:",
|
124 |
-
"ラァ/ r a:",
|
125 |
-
"リィ/ r i:",
|
126 |
-
"ルゥ/ r u:",
|
127 |
-
"ルャ/ ry a",
|
128 |
-
"ルュ/ ry u",
|
129 |
-
"ルョ/ ry o",
|
130 |
-
"レェ/ r e:",
|
131 |
-
"ロォ/ r o:",
|
132 |
-
"ワァ/ w a:",
|
133 |
-
"ヲォ/ o:",
|
134 |
-
"ディ/ d i",
|
135 |
-
"デェ/ d e:",
|
136 |
-
"デャ/ dy a",
|
137 |
-
"デュ/ dy u",
|
138 |
-
"デョ/ dy o",
|
139 |
-
"ティ/ t i",
|
140 |
-
"テェ/ t e:",
|
141 |
-
"テャ/ ty a",
|
142 |
-
"テュ/ ty u",
|
143 |
-
"テョ/ ty o",
|
144 |
-
"スィ/ s i",
|
145 |
-
"ズァ/ z u a",
|
146 |
-
"ズィ/ z i",
|
147 |
-
"ズゥ/ z u",
|
148 |
-
"ズャ/ zy a",
|
149 |
-
"ズュ/ zy u",
|
150 |
-
"ズョ/ zy o",
|
151 |
-
"ズェ/ z e",
|
152 |
-
"ズォ/ z o",
|
153 |
-
"キャ/ ky a",
|
154 |
-
"キュ/ ky u",
|
155 |
-
"キョ/ ky o",
|
156 |
-
"シャ/ sh a",
|
157 |
-
"シュ/ sh u",
|
158 |
-
"シェ/ sh e",
|
159 |
-
"ショ/ sh o",
|
160 |
-
"チャ/ ch a",
|
161 |
-
"チュ/ ch u",
|
162 |
-
"チェ/ ch e",
|
163 |
-
"チョ/ ch o",
|
164 |
-
"トゥ/ t u",
|
165 |
-
"トャ/ ty a",
|
166 |
-
"トュ/ ty u",
|
167 |
-
"トョ/ ty o",
|
168 |
-
"ドァ/ d o a",
|
169 |
-
"ドゥ/ d u",
|
170 |
-
"ドャ/ dy a",
|
171 |
-
"ドュ/ dy u",
|
172 |
-
"ドョ/ dy o",
|
173 |
-
"ドォ/ d o:",
|
174 |
-
"ニャ/ ny a",
|
175 |
-
"ニュ/ ny u",
|
176 |
-
"ニョ/ ny o",
|
177 |
-
"ヒャ/ hy a",
|
178 |
-
"ヒュ/ hy u",
|
179 |
-
"ヒョ/ hy o",
|
180 |
-
"ミャ/ my a",
|
181 |
-
"ミュ/ my u",
|
182 |
-
"ミョ/ my o",
|
183 |
-
"リャ/ ry a",
|
184 |
-
"リュ/ ry u",
|
185 |
-
"リョ/ ry o",
|
186 |
-
"ギャ/ gy a",
|
187 |
-
"ギュ/ gy u",
|
188 |
-
"ギョ/ gy o",
|
189 |
-
"ヂェ/ j e",
|
190 |
-
"ヂャ/ j a",
|
191 |
-
"ヂュ/ j u",
|
192 |
-
"ヂョ/ j o",
|
193 |
-
"ジェ/ j e",
|
194 |
-
"ジャ/ j a",
|
195 |
-
"ジュ/ j u",
|
196 |
-
"ジョ/ j o",
|
197 |
-
"ビャ/ by a",
|
198 |
-
"ビュ/ by u",
|
199 |
-
"ビョ/ by o",
|
200 |
-
"ピャ/ py a",
|
201 |
-
"ピュ/ py u",
|
202 |
-
"ピョ/ py o",
|
203 |
-
"ウァ/ u a",
|
204 |
-
"ウィ/ w i",
|
205 |
-
"ウェ/ w e",
|
206 |
-
"ウォ/ w o",
|
207 |
-
"ファ/ f a",
|
208 |
-
"フィ/ f i",
|
209 |
-
"フゥ/ f u",
|
210 |
-
"フャ/ hy a",
|
211 |
-
"フュ/ hy u",
|
212 |
-
"フョ/ hy o",
|
213 |
-
"フェ/ f e",
|
214 |
-
"フォ/ f o",
|
215 |
-
"ヴァ/ b a",
|
216 |
-
"ヴィ/ b i",
|
217 |
-
"ヴェ/ b e",
|
218 |
-
"ヴォ/ b o",
|
219 |
-
"ヴュ/ by u",
|
220 |
-
# Conversion of 1 letter
|
221 |
-
"ア/ a",
|
222 |
-
"イ/ i",
|
223 |
-
"ウ/ u",
|
224 |
-
"エ/ e",
|
225 |
-
"オ/ o",
|
226 |
-
"カ/ k a",
|
227 |
-
"キ/ k i",
|
228 |
-
"ク/ k u",
|
229 |
-
"ケ/ k e",
|
230 |
-
"コ/ k o",
|
231 |
-
"サ/ s a",
|
232 |
-
"シ/ sh i",
|
233 |
-
"ス/ s u",
|
234 |
-
"セ/ s e",
|
235 |
-
"ソ/ s o",
|
236 |
-
"タ/ t a",
|
237 |
-
"チ/ ch i",
|
238 |
-
"ツ/ ts u",
|
239 |
-
"テ/ t e",
|
240 |
-
"ト/ t o",
|
241 |
-
"ナ/ n a",
|
242 |
-
"ニ/ n i",
|
243 |
-
"ヌ/ n u",
|
244 |
-
"ネ/ n e",
|
245 |
-
"ノ/ n o",
|
246 |
-
"ハ/ h a",
|
247 |
-
"ヒ/ h i",
|
248 |
-
"フ/ f u",
|
249 |
-
"ヘ/ h e",
|
250 |
-
"ホ/ h o",
|
251 |
-
"マ/ m a",
|
252 |
-
"ミ/ m i",
|
253 |
-
"ム/ m u",
|
254 |
-
"メ/ m e",
|
255 |
-
"モ/ m o",
|
256 |
-
"ラ/ r a",
|
257 |
-
"リ/ r i",
|
258 |
-
"ル/ r u",
|
259 |
-
"レ/ r e",
|
260 |
-
"ロ/ r o",
|
261 |
-
"ガ/ g a",
|
262 |
-
"ギ/ g i",
|
263 |
-
"グ/ g u",
|
264 |
-
"ゲ/ g e",
|
265 |
-
"ゴ/ g o",
|
266 |
-
"ザ/ z a",
|
267 |
-
"ジ/ j i",
|
268 |
-
"ズ/ z u",
|
269 |
-
"ゼ/ z e",
|
270 |
-
"ゾ/ z o",
|
271 |
-
"ダ/ d a",
|
272 |
-
"ヂ/ j i",
|
273 |
-
"ヅ/ z u",
|
274 |
-
"デ/ d e",
|
275 |
-
"ド/ d o",
|
276 |
-
"バ/ b a",
|
277 |
-
"ビ/ b i",
|
278 |
-
"ブ/ b u",
|
279 |
-
"ベ/ b e",
|
280 |
-
"ボ/ b o",
|
281 |
-
"パ/ p a",
|
282 |
-
"ピ/ p i",
|
283 |
-
"プ/ p u",
|
284 |
-
"ペ/ p e",
|
285 |
-
"ポ/ p o",
|
286 |
-
"ヤ/ y a",
|
287 |
-
"ユ/ y u",
|
288 |
-
"ヨ/ y o",
|
289 |
-
"ワ/ w a",
|
290 |
-
"ヰ/ i",
|
291 |
-
"ヱ/ e",
|
292 |
-
"ヲ/ o",
|
293 |
-
"ン/ N",
|
294 |
-
"ッ/ q",
|
295 |
-
"ヴ/ b u",
|
296 |
-
"ー/:",
|
297 |
-
# Try converting broken text
|
298 |
-
"ァ/ a",
|
299 |
-
"ィ/ i",
|
300 |
-
"ゥ/ u",
|
301 |
-
"ェ/ e",
|
302 |
-
"ォ/ o",
|
303 |
-
"ヮ/ w a",
|
304 |
-
"ォ/ o",
|
305 |
-
# Symbols
|
306 |
-
"、/ ,",
|
307 |
-
"。/ .",
|
308 |
-
"!/ !",
|
309 |
-
"?/ ?",
|
310 |
-
"・/ ,",
|
311 |
-
]
|
312 |
-
|
313 |
-
_COLON_RX = re.compile(":+")
|
314 |
-
_REJECT_RX = re.compile("[^ a-zA-Z:,.?]")
|
315 |
-
|
316 |
-
|
317 |
-
def _makerulemap():
|
318 |
-
l = [tuple(x.split("/")) for x in _CONVRULES]
|
319 |
-
return tuple({k: v for k, v in l if len(k) == i} for i in (1, 2))
|
320 |
-
|
321 |
-
|
322 |
-
_RULEMAP1, _RULEMAP2 = _makerulemap()
|
323 |
-
|
324 |
-
|
325 |
-
def kata2phoneme(text: str) -> str:
|
326 |
-
"""Convert katakana text to phonemes."""
|
327 |
-
text = text.strip()
|
328 |
-
res = []
|
329 |
-
while text:
|
330 |
-
if len(text) >= 2:
|
331 |
-
x = _RULEMAP2.get(text[:2])
|
332 |
-
if x is not None:
|
333 |
-
text = text[2:]
|
334 |
-
res += x.split(" ")[1:]
|
335 |
-
continue
|
336 |
-
x = _RULEMAP1.get(text[0])
|
337 |
-
if x is not None:
|
338 |
-
text = text[1:]
|
339 |
-
res += x.split(" ")[1:]
|
340 |
-
continue
|
341 |
-
res.append(text[0])
|
342 |
-
text = text[1:]
|
343 |
-
# res = _COLON_RX.sub(":", res)
|
344 |
-
return res
|
345 |
-
|
346 |
-
|
347 |
-
_KATAKANA = "".join(chr(ch) for ch in range(ord("ァ"), ord("ン") + 1))
|
348 |
-
_HIRAGANA = "".join(chr(ch) for ch in range(ord("ぁ"), ord("ん") + 1))
|
349 |
-
_HIRA2KATATRANS = str.maketrans(_HIRAGANA, _KATAKANA)
|
350 |
-
|
351 |
-
|
352 |
-
def hira2kata(text: str) -> str:
|
353 |
-
text = text.translate(_HIRA2KATATRANS)
|
354 |
-
return text.replace("う゛", "ヴ")
|
355 |
-
|
356 |
-
|
357 |
-
_SYMBOL_TOKENS = set(list("・、。?!"))
|
358 |
-
_NO_YOMI_TOKENS = set(list("「」『』―()[][]"))
|
359 |
-
_TAGGER = MeCab.Tagger()
|
360 |
-
|
361 |
-
|
362 |
-
def text2kata(text: str) -> str:
|
363 |
-
parsed = _TAGGER.parse(text)
|
364 |
-
res = []
|
365 |
-
for line in parsed.split("\n"):
|
366 |
-
if line == "EOS":
|
367 |
-
break
|
368 |
-
parts = line.split("\t")
|
369 |
-
|
370 |
-
word, yomi = parts[0], parts[1]
|
371 |
-
if yomi:
|
372 |
-
res.append(yomi)
|
373 |
-
else:
|
374 |
-
if word in _SYMBOL_TOKENS:
|
375 |
-
res.append(word)
|
376 |
-
elif word in ("っ", "ッ"):
|
377 |
-
res.append("ッ")
|
378 |
-
elif word in _NO_YOMI_TOKENS:
|
379 |
-
pass
|
380 |
-
else:
|
381 |
-
res.append(word)
|
382 |
-
return hira2kata("".join(res))
|
383 |
-
|
384 |
-
|
385 |
-
_ALPHASYMBOL_YOMI = {
|
386 |
-
"#": "シャープ",
|
387 |
-
"%": "パーセント",
|
388 |
-
"&": "アンド",
|
389 |
-
"+": "プラス",
|
390 |
-
"-": "マイナス",
|
391 |
-
":": "コロン",
|
392 |
-
";": "セミコロン",
|
393 |
-
"<": "小なり",
|
394 |
-
"=": "イコール",
|
395 |
-
">": "大なり",
|
396 |
-
"@": "アット",
|
397 |
-
"a": "エー",
|
398 |
-
"b": "ビー",
|
399 |
-
"c": "シー",
|
400 |
-
"d": "ディー",
|
401 |
-
"e": "イー",
|
402 |
-
"f": "エフ",
|
403 |
-
"g": "ジー",
|
404 |
-
"h": "エイチ",
|
405 |
-
"i": "アイ",
|
406 |
-
"j": "ジェー",
|
407 |
-
"k": "ケー",
|
408 |
-
"l": "エル",
|
409 |
-
"m": "エム",
|
410 |
-
"n": "エヌ",
|
411 |
-
"o": "オー",
|
412 |
-
"p": "ピー",
|
413 |
-
"q": "キュー",
|
414 |
-
"r": "アール",
|
415 |
-
"s": "エス",
|
416 |
-
"t": "ティー",
|
417 |
-
"u": "ユー",
|
418 |
-
"v": "ブイ",
|
419 |
-
"w": "ダブリュー",
|
420 |
-
"x": "エックス",
|
421 |
-
"y": "ワイ",
|
422 |
-
"z": "ゼット",
|
423 |
-
"α": "アルファ",
|
424 |
-
"β": "ベータ",
|
425 |
-
"γ": "ガンマ",
|
426 |
-
"δ": "デルタ",
|
427 |
-
"ε": "イプシロン",
|
428 |
-
"ζ": "ゼータ",
|
429 |
-
"η": "イータ",
|
430 |
-
"θ": "シータ",
|
431 |
-
"ι": "イオタ",
|
432 |
-
"κ": "カッパ",
|
433 |
-
"λ": "ラムダ",
|
434 |
-
"μ": "ミュー",
|
435 |
-
"ν": "ニュー",
|
436 |
-
"ξ": "クサイ",
|
437 |
-
"ο": "オミクロン",
|
438 |
-
"π": "パイ",
|
439 |
-
"ρ": "ロー",
|
440 |
-
"σ": "シグマ",
|
441 |
-
"τ": "タウ",
|
442 |
-
"υ": "ウプシロン",
|
443 |
-
"φ": "ファイ",
|
444 |
-
"χ": "カイ",
|
445 |
-
"ψ": "プサイ",
|
446 |
-
"ω": "オメガ",
|
447 |
-
}
|
448 |
-
|
449 |
-
_NUMBER_WITH_SEPARATOR_RX = re.compile("[0-9]{1,3}(,[0-9]{3})+")
|
450 |
-
_CURRENCY_MAP = {"$": "ドル", "¥": "円", "£": "ポンド", "€": "ユーロ"}
|
451 |
-
_CURRENCY_RX = re.compile(r"([$¥£€])([0-9.]*[0-9])")
|
452 |
-
_NUMBER_RX = re.compile(r"[0-9]+(\.[0-9]+)?")
|
453 |
-
|
454 |
-
|
455 |
-
def japanese_convert_numbers_to_words(text: str) -> str:
|
456 |
-
res = _NUMBER_WITH_SEPARATOR_RX.sub(lambda m: m[0].replace(",", ""), text)
|
457 |
-
res = _CURRENCY_RX.sub(lambda m: m[2] + _CURRENCY_MAP.get(m[1], m[1]), res)
|
458 |
-
res = _NUMBER_RX.sub(lambda m: num2words(m[0], lang="ja"), res)
|
459 |
-
return res
|
460 |
-
|
461 |
-
|
462 |
-
def japanese_convert_alpha_symbols_to_words(text: str) -> str:
|
463 |
-
return "".join([_ALPHASYMBOL_YOMI.get(ch, ch) for ch in text.lower()])
|
464 |
-
|
465 |
-
|
466 |
-
def japanese_text_to_phonemes(text: str) -> str:
|
467 |
-
"""Convert Japanese text to phonemes."""
|
468 |
-
res = unicodedata.normalize("NFKC", text)
|
469 |
-
res = japanese_convert_numbers_to_words(res)
|
470 |
-
# res = japanese_convert_alpha_symbols_to_words(res)
|
471 |
-
res = text2kata(res)
|
472 |
-
res = kata2phoneme(res)
|
473 |
-
return res
|
474 |
-
|
475 |
-
|
476 |
-
def is_japanese_character(char):
|
477 |
-
# 定义日语文字系统的 Unicode 范围
|
478 |
-
japanese_ranges = [
|
479 |
-
(0x3040, 0x309F), # 平假名
|
480 |
-
(0x30A0, 0x30FF), # 片假名
|
481 |
-
(0x4E00, 0x9FFF), # 汉字 (CJK Unified Ideographs)
|
482 |
-
(0x3400, 0x4DBF), # 汉字扩展 A
|
483 |
-
(0x20000, 0x2A6DF), # 汉字扩展 B
|
484 |
-
# 可以根据需要添加其他汉字扩展范围
|
485 |
-
]
|
486 |
-
|
487 |
-
# 将字符的 Unicode 编码转换为整数
|
488 |
-
char_code = ord(char)
|
489 |
-
|
490 |
-
# 检查字符是否在任何一个日语范围内
|
491 |
-
for start, end in japanese_ranges:
|
492 |
-
if start <= char_code <= end:
|
493 |
-
return True
|
494 |
-
|
495 |
-
return False
|
496 |
-
|
497 |
-
|
498 |
-
rep_map = {
|
499 |
-
":": ",",
|
500 |
-
";": ",",
|
501 |
-
",": ",",
|
502 |
-
"。": ".",
|
503 |
-
"!": "!",
|
504 |
-
"?": "?",
|
505 |
-
"\n": ".",
|
506 |
-
"·": ",",
|
507 |
-
"、": ",",
|
508 |
-
"...": "…",
|
509 |
-
}
|
510 |
-
|
511 |
-
|
512 |
-
def replace_punctuation(text):
|
513 |
-
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
|
514 |
-
|
515 |
-
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
|
516 |
-
|
517 |
-
replaced_text = re.sub(
|
518 |
-
r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF"
|
519 |
-
+ "".join(punctuation)
|
520 |
-
+ r"]+",
|
521 |
-
"",
|
522 |
-
replaced_text,
|
523 |
-
)
|
524 |
-
|
525 |
-
return replaced_text
|
526 |
-
|
527 |
-
|
528 |
-
def text_normalize(text):
|
529 |
-
res = unicodedata.normalize("NFKC", text)
|
530 |
-
res = japanese_convert_numbers_to_words(res)
|
531 |
-
# res = "".join([i for i in res if is_japanese_character(i)])
|
532 |
-
res = replace_punctuation(res)
|
533 |
-
return res
|
534 |
-
|
535 |
-
|
536 |
-
def distribute_phone(n_phone, n_word):
|
537 |
-
phones_per_word = [0] * n_word
|
538 |
-
for task in range(n_phone):
|
539 |
-
min_tasks = min(phones_per_word)
|
540 |
-
min_index = phones_per_word.index(min_tasks)
|
541 |
-
phones_per_word[min_index] += 1
|
542 |
-
return phones_per_word
|
543 |
-
|
544 |
-
|
545 |
-
def g2p(norm_text):
|
546 |
-
tokenized = tokenizer.tokenize(norm_text)
|
547 |
-
phs = []
|
548 |
-
ph_groups = []
|
549 |
-
for t in tokenized:
|
550 |
-
if not t.startswith("#"):
|
551 |
-
ph_groups.append([t])
|
552 |
-
else:
|
553 |
-
ph_groups[-1].append(t.replace("#", ""))
|
554 |
-
word2ph = []
|
555 |
-
for group in ph_groups:
|
556 |
-
phonemes = kata2phoneme(text2kata("".join(group)))
|
557 |
-
# phonemes = [i for i in phonemes if i in symbols]
|
558 |
-
for i in phonemes:
|
559 |
-
assert i in symbols, (i, group, norm_text, tokenized)
|
560 |
-
phone_len = len(phonemes)
|
561 |
-
word_len = len(group)
|
562 |
-
|
563 |
-
aaa = distribute_phone(phone_len, word_len)
|
564 |
-
word2ph += aaa
|
565 |
-
|
566 |
-
phs += phonemes
|
567 |
-
phones = ["_"] + phs + ["_"]
|
568 |
-
tones = [0 for i in phones]
|
569 |
-
word2ph = [1] + word2ph + [1]
|
570 |
-
return phones, tones, word2ph
|
571 |
-
|
572 |
-
|
573 |
-
if __name__ == "__main__":
|
574 |
-
from config import ABS_PATH
|
575 |
-
|
576 |
-
tokenizer = AutoTokenizer.from_pretrained(ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3")
|
577 |
-
text = "hello,こんにちは、世界!……"
|
578 |
-
from bert_vits2.text.japanese_bert import get_bert_feature
|
579 |
-
|
580 |
-
text = text_normalize(text)
|
581 |
-
print(text)
|
582 |
-
phones, tones, word2ph = g2p(text)
|
583 |
-
bert = get_bert_feature(text, word2ph)
|
584 |
-
|
585 |
-
print(phones, tones, word2ph, bert.shape)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Artrajz/vits-simple-api/vits/text/vits_pinyin.py
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
""" from https://github.com/PlayVoice/vits_chinese """
|
2 |
-
import pypinyin
|
3 |
-
from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin
|
4 |
-
from pypinyin.converter import DefaultConverter
|
5 |
-
from pypinyin.core import Pinyin
|
6 |
-
|
7 |
-
import numpy as np
|
8 |
-
|
9 |
-
from vits.bert.prosody_tool import pinyin_dict
|
10 |
-
from vits.bert import TTSProsody
|
11 |
-
|
12 |
-
|
13 |
-
class MyConverter(NeutralToneWith5Mixin, DefaultConverter):
|
14 |
-
pass
|
15 |
-
|
16 |
-
|
17 |
-
def is_chinese(uchar):
|
18 |
-
if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
|
19 |
-
return True
|
20 |
-
else:
|
21 |
-
return False
|
22 |
-
|
23 |
-
|
24 |
-
def clean_chinese(text: str):
|
25 |
-
text = text.strip()
|
26 |
-
text_clean = []
|
27 |
-
for char in text:
|
28 |
-
if (is_chinese(char)):
|
29 |
-
text_clean.append(char)
|
30 |
-
else:
|
31 |
-
if len(text_clean) > 1 and is_chinese(text_clean[-1]):
|
32 |
-
text_clean.append(',')
|
33 |
-
text_clean = ''.join(text_clean).strip(',')
|
34 |
-
return text_clean
|
35 |
-
|
36 |
-
|
37 |
-
class VITS_PinYin:
|
38 |
-
def __init__(self, bert_path, device):
|
39 |
-
self.pinyin_parser = Pinyin(MyConverter())
|
40 |
-
self.prosody = TTSProsody(bert_path, device)
|
41 |
-
|
42 |
-
def chinese_to_phonemes(self, text):
|
43 |
-
# 考虑使用g2pw的chinese bert替换原始的pypinyin,目前测试下来运行速度太慢。
|
44 |
-
# 将标准中文文本符号替换成 bert 符号库中的单符号,以保证bert的效果.
|
45 |
-
text = text.replace("——", "...") \
|
46 |
-
.replace("—", "...") \
|
47 |
-
.replace("……", "...") \
|
48 |
-
.replace("…", "...") \
|
49 |
-
.replace('“', '"') \
|
50 |
-
.replace('”', '"') \
|
51 |
-
.replace("\n", "")
|
52 |
-
tokens = self.prosody.char_model.tokenizer.tokenize(text)
|
53 |
-
text = ''.join(tokens)
|
54 |
-
assert not tokens.count("[UNK]")
|
55 |
-
pinyins = np.reshape(pypinyin.pinyin(text, style=pypinyin.TONE3), (-1))
|
56 |
-
try:
|
57 |
-
phone_index = 0
|
58 |
-
phone_items = []
|
59 |
-
phone_items.append('sil')
|
60 |
-
count_phone = []
|
61 |
-
count_phone.append(1)
|
62 |
-
temp = ""
|
63 |
-
|
64 |
-
len_pys = len(tokens)
|
65 |
-
for word in tokens:
|
66 |
-
if is_chinese(word):
|
67 |
-
count_phone.append(2)
|
68 |
-
if (phone_index >= len_pys):
|
69 |
-
print(
|
70 |
-
f"!!!![{text}]plz check ur text whether includes MULTIBYTE symbol.\
|
71 |
-
(请检查你的文本中是否包含多字节符号)")
|
72 |
-
pinyin = pinyins[phone_index]
|
73 |
-
phone_index = phone_index + 1
|
74 |
-
if not pinyin[-1].isdigit():
|
75 |
-
pinyin += "5"
|
76 |
-
if pinyin[:-1] in pinyin_dict:
|
77 |
-
tone = pinyin[-1]
|
78 |
-
a = pinyin[:-1]
|
79 |
-
a1, a2 = pinyin_dict[a]
|
80 |
-
phone_items += [a1, a2 + tone]
|
81 |
-
else:
|
82 |
-
temp += word
|
83 |
-
if temp == pinyins[phone_index]:
|
84 |
-
temp = ""
|
85 |
-
phone_index += 1
|
86 |
-
count_phone.append(1)
|
87 |
-
phone_items.append('sp')
|
88 |
-
|
89 |
-
count_phone.append(1)
|
90 |
-
phone_items.append('sil')
|
91 |
-
phone_items_str = ' '.join(phone_items)
|
92 |
-
except IndexError as e:
|
93 |
-
print('except:', e)
|
94 |
-
|
95 |
-
text = f'[PAD]{text}[PAD]'
|
96 |
-
char_embeds = self.prosody.get_char_embeds(text)
|
97 |
-
char_embeds = self.prosody.expand_for_phone(char_embeds, count_phone)
|
98 |
-
return phone_items_str, char_embeds
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/specifiers.py
DELETED
@@ -1,802 +0,0 @@
|
|
1 |
-
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
-
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
-
# for complete details.
|
4 |
-
|
5 |
-
import abc
|
6 |
-
import functools
|
7 |
-
import itertools
|
8 |
-
import re
|
9 |
-
import warnings
|
10 |
-
from typing import (
|
11 |
-
Callable,
|
12 |
-
Dict,
|
13 |
-
Iterable,
|
14 |
-
Iterator,
|
15 |
-
List,
|
16 |
-
Optional,
|
17 |
-
Pattern,
|
18 |
-
Set,
|
19 |
-
Tuple,
|
20 |
-
TypeVar,
|
21 |
-
Union,
|
22 |
-
)
|
23 |
-
|
24 |
-
from .utils import canonicalize_version
|
25 |
-
from .version import LegacyVersion, Version, parse
|
26 |
-
|
27 |
-
ParsedVersion = Union[Version, LegacyVersion]
|
28 |
-
UnparsedVersion = Union[Version, LegacyVersion, str]
|
29 |
-
VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
|
30 |
-
CallableOperator = Callable[[ParsedVersion, str], bool]
|
31 |
-
|
32 |
-
|
33 |
-
class InvalidSpecifier(ValueError):
|
34 |
-
"""
|
35 |
-
An invalid specifier was found, users should refer to PEP 440.
|
36 |
-
"""
|
37 |
-
|
38 |
-
|
39 |
-
class BaseSpecifier(metaclass=abc.ABCMeta):
|
40 |
-
@abc.abstractmethod
|
41 |
-
def __str__(self) -> str:
|
42 |
-
"""
|
43 |
-
Returns the str representation of this Specifier like object. This
|
44 |
-
should be representative of the Specifier itself.
|
45 |
-
"""
|
46 |
-
|
47 |
-
@abc.abstractmethod
|
48 |
-
def __hash__(self) -> int:
|
49 |
-
"""
|
50 |
-
Returns a hash value for this Specifier like object.
|
51 |
-
"""
|
52 |
-
|
53 |
-
@abc.abstractmethod
|
54 |
-
def __eq__(self, other: object) -> bool:
|
55 |
-
"""
|
56 |
-
Returns a boolean representing whether or not the two Specifier like
|
57 |
-
objects are equal.
|
58 |
-
"""
|
59 |
-
|
60 |
-
@abc.abstractproperty
|
61 |
-
def prereleases(self) -> Optional[bool]:
|
62 |
-
"""
|
63 |
-
Returns whether or not pre-releases as a whole are allowed by this
|
64 |
-
specifier.
|
65 |
-
"""
|
66 |
-
|
67 |
-
@prereleases.setter
|
68 |
-
def prereleases(self, value: bool) -> None:
|
69 |
-
"""
|
70 |
-
Sets whether or not pre-releases as a whole are allowed by this
|
71 |
-
specifier.
|
72 |
-
"""
|
73 |
-
|
74 |
-
@abc.abstractmethod
|
75 |
-
def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
|
76 |
-
"""
|
77 |
-
Determines if the given item is contained within this specifier.
|
78 |
-
"""
|
79 |
-
|
80 |
-
@abc.abstractmethod
|
81 |
-
def filter(
|
82 |
-
self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
|
83 |
-
) -> Iterable[VersionTypeVar]:
|
84 |
-
"""
|
85 |
-
Takes an iterable of items and filters them so that only items which
|
86 |
-
are contained within this specifier are allowed in it.
|
87 |
-
"""
|
88 |
-
|
89 |
-
|
90 |
-
class _IndividualSpecifier(BaseSpecifier):
|
91 |
-
|
92 |
-
_operators: Dict[str, str] = {}
|
93 |
-
_regex: Pattern[str]
|
94 |
-
|
95 |
-
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
|
96 |
-
match = self._regex.search(spec)
|
97 |
-
if not match:
|
98 |
-
raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
|
99 |
-
|
100 |
-
self._spec: Tuple[str, str] = (
|
101 |
-
match.group("operator").strip(),
|
102 |
-
match.group("version").strip(),
|
103 |
-
)
|
104 |
-
|
105 |
-
# Store whether or not this Specifier should accept prereleases
|
106 |
-
self._prereleases = prereleases
|
107 |
-
|
108 |
-
def __repr__(self) -> str:
|
109 |
-
pre = (
|
110 |
-
f", prereleases={self.prereleases!r}"
|
111 |
-
if self._prereleases is not None
|
112 |
-
else ""
|
113 |
-
)
|
114 |
-
|
115 |
-
return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
|
116 |
-
|
117 |
-
def __str__(self) -> str:
|
118 |
-
return "{}{}".format(*self._spec)
|
119 |
-
|
120 |
-
@property
|
121 |
-
def _canonical_spec(self) -> Tuple[str, str]:
|
122 |
-
return self._spec[0], canonicalize_version(self._spec[1])
|
123 |
-
|
124 |
-
def __hash__(self) -> int:
|
125 |
-
return hash(self._canonical_spec)
|
126 |
-
|
127 |
-
def __eq__(self, other: object) -> bool:
|
128 |
-
if isinstance(other, str):
|
129 |
-
try:
|
130 |
-
other = self.__class__(str(other))
|
131 |
-
except InvalidSpecifier:
|
132 |
-
return NotImplemented
|
133 |
-
elif not isinstance(other, self.__class__):
|
134 |
-
return NotImplemented
|
135 |
-
|
136 |
-
return self._canonical_spec == other._canonical_spec
|
137 |
-
|
138 |
-
def _get_operator(self, op: str) -> CallableOperator:
|
139 |
-
operator_callable: CallableOperator = getattr(
|
140 |
-
self, f"_compare_{self._operators[op]}"
|
141 |
-
)
|
142 |
-
return operator_callable
|
143 |
-
|
144 |
-
def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
|
145 |
-
if not isinstance(version, (LegacyVersion, Version)):
|
146 |
-
version = parse(version)
|
147 |
-
return version
|
148 |
-
|
149 |
-
@property
|
150 |
-
def operator(self) -> str:
|
151 |
-
return self._spec[0]
|
152 |
-
|
153 |
-
@property
|
154 |
-
def version(self) -> str:
|
155 |
-
return self._spec[1]
|
156 |
-
|
157 |
-
@property
|
158 |
-
def prereleases(self) -> Optional[bool]:
|
159 |
-
return self._prereleases
|
160 |
-
|
161 |
-
@prereleases.setter
|
162 |
-
def prereleases(self, value: bool) -> None:
|
163 |
-
self._prereleases = value
|
164 |
-
|
165 |
-
def __contains__(self, item: str) -> bool:
|
166 |
-
return self.contains(item)
|
167 |
-
|
168 |
-
def contains(
|
169 |
-
self, item: UnparsedVersion, prereleases: Optional[bool] = None
|
170 |
-
) -> bool:
|
171 |
-
|
172 |
-
# Determine if prereleases are to be allowed or not.
|
173 |
-
if prereleases is None:
|
174 |
-
prereleases = self.prereleases
|
175 |
-
|
176 |
-
# Normalize item to a Version or LegacyVersion, this allows us to have
|
177 |
-
# a shortcut for ``"2.0" in Specifier(">=2")
|
178 |
-
normalized_item = self._coerce_version(item)
|
179 |
-
|
180 |
-
# Determine if we should be supporting prereleases in this specifier
|
181 |
-
# or not, if we do not support prereleases than we can short circuit
|
182 |
-
# logic if this version is a prereleases.
|
183 |
-
if normalized_item.is_prerelease and not prereleases:
|
184 |
-
return False
|
185 |
-
|
186 |
-
# Actually do the comparison to determine if this item is contained
|
187 |
-
# within this Specifier or not.
|
188 |
-
operator_callable: CallableOperator = self._get_operator(self.operator)
|
189 |
-
return operator_callable(normalized_item, self.version)
|
190 |
-
|
191 |
-
def filter(
|
192 |
-
self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
|
193 |
-
) -> Iterable[VersionTypeVar]:
|
194 |
-
|
195 |
-
yielded = False
|
196 |
-
found_prereleases = []
|
197 |
-
|
198 |
-
kw = {"prereleases": prereleases if prereleases is not None else True}
|
199 |
-
|
200 |
-
# Attempt to iterate over all the values in the iterable and if any of
|
201 |
-
# them match, yield them.
|
202 |
-
for version in iterable:
|
203 |
-
parsed_version = self._coerce_version(version)
|
204 |
-
|
205 |
-
if self.contains(parsed_version, **kw):
|
206 |
-
# If our version is a prerelease, and we were not set to allow
|
207 |
-
# prereleases, then we'll store it for later in case nothing
|
208 |
-
# else matches this specifier.
|
209 |
-
if parsed_version.is_prerelease and not (
|
210 |
-
prereleases or self.prereleases
|
211 |
-
):
|
212 |
-
found_prereleases.append(version)
|
213 |
-
# Either this is not a prerelease, or we should have been
|
214 |
-
# accepting prereleases from the beginning.
|
215 |
-
else:
|
216 |
-
yielded = True
|
217 |
-
yield version
|
218 |
-
|
219 |
-
# Now that we've iterated over everything, determine if we've yielded
|
220 |
-
# any values, and if we have not and we have any prereleases stored up
|
221 |
-
# then we will go ahead and yield the prereleases.
|
222 |
-
if not yielded and found_prereleases:
|
223 |
-
for version in found_prereleases:
|
224 |
-
yield version
|
225 |
-
|
226 |
-
|
227 |
-
class LegacySpecifier(_IndividualSpecifier):
|
228 |
-
|
229 |
-
_regex_str = r"""
|
230 |
-
(?P<operator>(==|!=|<=|>=|<|>))
|
231 |
-
\s*
|
232 |
-
(?P<version>
|
233 |
-
[^,;\s)]* # Since this is a "legacy" specifier, and the version
|
234 |
-
# string can be just about anything, we match everything
|
235 |
-
# except for whitespace, a semi-colon for marker support,
|
236 |
-
# a closing paren since versions can be enclosed in
|
237 |
-
# them, and a comma since it's a version separator.
|
238 |
-
)
|
239 |
-
"""
|
240 |
-
|
241 |
-
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
|
242 |
-
|
243 |
-
_operators = {
|
244 |
-
"==": "equal",
|
245 |
-
"!=": "not_equal",
|
246 |
-
"<=": "less_than_equal",
|
247 |
-
">=": "greater_than_equal",
|
248 |
-
"<": "less_than",
|
249 |
-
">": "greater_than",
|
250 |
-
}
|
251 |
-
|
252 |
-
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
|
253 |
-
super().__init__(spec, prereleases)
|
254 |
-
|
255 |
-
warnings.warn(
|
256 |
-
"Creating a LegacyVersion has been deprecated and will be "
|
257 |
-
"removed in the next major release",
|
258 |
-
DeprecationWarning,
|
259 |
-
)
|
260 |
-
|
261 |
-
def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
|
262 |
-
if not isinstance(version, LegacyVersion):
|
263 |
-
version = LegacyVersion(str(version))
|
264 |
-
return version
|
265 |
-
|
266 |
-
def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
|
267 |
-
return prospective == self._coerce_version(spec)
|
268 |
-
|
269 |
-
def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
|
270 |
-
return prospective != self._coerce_version(spec)
|
271 |
-
|
272 |
-
def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
|
273 |
-
return prospective <= self._coerce_version(spec)
|
274 |
-
|
275 |
-
def _compare_greater_than_equal(
|
276 |
-
self, prospective: LegacyVersion, spec: str
|
277 |
-
) -> bool:
|
278 |
-
return prospective >= self._coerce_version(spec)
|
279 |
-
|
280 |
-
def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
|
281 |
-
return prospective < self._coerce_version(spec)
|
282 |
-
|
283 |
-
def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
|
284 |
-
return prospective > self._coerce_version(spec)
|
285 |
-
|
286 |
-
|
287 |
-
def _require_version_compare(
|
288 |
-
fn: Callable[["Specifier", ParsedVersion, str], bool]
|
289 |
-
) -> Callable[["Specifier", ParsedVersion, str], bool]:
|
290 |
-
@functools.wraps(fn)
|
291 |
-
def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
|
292 |
-
if not isinstance(prospective, Version):
|
293 |
-
return False
|
294 |
-
return fn(self, prospective, spec)
|
295 |
-
|
296 |
-
return wrapped
|
297 |
-
|
298 |
-
|
299 |
-
class Specifier(_IndividualSpecifier):
|
300 |
-
|
301 |
-
_regex_str = r"""
|
302 |
-
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
|
303 |
-
(?P<version>
|
304 |
-
(?:
|
305 |
-
# The identity operators allow for an escape hatch that will
|
306 |
-
# do an exact string match of the version you wish to install.
|
307 |
-
# This will not be parsed by PEP 440 and we cannot determine
|
308 |
-
# any semantic meaning from it. This operator is discouraged
|
309 |
-
# but included entirely as an escape hatch.
|
310 |
-
(?<====) # Only match for the identity operator
|
311 |
-
\s*
|
312 |
-
[^\s]* # We just match everything, except for whitespace
|
313 |
-
# since we are only testing for strict identity.
|
314 |
-
)
|
315 |
-
|
|
316 |
-
(?:
|
317 |
-
# The (non)equality operators allow for wild card and local
|
318 |
-
# versions to be specified so we have to define these two
|
319 |
-
# operators separately to enable that.
|
320 |
-
(?<===|!=) # Only match for equals and not equals
|
321 |
-
|
322 |
-
\s*
|
323 |
-
v?
|
324 |
-
(?:[0-9]+!)? # epoch
|
325 |
-
[0-9]+(?:\.[0-9]+)* # release
|
326 |
-
(?: # pre release
|
327 |
-
[-_\.]?
|
328 |
-
(a|b|c|rc|alpha|beta|pre|preview)
|
329 |
-
[-_\.]?
|
330 |
-
[0-9]*
|
331 |
-
)?
|
332 |
-
(?: # post release
|
333 |
-
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
|
334 |
-
)?
|
335 |
-
|
336 |
-
# You cannot use a wild card and a dev or local version
|
337 |
-
# together so group them with a | and make them optional.
|
338 |
-
(?:
|
339 |
-
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
|
340 |
-
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
341 |
-
|
|
342 |
-
\.\* # Wild card syntax of .*
|
343 |
-
)?
|
344 |
-
)
|
345 |
-
|
|
346 |
-
(?:
|
347 |
-
# The compatible operator requires at least two digits in the
|
348 |
-
# release segment.
|
349 |
-
(?<=~=) # Only match for the compatible operator
|
350 |
-
|
351 |
-
\s*
|
352 |
-
v?
|
353 |
-
(?:[0-9]+!)? # epoch
|
354 |
-
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
|
355 |
-
(?: # pre release
|
356 |
-
[-_\.]?
|
357 |
-
(a|b|c|rc|alpha|beta|pre|preview)
|
358 |
-
[-_\.]?
|
359 |
-
[0-9]*
|
360 |
-
)?
|
361 |
-
(?: # post release
|
362 |
-
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
|
363 |
-
)?
|
364 |
-
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
|
365 |
-
)
|
366 |
-
|
|
367 |
-
(?:
|
368 |
-
# All other operators only allow a sub set of what the
|
369 |
-
# (non)equality operators do. Specifically they do not allow
|
370 |
-
# local versions to be specified nor do they allow the prefix
|
371 |
-
# matching wild cards.
|
372 |
-
(?<!==|!=|~=) # We have special cases for these
|
373 |
-
# operators so we want to make sure they
|
374 |
-
# don't match here.
|
375 |
-
|
376 |
-
\s*
|
377 |
-
v?
|
378 |
-
(?:[0-9]+!)? # epoch
|
379 |
-
[0-9]+(?:\.[0-9]+)* # release
|
380 |
-
(?: # pre release
|
381 |
-
[-_\.]?
|
382 |
-
(a|b|c|rc|alpha|beta|pre|preview)
|
383 |
-
[-_\.]?
|
384 |
-
[0-9]*
|
385 |
-
)?
|
386 |
-
(?: # post release
|
387 |
-
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
|
388 |
-
)?
|
389 |
-
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
|
390 |
-
)
|
391 |
-
)
|
392 |
-
"""
|
393 |
-
|
394 |
-
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
|
395 |
-
|
396 |
-
_operators = {
|
397 |
-
"~=": "compatible",
|
398 |
-
"==": "equal",
|
399 |
-
"!=": "not_equal",
|
400 |
-
"<=": "less_than_equal",
|
401 |
-
">=": "greater_than_equal",
|
402 |
-
"<": "less_than",
|
403 |
-
">": "greater_than",
|
404 |
-
"===": "arbitrary",
|
405 |
-
}
|
406 |
-
|
407 |
-
@_require_version_compare
|
408 |
-
def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
|
409 |
-
|
410 |
-
# Compatible releases have an equivalent combination of >= and ==. That
|
411 |
-
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
|
412 |
-
# implement this in terms of the other specifiers instead of
|
413 |
-
# implementing it ourselves. The only thing we need to do is construct
|
414 |
-
# the other specifiers.
|
415 |
-
|
416 |
-
# We want everything but the last item in the version, but we want to
|
417 |
-
# ignore suffix segments.
|
418 |
-
prefix = ".".join(
|
419 |
-
list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
|
420 |
-
)
|
421 |
-
|
422 |
-
# Add the prefix notation to the end of our string
|
423 |
-
prefix += ".*"
|
424 |
-
|
425 |
-
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
|
426 |
-
prospective, prefix
|
427 |
-
)
|
428 |
-
|
429 |
-
@_require_version_compare
|
430 |
-
def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
|
431 |
-
|
432 |
-
# We need special logic to handle prefix matching
|
433 |
-
if spec.endswith(".*"):
|
434 |
-
# In the case of prefix matching we want to ignore local segment.
|
435 |
-
prospective = Version(prospective.public)
|
436 |
-
# Split the spec out by dots, and pretend that there is an implicit
|
437 |
-
# dot in between a release segment and a pre-release segment.
|
438 |
-
split_spec = _version_split(spec[:-2]) # Remove the trailing .*
|
439 |
-
|
440 |
-
# Split the prospective version out by dots, and pretend that there
|
441 |
-
# is an implicit dot in between a release segment and a pre-release
|
442 |
-
# segment.
|
443 |
-
split_prospective = _version_split(str(prospective))
|
444 |
-
|
445 |
-
# Shorten the prospective version to be the same length as the spec
|
446 |
-
# so that we can determine if the specifier is a prefix of the
|
447 |
-
# prospective version or not.
|
448 |
-
shortened_prospective = split_prospective[: len(split_spec)]
|
449 |
-
|
450 |
-
# Pad out our two sides with zeros so that they both equal the same
|
451 |
-
# length.
|
452 |
-
padded_spec, padded_prospective = _pad_version(
|
453 |
-
split_spec, shortened_prospective
|
454 |
-
)
|
455 |
-
|
456 |
-
return padded_prospective == padded_spec
|
457 |
-
else:
|
458 |
-
# Convert our spec string into a Version
|
459 |
-
spec_version = Version(spec)
|
460 |
-
|
461 |
-
# If the specifier does not have a local segment, then we want to
|
462 |
-
# act as if the prospective version also does not have a local
|
463 |
-
# segment.
|
464 |
-
if not spec_version.local:
|
465 |
-
prospective = Version(prospective.public)
|
466 |
-
|
467 |
-
return prospective == spec_version
|
468 |
-
|
469 |
-
@_require_version_compare
|
470 |
-
def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
|
471 |
-
return not self._compare_equal(prospective, spec)
|
472 |
-
|
473 |
-
@_require_version_compare
|
474 |
-
def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
|
475 |
-
|
476 |
-
# NB: Local version identifiers are NOT permitted in the version
|
477 |
-
# specifier, so local version labels can be universally removed from
|
478 |
-
# the prospective version.
|
479 |
-
return Version(prospective.public) <= Version(spec)
|
480 |
-
|
481 |
-
@_require_version_compare
|
482 |
-
def _compare_greater_than_equal(
|
483 |
-
self, prospective: ParsedVersion, spec: str
|
484 |
-
) -> bool:
|
485 |
-
|
486 |
-
# NB: Local version identifiers are NOT permitted in the version
|
487 |
-
# specifier, so local version labels can be universally removed from
|
488 |
-
# the prospective version.
|
489 |
-
return Version(prospective.public) >= Version(spec)
|
490 |
-
|
491 |
-
@_require_version_compare
|
492 |
-
def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
|
493 |
-
|
494 |
-
# Convert our spec to a Version instance, since we'll want to work with
|
495 |
-
# it as a version.
|
496 |
-
spec = Version(spec_str)
|
497 |
-
|
498 |
-
# Check to see if the prospective version is less than the spec
|
499 |
-
# version. If it's not we can short circuit and just return False now
|
500 |
-
# instead of doing extra unneeded work.
|
501 |
-
if not prospective < spec:
|
502 |
-
return False
|
503 |
-
|
504 |
-
# This special case is here so that, unless the specifier itself
|
505 |
-
# includes is a pre-release version, that we do not accept pre-release
|
506 |
-
# versions for the version mentioned in the specifier (e.g. <3.1 should
|
507 |
-
# not match 3.1.dev0, but should match 3.0.dev0).
|
508 |
-
if not spec.is_prerelease and prospective.is_prerelease:
|
509 |
-
if Version(prospective.base_version) == Version(spec.base_version):
|
510 |
-
return False
|
511 |
-
|
512 |
-
# If we've gotten to here, it means that prospective version is both
|
513 |
-
# less than the spec version *and* it's not a pre-release of the same
|
514 |
-
# version in the spec.
|
515 |
-
return True
|
516 |
-
|
517 |
-
@_require_version_compare
|
518 |
-
def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
|
519 |
-
|
520 |
-
# Convert our spec to a Version instance, since we'll want to work with
|
521 |
-
# it as a version.
|
522 |
-
spec = Version(spec_str)
|
523 |
-
|
524 |
-
# Check to see if the prospective version is greater than the spec
|
525 |
-
# version. If it's not we can short circuit and just return False now
|
526 |
-
# instead of doing extra unneeded work.
|
527 |
-
if not prospective > spec:
|
528 |
-
return False
|
529 |
-
|
530 |
-
# This special case is here so that, unless the specifier itself
|
531 |
-
# includes is a post-release version, that we do not accept
|
532 |
-
# post-release versions for the version mentioned in the specifier
|
533 |
-
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
|
534 |
-
if not spec.is_postrelease and prospective.is_postrelease:
|
535 |
-
if Version(prospective.base_version) == Version(spec.base_version):
|
536 |
-
return False
|
537 |
-
|
538 |
-
# Ensure that we do not allow a local version of the version mentioned
|
539 |
-
# in the specifier, which is technically greater than, to match.
|
540 |
-
if prospective.local is not None:
|
541 |
-
if Version(prospective.base_version) == Version(spec.base_version):
|
542 |
-
return False
|
543 |
-
|
544 |
-
# If we've gotten to here, it means that prospective version is both
|
545 |
-
# greater than the spec version *and* it's not a pre-release of the
|
546 |
-
# same version in the spec.
|
547 |
-
return True
|
548 |
-
|
549 |
-
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
|
550 |
-
return str(prospective).lower() == str(spec).lower()
|
551 |
-
|
552 |
-
@property
|
553 |
-
def prereleases(self) -> bool:
|
554 |
-
|
555 |
-
# If there is an explicit prereleases set for this, then we'll just
|
556 |
-
# blindly use that.
|
557 |
-
if self._prereleases is not None:
|
558 |
-
return self._prereleases
|
559 |
-
|
560 |
-
# Look at all of our specifiers and determine if they are inclusive
|
561 |
-
# operators, and if they are if they are including an explicit
|
562 |
-
# prerelease.
|
563 |
-
operator, version = self._spec
|
564 |
-
if operator in ["==", ">=", "<=", "~=", "==="]:
|
565 |
-
# The == specifier can include a trailing .*, if it does we
|
566 |
-
# want to remove before parsing.
|
567 |
-
if operator == "==" and version.endswith(".*"):
|
568 |
-
version = version[:-2]
|
569 |
-
|
570 |
-
# Parse the version, and if it is a pre-release than this
|
571 |
-
# specifier allows pre-releases.
|
572 |
-
if parse(version).is_prerelease:
|
573 |
-
return True
|
574 |
-
|
575 |
-
return False
|
576 |
-
|
577 |
-
@prereleases.setter
|
578 |
-
def prereleases(self, value: bool) -> None:
|
579 |
-
self._prereleases = value
|
580 |
-
|
581 |
-
|
582 |
-
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
|
583 |
-
|
584 |
-
|
585 |
-
def _version_split(version: str) -> List[str]:
|
586 |
-
result: List[str] = []
|
587 |
-
for item in version.split("."):
|
588 |
-
match = _prefix_regex.search(item)
|
589 |
-
if match:
|
590 |
-
result.extend(match.groups())
|
591 |
-
else:
|
592 |
-
result.append(item)
|
593 |
-
return result
|
594 |
-
|
595 |
-
|
596 |
-
def _is_not_suffix(segment: str) -> bool:
|
597 |
-
return not any(
|
598 |
-
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
|
599 |
-
)
|
600 |
-
|
601 |
-
|
602 |
-
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
|
603 |
-
left_split, right_split = [], []
|
604 |
-
|
605 |
-
# Get the release segment of our versions
|
606 |
-
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
|
607 |
-
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
|
608 |
-
|
609 |
-
# Get the rest of our versions
|
610 |
-
left_split.append(left[len(left_split[0]) :])
|
611 |
-
right_split.append(right[len(right_split[0]) :])
|
612 |
-
|
613 |
-
# Insert our padding
|
614 |
-
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
|
615 |
-
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
|
616 |
-
|
617 |
-
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
|
618 |
-
|
619 |
-
|
620 |
-
class SpecifierSet(BaseSpecifier):
|
621 |
-
def __init__(
|
622 |
-
self, specifiers: str = "", prereleases: Optional[bool] = None
|
623 |
-
) -> None:
|
624 |
-
|
625 |
-
# Split on , to break each individual specifier into it's own item, and
|
626 |
-
# strip each item to remove leading/trailing whitespace.
|
627 |
-
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
|
628 |
-
|
629 |
-
# Parsed each individual specifier, attempting first to make it a
|
630 |
-
# Specifier and falling back to a LegacySpecifier.
|
631 |
-
parsed: Set[_IndividualSpecifier] = set()
|
632 |
-
for specifier in split_specifiers:
|
633 |
-
try:
|
634 |
-
parsed.add(Specifier(specifier))
|
635 |
-
except InvalidSpecifier:
|
636 |
-
parsed.add(LegacySpecifier(specifier))
|
637 |
-
|
638 |
-
# Turn our parsed specifiers into a frozen set and save them for later.
|
639 |
-
self._specs = frozenset(parsed)
|
640 |
-
|
641 |
-
# Store our prereleases value so we can use it later to determine if
|
642 |
-
# we accept prereleases or not.
|
643 |
-
self._prereleases = prereleases
|
644 |
-
|
645 |
-
def __repr__(self) -> str:
|
646 |
-
pre = (
|
647 |
-
f", prereleases={self.prereleases!r}"
|
648 |
-
if self._prereleases is not None
|
649 |
-
else ""
|
650 |
-
)
|
651 |
-
|
652 |
-
return f"<SpecifierSet({str(self)!r}{pre})>"
|
653 |
-
|
654 |
-
def __str__(self) -> str:
|
655 |
-
return ",".join(sorted(str(s) for s in self._specs))
|
656 |
-
|
657 |
-
def __hash__(self) -> int:
|
658 |
-
return hash(self._specs)
|
659 |
-
|
660 |
-
def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
|
661 |
-
if isinstance(other, str):
|
662 |
-
other = SpecifierSet(other)
|
663 |
-
elif not isinstance(other, SpecifierSet):
|
664 |
-
return NotImplemented
|
665 |
-
|
666 |
-
specifier = SpecifierSet()
|
667 |
-
specifier._specs = frozenset(self._specs | other._specs)
|
668 |
-
|
669 |
-
if self._prereleases is None and other._prereleases is not None:
|
670 |
-
specifier._prereleases = other._prereleases
|
671 |
-
elif self._prereleases is not None and other._prereleases is None:
|
672 |
-
specifier._prereleases = self._prereleases
|
673 |
-
elif self._prereleases == other._prereleases:
|
674 |
-
specifier._prereleases = self._prereleases
|
675 |
-
else:
|
676 |
-
raise ValueError(
|
677 |
-
"Cannot combine SpecifierSets with True and False prerelease "
|
678 |
-
"overrides."
|
679 |
-
)
|
680 |
-
|
681 |
-
return specifier
|
682 |
-
|
683 |
-
def __eq__(self, other: object) -> bool:
|
684 |
-
if isinstance(other, (str, _IndividualSpecifier)):
|
685 |
-
other = SpecifierSet(str(other))
|
686 |
-
elif not isinstance(other, SpecifierSet):
|
687 |
-
return NotImplemented
|
688 |
-
|
689 |
-
return self._specs == other._specs
|
690 |
-
|
691 |
-
def __len__(self) -> int:
|
692 |
-
return len(self._specs)
|
693 |
-
|
694 |
-
def __iter__(self) -> Iterator[_IndividualSpecifier]:
|
695 |
-
return iter(self._specs)
|
696 |
-
|
697 |
-
@property
|
698 |
-
def prereleases(self) -> Optional[bool]:
|
699 |
-
|
700 |
-
# If we have been given an explicit prerelease modifier, then we'll
|
701 |
-
# pass that through here.
|
702 |
-
if self._prereleases is not None:
|
703 |
-
return self._prereleases
|
704 |
-
|
705 |
-
# If we don't have any specifiers, and we don't have a forced value,
|
706 |
-
# then we'll just return None since we don't know if this should have
|
707 |
-
# pre-releases or not.
|
708 |
-
if not self._specs:
|
709 |
-
return None
|
710 |
-
|
711 |
-
# Otherwise we'll see if any of the given specifiers accept
|
712 |
-
# prereleases, if any of them do we'll return True, otherwise False.
|
713 |
-
return any(s.prereleases for s in self._specs)
|
714 |
-
|
715 |
-
@prereleases.setter
|
716 |
-
def prereleases(self, value: bool) -> None:
|
717 |
-
self._prereleases = value
|
718 |
-
|
719 |
-
def __contains__(self, item: UnparsedVersion) -> bool:
|
720 |
-
return self.contains(item)
|
721 |
-
|
722 |
-
def contains(
|
723 |
-
self, item: UnparsedVersion, prereleases: Optional[bool] = None
|
724 |
-
) -> bool:
|
725 |
-
|
726 |
-
# Ensure that our item is a Version or LegacyVersion instance.
|
727 |
-
if not isinstance(item, (LegacyVersion, Version)):
|
728 |
-
item = parse(item)
|
729 |
-
|
730 |
-
# Determine if we're forcing a prerelease or not, if we're not forcing
|
731 |
-
# one for this particular filter call, then we'll use whatever the
|
732 |
-
# SpecifierSet thinks for whether or not we should support prereleases.
|
733 |
-
if prereleases is None:
|
734 |
-
prereleases = self.prereleases
|
735 |
-
|
736 |
-
# We can determine if we're going to allow pre-releases by looking to
|
737 |
-
# see if any of the underlying items supports them. If none of them do
|
738 |
-
# and this item is a pre-release then we do not allow it and we can
|
739 |
-
# short circuit that here.
|
740 |
-
# Note: This means that 1.0.dev1 would not be contained in something
|
741 |
-
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
|
742 |
-
if not prereleases and item.is_prerelease:
|
743 |
-
return False
|
744 |
-
|
745 |
-
# We simply dispatch to the underlying specs here to make sure that the
|
746 |
-
# given version is contained within all of them.
|
747 |
-
# Note: This use of all() here means that an empty set of specifiers
|
748 |
-
# will always return True, this is an explicit design decision.
|
749 |
-
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
|
750 |
-
|
751 |
-
def filter(
|
752 |
-
self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
|
753 |
-
) -> Iterable[VersionTypeVar]:
|
754 |
-
|
755 |
-
# Determine if we're forcing a prerelease or not, if we're not forcing
|
756 |
-
# one for this particular filter call, then we'll use whatever the
|
757 |
-
# SpecifierSet thinks for whether or not we should support prereleases.
|
758 |
-
if prereleases is None:
|
759 |
-
prereleases = self.prereleases
|
760 |
-
|
761 |
-
# If we have any specifiers, then we want to wrap our iterable in the
|
762 |
-
# filter method for each one, this will act as a logical AND amongst
|
763 |
-
# each specifier.
|
764 |
-
if self._specs:
|
765 |
-
for spec in self._specs:
|
766 |
-
iterable = spec.filter(iterable, prereleases=bool(prereleases))
|
767 |
-
return iterable
|
768 |
-
# If we do not have any specifiers, then we need to have a rough filter
|
769 |
-
# which will filter out any pre-releases, unless there are no final
|
770 |
-
# releases, and which will filter out LegacyVersion in general.
|
771 |
-
else:
|
772 |
-
filtered: List[VersionTypeVar] = []
|
773 |
-
found_prereleases: List[VersionTypeVar] = []
|
774 |
-
|
775 |
-
item: UnparsedVersion
|
776 |
-
parsed_version: Union[Version, LegacyVersion]
|
777 |
-
|
778 |
-
for item in iterable:
|
779 |
-
# Ensure that we some kind of Version class for this item.
|
780 |
-
if not isinstance(item, (LegacyVersion, Version)):
|
781 |
-
parsed_version = parse(item)
|
782 |
-
else:
|
783 |
-
parsed_version = item
|
784 |
-
|
785 |
-
# Filter out any item which is parsed as a LegacyVersion
|
786 |
-
if isinstance(parsed_version, LegacyVersion):
|
787 |
-
continue
|
788 |
-
|
789 |
-
# Store any item which is a pre-release for later unless we've
|
790 |
-
# already found a final version or we are accepting prereleases
|
791 |
-
if parsed_version.is_prerelease and not prereleases:
|
792 |
-
if not filtered:
|
793 |
-
found_prereleases.append(item)
|
794 |
-
else:
|
795 |
-
filtered.append(item)
|
796 |
-
|
797 |
-
# If we've found no items except for pre-releases, then we'll go
|
798 |
-
# ahead and use the pre-releases
|
799 |
-
if not filtered and found_prereleases and prereleases is None:
|
800 |
-
return found_prereleases
|
801 |
-
|
802 |
-
return filtered
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Avkash/WebcamFaceProcessing/app.py
DELETED
@@ -1,316 +0,0 @@
|
|
1 |
-
import cv2
|
2 |
-
import gradio as gr
|
3 |
-
import mediapipe as mp
|
4 |
-
import dlib
|
5 |
-
import imutils
|
6 |
-
import numpy as np
|
7 |
-
|
8 |
-
|
9 |
-
mp_drawing = mp.solutions.drawing_utils
|
10 |
-
mp_drawing_styles = mp.solutions.drawing_styles
|
11 |
-
mp_face_mesh = mp.solutions.face_mesh
|
12 |
-
mp_face_detection = mp.solutions.face_detection
|
13 |
-
|
14 |
-
|
15 |
-
def apply_media_pipe_face_detection(image):
|
16 |
-
with mp_face_detection.FaceDetection(
|
17 |
-
model_selection=1, min_detection_confidence=0.5) as face_detection:
|
18 |
-
results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
|
19 |
-
if not results.detections:
|
20 |
-
return image
|
21 |
-
annotated_image = image.copy()
|
22 |
-
for detection in results.detections:
|
23 |
-
mp_drawing.draw_detection(annotated_image, detection)
|
24 |
-
return annotated_image
|
25 |
-
|
26 |
-
|
27 |
-
def apply_media_pipe_facemesh(image):
|
28 |
-
with mp_face_mesh.FaceMesh(
|
29 |
-
static_image_mode=True,
|
30 |
-
max_num_faces=1,
|
31 |
-
refine_landmarks=True,
|
32 |
-
min_detection_confidence=0.5) as face_mesh:
|
33 |
-
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
|
34 |
-
if not results.multi_face_landmarks:
|
35 |
-
return image
|
36 |
-
annotated_image = image.copy()
|
37 |
-
for face_landmarks in results.multi_face_landmarks:
|
38 |
-
mp_drawing.draw_landmarks(
|
39 |
-
image=annotated_image,
|
40 |
-
landmark_list=face_landmarks,
|
41 |
-
connections=mp_face_mesh.FACEMESH_TESSELATION,
|
42 |
-
landmark_drawing_spec=None,
|
43 |
-
connection_drawing_spec=mp_drawing_styles
|
44 |
-
.get_default_face_mesh_tesselation_style())
|
45 |
-
mp_drawing.draw_landmarks(
|
46 |
-
image=annotated_image,
|
47 |
-
landmark_list=face_landmarks,
|
48 |
-
connections=mp_face_mesh.FACEMESH_CONTOURS,
|
49 |
-
landmark_drawing_spec=None,
|
50 |
-
connection_drawing_spec=mp_drawing_styles
|
51 |
-
.get_default_face_mesh_contours_style())
|
52 |
-
mp_drawing.draw_landmarks(
|
53 |
-
image=annotated_image,
|
54 |
-
landmark_list=face_landmarks,
|
55 |
-
connections=mp_face_mesh.FACEMESH_IRISES,
|
56 |
-
landmark_drawing_spec=None,
|
57 |
-
connection_drawing_spec=mp_drawing_styles
|
58 |
-
.get_default_face_mesh_iris_connections_style())
|
59 |
-
return annotated_image
|
60 |
-
|
61 |
-
|
62 |
-
class FaceOrientation(object):
|
63 |
-
def __init__(self):
|
64 |
-
self.detect = dlib.get_frontal_face_detector()
|
65 |
-
self.predict = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")
|
66 |
-
|
67 |
-
def create_orientation(self, frame):
|
68 |
-
draw_rect1 = True
|
69 |
-
draw_rect2 = True
|
70 |
-
draw_lines = True
|
71 |
-
|
72 |
-
frame = imutils.resize(frame, width=800)
|
73 |
-
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
74 |
-
subjects = self.detect(gray, 0)
|
75 |
-
|
76 |
-
for subject in subjects:
|
77 |
-
landmarks = self.predict(gray, subject)
|
78 |
-
size = frame.shape
|
79 |
-
|
80 |
-
# 2D image points. If you change the image, you need to change vector
|
81 |
-
image_points = np.array([
|
82 |
-
(landmarks.part(33).x, landmarks.part(33).y), # Nose tip
|
83 |
-
(landmarks.part(8).x, landmarks.part(8).y), # Chin
|
84 |
-
(landmarks.part(36).x, landmarks.part(36).y), # Left eye left corner
|
85 |
-
(landmarks.part(45).x, landmarks.part(45).y), # Right eye right corne
|
86 |
-
(landmarks.part(48).x, landmarks.part(48).y), # Left Mouth corner
|
87 |
-
(landmarks.part(54).x, landmarks.part(54).y) # Right mouth corner
|
88 |
-
], dtype="double")
|
89 |
-
|
90 |
-
# 3D model points.
|
91 |
-
model_points = np.array([
|
92 |
-
(0.0, 0.0, 0.0), # Nose tip
|
93 |
-
(0.0, -330.0, -65.0), # Chin
|
94 |
-
(-225.0, 170.0, -135.0), # Left eye left corner
|
95 |
-
(225.0, 170.0, -135.0), # Right eye right corne
|
96 |
-
(-150.0, -150.0, -125.0), # Left Mouth corner
|
97 |
-
(150.0, -150.0, -125.0) # Right mouth corner
|
98 |
-
|
99 |
-
])
|
100 |
-
# Camera internals
|
101 |
-
focal_length = size[1]
|
102 |
-
center = (size[1] / 2, size[0] / 2)
|
103 |
-
camera_matrix = np.array(
|
104 |
-
[[focal_length, 0, center[0]],
|
105 |
-
[0, focal_length, center[1]],
|
106 |
-
[0, 0, 1]], dtype="double"
|
107 |
-
)
|
108 |
-
|
109 |
-
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
|
110 |
-
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
|
111 |
-
dist_coeffs)
|
112 |
-
|
113 |
-
(b1, jacobian) = cv2.projectPoints(np.array([(350.0, 270.0, 0.0)]), rotation_vector, translation_vector,
|
114 |
-
camera_matrix, dist_coeffs)
|
115 |
-
(b2, jacobian) = cv2.projectPoints(np.array([(-350.0, -270.0, 0.0)]), rotation_vector,
|
116 |
-
translation_vector, camera_matrix, dist_coeffs)
|
117 |
-
(b3, jacobian) = cv2.projectPoints(np.array([(-350.0, 270, 0.0)]), rotation_vector, translation_vector,
|
118 |
-
camera_matrix, dist_coeffs)
|
119 |
-
(b4, jacobian) = cv2.projectPoints(np.array([(350.0, -270.0, 0.0)]), rotation_vector,
|
120 |
-
translation_vector, camera_matrix, dist_coeffs)
|
121 |
-
|
122 |
-
(b11, jacobian) = cv2.projectPoints(np.array([(450.0, 350.0, 400.0)]), rotation_vector,
|
123 |
-
translation_vector, camera_matrix, dist_coeffs)
|
124 |
-
(b12, jacobian) = cv2.projectPoints(np.array([(-450.0, -350.0, 400.0)]), rotation_vector,
|
125 |
-
translation_vector, camera_matrix, dist_coeffs)
|
126 |
-
(b13, jacobian) = cv2.projectPoints(np.array([(-450.0, 350, 400.0)]), rotation_vector,
|
127 |
-
translation_vector, camera_matrix, dist_coeffs)
|
128 |
-
(b14, jacobian) = cv2.projectPoints(np.array([(450.0, -350.0, 400.0)]), rotation_vector,
|
129 |
-
translation_vector, camera_matrix, dist_coeffs)
|
130 |
-
|
131 |
-
b1 = (int(b1[0][0][0]), int(b1[0][0][1]))
|
132 |
-
b2 = (int(b2[0][0][0]), int(b2[0][0][1]))
|
133 |
-
b3 = (int(b3[0][0][0]), int(b3[0][0][1]))
|
134 |
-
b4 = (int(b4[0][0][0]), int(b4[0][0][1]))
|
135 |
-
|
136 |
-
b11 = (int(b11[0][0][0]), int(b11[0][0][1]))
|
137 |
-
b12 = (int(b12[0][0][0]), int(b12[0][0][1]))
|
138 |
-
b13 = (int(b13[0][0][0]), int(b13[0][0][1]))
|
139 |
-
b14 = (int(b14[0][0][0]), int(b14[0][0][1]))
|
140 |
-
|
141 |
-
if draw_rect1 == True:
|
142 |
-
cv2.line(frame, b1, b3, (255, 255, 0), 10)
|
143 |
-
cv2.line(frame, b3, b2, (255, 255, 0), 10)
|
144 |
-
cv2.line(frame, b2, b4, (255, 255, 0), 10)
|
145 |
-
cv2.line(frame, b4, b1, (255, 255, 0), 10)
|
146 |
-
|
147 |
-
if draw_rect2 == True:
|
148 |
-
cv2.line(frame, b11, b13, (255, 255, 0), 10)
|
149 |
-
cv2.line(frame, b13, b12, (255, 255, 0), 10)
|
150 |
-
cv2.line(frame, b12, b14, (255, 255, 0), 10)
|
151 |
-
cv2.line(frame, b14, b11, (255, 255, 0), 10)
|
152 |
-
|
153 |
-
if draw_lines == True:
|
154 |
-
cv2.line(frame, b11, b1, (0, 255, 0), 10)
|
155 |
-
cv2.line(frame, b13, b3, (0, 255, 0), 10)
|
156 |
-
cv2.line(frame, b12, b2, (0, 255, 0), 10)
|
157 |
-
cv2.line(frame, b14, b4, (0, 255, 0), 10)
|
158 |
-
|
159 |
-
return frame
|
160 |
-
|
161 |
-
|
162 |
-
face_orientation_obj = FaceOrientation()
|
163 |
-
|
164 |
-
|
165 |
-
class FaceProcessing(object):
|
166 |
-
def __init__(self, ui_obj):
|
167 |
-
self.name = "Face Image Processing"
|
168 |
-
self.description = "Call for Face Image and video Processing"
|
169 |
-
self.ui_obj = ui_obj
|
170 |
-
|
171 |
-
def take_webcam_photo(self, image):
|
172 |
-
return image
|
173 |
-
|
174 |
-
def take_webcam_video(self, images):
|
175 |
-
return images
|
176 |
-
|
177 |
-
def mp_webcam_photo(self, image):
|
178 |
-
return image
|
179 |
-
|
180 |
-
def mp_webcam_face_mesh(self, image):
|
181 |
-
mesh_image = apply_media_pipe_facemesh(image)
|
182 |
-
return mesh_image
|
183 |
-
|
184 |
-
def mp_webcam_face_detection(self, image):
|
185 |
-
face_detection_img = apply_media_pipe_face_detection(image)
|
186 |
-
return face_detection_img
|
187 |
-
|
188 |
-
def dlib_apply_face_orientation(self, image):
|
189 |
-
image = face_orientation_obj.create_orientation(image)
|
190 |
-
return image
|
191 |
-
|
192 |
-
def webcam_stream_update(self, video_frame):
|
193 |
-
video_out = face_orientation_obj.create_orientation(video_frame)
|
194 |
-
return video_out
|
195 |
-
|
196 |
-
def create_ui(self):
|
197 |
-
with self.ui_obj:
|
198 |
-
gr.Markdown("Face Analysis with Webcam/Video")
|
199 |
-
with gr.Tabs():
|
200 |
-
with gr.TabItem("Playing with Webcam"):
|
201 |
-
with gr.Row():
|
202 |
-
webcam_image_in = gr.Image(label="Webcam Image Input", source="webcam")
|
203 |
-
webcam_video_in = gr.Video(label="Webcam Video Input", source="webcam")
|
204 |
-
with gr.Row():
|
205 |
-
webcam_photo_action = gr.Button("Take the Photo")
|
206 |
-
webcam_video_action = gr.Button("Take the Video")
|
207 |
-
with gr.Row():
|
208 |
-
webcam_photo_out = gr.Image(label="Webcam Photo Output")
|
209 |
-
webcam_video_out = gr.Video(label="Webcam Video")
|
210 |
-
with gr.TabItem("Mediapipe Facemesh with Webcam"):
|
211 |
-
with gr.Row():
|
212 |
-
with gr.Column():
|
213 |
-
mp_image_in = gr.Image(label="Webcam Image Input", source="webcam")
|
214 |
-
with gr.Column():
|
215 |
-
mp_photo_action = gr.Button("Take the Photo")
|
216 |
-
mp_apply_fm_action = gr.Button("Apply Face Mesh the Photo")
|
217 |
-
mp_apply_landmarks_action = gr.Button("Apply Face Landmarks the Photo")
|
218 |
-
with gr.Row():
|
219 |
-
mp_photo_out = gr.Image(label="Webcam Photo Output")
|
220 |
-
mp_fm_photo_out = gr.Image(label="Face Mesh Photo Output")
|
221 |
-
mp_lm_photo_out = gr.Image(label="Face Landmarks Photo Output")
|
222 |
-
with gr.TabItem("DLib Based Face Orientation"):
|
223 |
-
with gr.Row():
|
224 |
-
with gr.Column():
|
225 |
-
dlib_image_in = gr.Image(label="Webcam Image Input", source="webcam")
|
226 |
-
with gr.Column():
|
227 |
-
dlib_photo_action = gr.Button("Take the Photo")
|
228 |
-
dlib_apply_orientation_action = gr.Button("Apply Face Mesh the Photo")
|
229 |
-
with gr.Row():
|
230 |
-
dlib_photo_out = gr.Image(label="Webcam Photo Output")
|
231 |
-
dlib_orientation_photo_out = gr.Image(label="Face Mesh Photo Output")
|
232 |
-
with gr.TabItem("Face Orientation on Live Webcam Stream"):
|
233 |
-
with gr.Row():
|
234 |
-
webcam_stream_in = gr.Image(label="Webcam Stream Input",
|
235 |
-
source="webcam",
|
236 |
-
streaming=True)
|
237 |
-
webcam_stream_out = gr.Image(label="Webcam Stream Output")
|
238 |
-
webcam_stream_in.change(
|
239 |
-
self.webcam_stream_update,
|
240 |
-
inputs=webcam_stream_in,
|
241 |
-
outputs=webcam_stream_out
|
242 |
-
)
|
243 |
-
|
244 |
-
dlib_photo_action.click(
|
245 |
-
self.mp_webcam_photo,
|
246 |
-
[
|
247 |
-
dlib_image_in
|
248 |
-
],
|
249 |
-
[
|
250 |
-
dlib_photo_out
|
251 |
-
]
|
252 |
-
)
|
253 |
-
dlib_apply_orientation_action.click(
|
254 |
-
self.dlib_apply_face_orientation,
|
255 |
-
[
|
256 |
-
dlib_image_in
|
257 |
-
],
|
258 |
-
[
|
259 |
-
dlib_orientation_photo_out
|
260 |
-
]
|
261 |
-
)
|
262 |
-
mp_photo_action.click(
|
263 |
-
self.mp_webcam_photo,
|
264 |
-
[
|
265 |
-
mp_image_in
|
266 |
-
],
|
267 |
-
[
|
268 |
-
mp_photo_out
|
269 |
-
]
|
270 |
-
)
|
271 |
-
mp_apply_fm_action.click(
|
272 |
-
self.mp_webcam_face_mesh,
|
273 |
-
[
|
274 |
-
mp_image_in
|
275 |
-
],
|
276 |
-
[
|
277 |
-
mp_fm_photo_out
|
278 |
-
]
|
279 |
-
)
|
280 |
-
mp_apply_landmarks_action.click(
|
281 |
-
self.mp_webcam_face_detection,
|
282 |
-
[
|
283 |
-
mp_image_in
|
284 |
-
],
|
285 |
-
[
|
286 |
-
mp_lm_photo_out
|
287 |
-
]
|
288 |
-
)
|
289 |
-
webcam_photo_action.click(
|
290 |
-
self.take_webcam_photo,
|
291 |
-
[
|
292 |
-
webcam_image_in
|
293 |
-
],
|
294 |
-
[
|
295 |
-
webcam_photo_out
|
296 |
-
]
|
297 |
-
)
|
298 |
-
webcam_video_action.click(
|
299 |
-
self.take_webcam_video,
|
300 |
-
[
|
301 |
-
webcam_video_in
|
302 |
-
],
|
303 |
-
[
|
304 |
-
webcam_video_out
|
305 |
-
]
|
306 |
-
)
|
307 |
-
|
308 |
-
def launch_ui(self):
|
309 |
-
self.ui_obj.launch()
|
310 |
-
|
311 |
-
|
312 |
-
if __name__ == '__main__':
|
313 |
-
my_app = gr.Blocks()
|
314 |
-
face_ui = FaceProcessing(my_app)
|
315 |
-
face_ui.create_ui()
|
316 |
-
face_ui.launch_ui()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/boxes.py
DELETED
@@ -1,423 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import math
|
3 |
-
import numpy as np
|
4 |
-
from enum import IntEnum, unique
|
5 |
-
from typing import List, Tuple, Union
|
6 |
-
import torch
|
7 |
-
from torch import device
|
8 |
-
|
9 |
-
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
|
10 |
-
|
11 |
-
|
12 |
-
@unique
|
13 |
-
class BoxMode(IntEnum):
|
14 |
-
"""
|
15 |
-
Enum of different ways to represent a box.
|
16 |
-
"""
|
17 |
-
|
18 |
-
XYXY_ABS = 0
|
19 |
-
"""
|
20 |
-
(x0, y0, x1, y1) in absolute floating points coordinates.
|
21 |
-
The coordinates in range [0, width or height].
|
22 |
-
"""
|
23 |
-
XYWH_ABS = 1
|
24 |
-
"""
|
25 |
-
(x0, y0, w, h) in absolute floating points coordinates.
|
26 |
-
"""
|
27 |
-
XYXY_REL = 2
|
28 |
-
"""
|
29 |
-
Not yet supported!
|
30 |
-
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
|
31 |
-
"""
|
32 |
-
XYWH_REL = 3
|
33 |
-
"""
|
34 |
-
Not yet supported!
|
35 |
-
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
|
36 |
-
"""
|
37 |
-
XYWHA_ABS = 4
|
38 |
-
"""
|
39 |
-
(xc, yc, w, h, a) in absolute floating points coordinates.
|
40 |
-
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
|
41 |
-
"""
|
42 |
-
|
43 |
-
@staticmethod
|
44 |
-
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
|
45 |
-
"""
|
46 |
-
Args:
|
47 |
-
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
|
48 |
-
from_mode, to_mode (BoxMode)
|
49 |
-
|
50 |
-
Returns:
|
51 |
-
The converted box of the same type.
|
52 |
-
"""
|
53 |
-
if from_mode == to_mode:
|
54 |
-
return box
|
55 |
-
|
56 |
-
original_type = type(box)
|
57 |
-
is_numpy = isinstance(box, np.ndarray)
|
58 |
-
single_box = isinstance(box, (list, tuple))
|
59 |
-
if single_box:
|
60 |
-
assert len(box) == 4 or len(box) == 5, (
|
61 |
-
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
|
62 |
-
" where k == 4 or 5"
|
63 |
-
)
|
64 |
-
arr = torch.tensor(box)[None, :]
|
65 |
-
else:
|
66 |
-
# avoid modifying the input box
|
67 |
-
if is_numpy:
|
68 |
-
arr = torch.from_numpy(np.asarray(box)).clone()
|
69 |
-
else:
|
70 |
-
arr = box.clone()
|
71 |
-
|
72 |
-
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
|
73 |
-
BoxMode.XYXY_REL,
|
74 |
-
BoxMode.XYWH_REL,
|
75 |
-
], "Relative mode not yet supported!"
|
76 |
-
|
77 |
-
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
|
78 |
-
assert (
|
79 |
-
arr.shape[-1] == 5
|
80 |
-
), "The last dimension of input shape must be 5 for XYWHA format"
|
81 |
-
original_dtype = arr.dtype
|
82 |
-
arr = arr.double()
|
83 |
-
|
84 |
-
w = arr[:, 2]
|
85 |
-
h = arr[:, 3]
|
86 |
-
a = arr[:, 4]
|
87 |
-
c = torch.abs(torch.cos(a * math.pi / 180.0))
|
88 |
-
s = torch.abs(torch.sin(a * math.pi / 180.0))
|
89 |
-
# This basically computes the horizontal bounding rectangle of the rotated box
|
90 |
-
new_w = c * w + s * h
|
91 |
-
new_h = c * h + s * w
|
92 |
-
|
93 |
-
# convert center to top-left corner
|
94 |
-
arr[:, 0] -= new_w / 2.0
|
95 |
-
arr[:, 1] -= new_h / 2.0
|
96 |
-
# bottom-right corner
|
97 |
-
arr[:, 2] = arr[:, 0] + new_w
|
98 |
-
arr[:, 3] = arr[:, 1] + new_h
|
99 |
-
|
100 |
-
arr = arr[:, :4].to(dtype=original_dtype)
|
101 |
-
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
|
102 |
-
original_dtype = arr.dtype
|
103 |
-
arr = arr.double()
|
104 |
-
arr[:, 0] += arr[:, 2] / 2.0
|
105 |
-
arr[:, 1] += arr[:, 3] / 2.0
|
106 |
-
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
|
107 |
-
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
|
108 |
-
else:
|
109 |
-
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
|
110 |
-
arr[:, 2] += arr[:, 0]
|
111 |
-
arr[:, 3] += arr[:, 1]
|
112 |
-
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
|
113 |
-
arr[:, 2] -= arr[:, 0]
|
114 |
-
arr[:, 3] -= arr[:, 1]
|
115 |
-
else:
|
116 |
-
raise NotImplementedError(
|
117 |
-
"Conversion from BoxMode {} to {} is not supported yet".format(
|
118 |
-
from_mode, to_mode
|
119 |
-
)
|
120 |
-
)
|
121 |
-
|
122 |
-
if single_box:
|
123 |
-
return original_type(arr.flatten().tolist())
|
124 |
-
if is_numpy:
|
125 |
-
return arr.numpy()
|
126 |
-
else:
|
127 |
-
return arr
|
128 |
-
|
129 |
-
|
130 |
-
class Boxes:
|
131 |
-
"""
|
132 |
-
This structure stores a list of boxes as a Nx4 torch.Tensor.
|
133 |
-
It supports some common methods about boxes
|
134 |
-
(`area`, `clip`, `nonempty`, etc),
|
135 |
-
and also behaves like a Tensor
|
136 |
-
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
|
137 |
-
|
138 |
-
Attributes:
|
139 |
-
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
|
140 |
-
"""
|
141 |
-
|
142 |
-
def __init__(self, tensor: torch.Tensor):
|
143 |
-
"""
|
144 |
-
Args:
|
145 |
-
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
|
146 |
-
"""
|
147 |
-
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
|
148 |
-
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
|
149 |
-
if tensor.numel() == 0:
|
150 |
-
# Use reshape, so we don't end up creating a new tensor that does not depend on
|
151 |
-
# the inputs (and consequently confuses jit)
|
152 |
-
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
|
153 |
-
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
|
154 |
-
|
155 |
-
self.tensor = tensor
|
156 |
-
|
157 |
-
def clone(self) -> "Boxes":
|
158 |
-
"""
|
159 |
-
Clone the Boxes.
|
160 |
-
|
161 |
-
Returns:
|
162 |
-
Boxes
|
163 |
-
"""
|
164 |
-
return Boxes(self.tensor.clone())
|
165 |
-
|
166 |
-
def to(self, device: torch.device):
|
167 |
-
# Boxes are assumed float32 and does not support to(dtype)
|
168 |
-
return Boxes(self.tensor.to(device=device))
|
169 |
-
|
170 |
-
def area(self) -> torch.Tensor:
|
171 |
-
"""
|
172 |
-
Computes the area of all the boxes.
|
173 |
-
|
174 |
-
Returns:
|
175 |
-
torch.Tensor: a vector with areas of each box.
|
176 |
-
"""
|
177 |
-
box = self.tensor
|
178 |
-
area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
|
179 |
-
return area
|
180 |
-
|
181 |
-
def clip(self, box_size: Tuple[int, int]) -> None:
|
182 |
-
"""
|
183 |
-
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
|
184 |
-
and y coordinates to the range [0, height].
|
185 |
-
|
186 |
-
Args:
|
187 |
-
box_size (height, width): The clipping box's size.
|
188 |
-
"""
|
189 |
-
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
|
190 |
-
h, w = box_size
|
191 |
-
x1 = self.tensor[:, 0].clamp(min=0, max=w)
|
192 |
-
y1 = self.tensor[:, 1].clamp(min=0, max=h)
|
193 |
-
x2 = self.tensor[:, 2].clamp(min=0, max=w)
|
194 |
-
y2 = self.tensor[:, 3].clamp(min=0, max=h)
|
195 |
-
self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
|
196 |
-
|
197 |
-
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
|
198 |
-
"""
|
199 |
-
Find boxes that are non-empty.
|
200 |
-
A box is considered empty, if either of its side is no larger than threshold.
|
201 |
-
|
202 |
-
Returns:
|
203 |
-
Tensor:
|
204 |
-
a binary vector which represents whether each box is empty
|
205 |
-
(False) or non-empty (True).
|
206 |
-
"""
|
207 |
-
box = self.tensor
|
208 |
-
widths = box[:, 2] - box[:, 0]
|
209 |
-
heights = box[:, 3] - box[:, 1]
|
210 |
-
keep = (widths > threshold) & (heights > threshold)
|
211 |
-
return keep
|
212 |
-
|
213 |
-
def __getitem__(self, item) -> "Boxes":
|
214 |
-
"""
|
215 |
-
Args:
|
216 |
-
item: int, slice, or a BoolTensor
|
217 |
-
|
218 |
-
Returns:
|
219 |
-
Boxes: Create a new :class:`Boxes` by indexing.
|
220 |
-
|
221 |
-
The following usage are allowed:
|
222 |
-
|
223 |
-
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
|
224 |
-
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
|
225 |
-
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
|
226 |
-
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
|
227 |
-
|
228 |
-
Note that the returned Boxes might share storage with this Boxes,
|
229 |
-
subject to Pytorch's indexing semantics.
|
230 |
-
"""
|
231 |
-
if isinstance(item, int):
|
232 |
-
return Boxes(self.tensor[item].view(1, -1))
|
233 |
-
b = self.tensor[item]
|
234 |
-
assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
|
235 |
-
return Boxes(b)
|
236 |
-
|
237 |
-
def __len__(self) -> int:
|
238 |
-
return self.tensor.shape[0]
|
239 |
-
|
240 |
-
def __repr__(self) -> str:
|
241 |
-
return "Boxes(" + str(self.tensor) + ")"
|
242 |
-
|
243 |
-
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
|
244 |
-
"""
|
245 |
-
Args:
|
246 |
-
box_size (height, width): Size of the reference box.
|
247 |
-
boundary_threshold (int): Boxes that extend beyond the reference box
|
248 |
-
boundary by more than boundary_threshold are considered "outside".
|
249 |
-
|
250 |
-
Returns:
|
251 |
-
a binary vector, indicating whether each box is inside the reference box.
|
252 |
-
"""
|
253 |
-
height, width = box_size
|
254 |
-
inds_inside = (
|
255 |
-
(self.tensor[..., 0] >= -boundary_threshold)
|
256 |
-
& (self.tensor[..., 1] >= -boundary_threshold)
|
257 |
-
& (self.tensor[..., 2] < width + boundary_threshold)
|
258 |
-
& (self.tensor[..., 3] < height + boundary_threshold)
|
259 |
-
)
|
260 |
-
return inds_inside
|
261 |
-
|
262 |
-
def get_centers(self) -> torch.Tensor:
|
263 |
-
"""
|
264 |
-
Returns:
|
265 |
-
The box centers in a Nx2 array of (x, y).
|
266 |
-
"""
|
267 |
-
return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
|
268 |
-
|
269 |
-
def scale(self, scale_x: float, scale_y: float) -> None:
|
270 |
-
"""
|
271 |
-
Scale the box with horizontal and vertical scaling factors
|
272 |
-
"""
|
273 |
-
self.tensor[:, 0::2] *= scale_x
|
274 |
-
self.tensor[:, 1::2] *= scale_y
|
275 |
-
|
276 |
-
@classmethod
|
277 |
-
def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
|
278 |
-
"""
|
279 |
-
Concatenates a list of Boxes into a single Boxes
|
280 |
-
|
281 |
-
Arguments:
|
282 |
-
boxes_list (list[Boxes])
|
283 |
-
|
284 |
-
Returns:
|
285 |
-
Boxes: the concatenated Boxes
|
286 |
-
"""
|
287 |
-
assert isinstance(boxes_list, (list, tuple))
|
288 |
-
if len(boxes_list) == 0:
|
289 |
-
return cls(torch.empty(0))
|
290 |
-
assert all([isinstance(box, Boxes) for box in boxes_list])
|
291 |
-
|
292 |
-
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
|
293 |
-
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
|
294 |
-
return cat_boxes
|
295 |
-
|
296 |
-
@property
|
297 |
-
def device(self) -> device:
|
298 |
-
return self.tensor.device
|
299 |
-
|
300 |
-
# type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
|
301 |
-
# https://github.com/pytorch/pytorch/issues/18627
|
302 |
-
@torch.jit.unused
|
303 |
-
def __iter__(self):
|
304 |
-
"""
|
305 |
-
Yield a box as a Tensor of shape (4,) at a time.
|
306 |
-
"""
|
307 |
-
yield from self.tensor
|
308 |
-
|
309 |
-
|
310 |
-
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
|
311 |
-
"""
|
312 |
-
Given two lists of boxes of size N and M,
|
313 |
-
compute the intersection area between __all__ N x M pairs of boxes.
|
314 |
-
The box order must be (xmin, ymin, xmax, ymax)
|
315 |
-
|
316 |
-
Args:
|
317 |
-
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
|
318 |
-
|
319 |
-
Returns:
|
320 |
-
Tensor: intersection, sized [N,M].
|
321 |
-
"""
|
322 |
-
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
|
323 |
-
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
|
324 |
-
boxes1[:, None, :2], boxes2[:, :2]
|
325 |
-
) # [N,M,2]
|
326 |
-
|
327 |
-
width_height.clamp_(min=0) # [N,M,2]
|
328 |
-
intersection = width_height.prod(dim=2) # [N,M]
|
329 |
-
return intersection
|
330 |
-
|
331 |
-
|
332 |
-
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
|
333 |
-
# with slight modifications
|
334 |
-
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
|
335 |
-
"""
|
336 |
-
Given two lists of boxes of size N and M, compute the IoU
|
337 |
-
(intersection over union) between **all** N x M pairs of boxes.
|
338 |
-
The box order must be (xmin, ymin, xmax, ymax).
|
339 |
-
|
340 |
-
Args:
|
341 |
-
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
|
342 |
-
|
343 |
-
Returns:
|
344 |
-
Tensor: IoU, sized [N,M].
|
345 |
-
"""
|
346 |
-
area1 = boxes1.area() # [N]
|
347 |
-
area2 = boxes2.area() # [M]
|
348 |
-
inter = pairwise_intersection(boxes1, boxes2)
|
349 |
-
|
350 |
-
# handle empty boxes
|
351 |
-
iou = torch.where(
|
352 |
-
inter > 0,
|
353 |
-
inter / (area1[:, None] + area2 - inter),
|
354 |
-
torch.zeros(1, dtype=inter.dtype, device=inter.device),
|
355 |
-
)
|
356 |
-
return iou
|
357 |
-
|
358 |
-
|
359 |
-
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
|
360 |
-
"""
|
361 |
-
Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area).
|
362 |
-
|
363 |
-
Args:
|
364 |
-
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
|
365 |
-
|
366 |
-
Returns:
|
367 |
-
Tensor: IoA, sized [N,M].
|
368 |
-
"""
|
369 |
-
area2 = boxes2.area() # [M]
|
370 |
-
inter = pairwise_intersection(boxes1, boxes2)
|
371 |
-
|
372 |
-
# handle empty boxes
|
373 |
-
ioa = torch.where(
|
374 |
-
inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
|
375 |
-
)
|
376 |
-
return ioa
|
377 |
-
|
378 |
-
|
379 |
-
def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):
|
380 |
-
"""
|
381 |
-
Pairwise distance between N points and M boxes. The distance between a
|
382 |
-
point and a box is represented by the distance from the point to 4 edges
|
383 |
-
of the box. Distances are all positive when the point is inside the box.
|
384 |
-
|
385 |
-
Args:
|
386 |
-
points: Nx2 coordinates. Each row is (x, y)
|
387 |
-
boxes: M boxes
|
388 |
-
|
389 |
-
Returns:
|
390 |
-
Tensor: distances of size (N, M, 4). The 4 values are distances from
|
391 |
-
the point to the left, top, right, bottom of the box.
|
392 |
-
"""
|
393 |
-
x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1)
|
394 |
-
x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M)
|
395 |
-
return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)
|
396 |
-
|
397 |
-
|
398 |
-
def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
|
399 |
-
"""
|
400 |
-
Compute pairwise intersection over union (IOU) of two sets of matched
|
401 |
-
boxes that have the same number of boxes.
|
402 |
-
Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
|
403 |
-
|
404 |
-
Args:
|
405 |
-
boxes1 (Boxes): bounding boxes, sized [N,4].
|
406 |
-
boxes2 (Boxes): same length as boxes1
|
407 |
-
Returns:
|
408 |
-
Tensor: iou, sized [N].
|
409 |
-
"""
|
410 |
-
assert len(boxes1) == len(
|
411 |
-
boxes2
|
412 |
-
), "boxlists should have the same" "number of entries, got {}, {}".format(
|
413 |
-
len(boxes1), len(boxes2)
|
414 |
-
)
|
415 |
-
area1 = boxes1.area() # [N]
|
416 |
-
area2 = boxes2.area() # [N]
|
417 |
-
box1, box2 = boxes1.tensor, boxes2.tensor
|
418 |
-
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
|
419 |
-
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
|
420 |
-
wh = (rb - lt).clamp(min=0) # [N,2]
|
421 |
-
inter = wh[:, 0] * wh[:, 1] # [N]
|
422 |
-
iou = inter / (area1 + area2 - inter) # [N]
|
423 |
-
return iou
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/meta_arch/centernet_detector.py
DELETED
@@ -1,69 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import json
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
|
7 |
-
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
|
8 |
-
from detectron2.modeling import build_backbone, build_proposal_generator
|
9 |
-
from detectron2.modeling import detector_postprocess
|
10 |
-
from detectron2.structures import ImageList
|
11 |
-
|
12 |
-
@META_ARCH_REGISTRY.register()
|
13 |
-
class CenterNetDetector(nn.Module):
|
14 |
-
def __init__(self, cfg):
|
15 |
-
super().__init__()
|
16 |
-
self.mean, self.std = cfg.MODEL.PIXEL_MEAN, cfg.MODEL.PIXEL_STD
|
17 |
-
self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
|
18 |
-
self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
|
19 |
-
|
20 |
-
self.backbone = build_backbone(cfg)
|
21 |
-
self.proposal_generator = build_proposal_generator(
|
22 |
-
cfg, self.backbone.output_shape()) # TODO: change to a more precise name
|
23 |
-
|
24 |
-
|
25 |
-
def forward(self, batched_inputs):
|
26 |
-
if not self.training:
|
27 |
-
return self.inference(batched_inputs)
|
28 |
-
images = self.preprocess_image(batched_inputs)
|
29 |
-
features = self.backbone(images.tensor)
|
30 |
-
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
|
31 |
-
|
32 |
-
_, proposal_losses = self.proposal_generator(
|
33 |
-
images, features, gt_instances)
|
34 |
-
return proposal_losses
|
35 |
-
|
36 |
-
|
37 |
-
@property
|
38 |
-
def device(self):
|
39 |
-
return self.pixel_mean.device
|
40 |
-
|
41 |
-
|
42 |
-
@torch.no_grad()
|
43 |
-
def inference(self, batched_inputs, do_postprocess=True):
|
44 |
-
images = self.preprocess_image(batched_inputs)
|
45 |
-
inp = images.tensor
|
46 |
-
features = self.backbone(inp)
|
47 |
-
proposals, _ = self.proposal_generator(images, features, None)
|
48 |
-
|
49 |
-
processed_results = []
|
50 |
-
for results_per_image, input_per_image, image_size in zip(
|
51 |
-
proposals, batched_inputs, images.image_sizes):
|
52 |
-
if do_postprocess:
|
53 |
-
height = input_per_image.get("height", image_size[0])
|
54 |
-
width = input_per_image.get("width", image_size[1])
|
55 |
-
r = detector_postprocess(results_per_image, height, width)
|
56 |
-
processed_results.append({"instances": r})
|
57 |
-
else:
|
58 |
-
r = results_per_image
|
59 |
-
processed_results.append(r)
|
60 |
-
return processed_results
|
61 |
-
|
62 |
-
def preprocess_image(self, batched_inputs):
|
63 |
-
"""
|
64 |
-
Normalize, pad and batch the input images.
|
65 |
-
"""
|
66 |
-
images = [x["image"].to(self.device) for x in batched_inputs]
|
67 |
-
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
|
68 |
-
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
|
69 |
-
return images
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/i18n/locale_diff.py
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
import os
|
3 |
-
from collections import OrderedDict
|
4 |
-
|
5 |
-
# Define the standard file name
|
6 |
-
standard_file = "en_US.json"
|
7 |
-
|
8 |
-
# Find all JSON files in the directory
|
9 |
-
dir_path = "./"
|
10 |
-
languages = [
|
11 |
-
f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file
|
12 |
-
]
|
13 |
-
|
14 |
-
# Load the standard file
|
15 |
-
with open(standard_file, "r", encoding="utf-8") as f:
|
16 |
-
standard_data = json.load(f, object_pairs_hook=OrderedDict)
|
17 |
-
|
18 |
-
# Loop through each language file
|
19 |
-
for lang_file in languages:
|
20 |
-
# Load the language file
|
21 |
-
with open(lang_file, "r", encoding="utf-8") as f:
|
22 |
-
lang_data = json.load(f, object_pairs_hook=OrderedDict)
|
23 |
-
|
24 |
-
# Find the difference between the language file and the standard file
|
25 |
-
diff = set(standard_data.keys()) - set(lang_data.keys())
|
26 |
-
|
27 |
-
miss = set(lang_data.keys()) - set(standard_data.keys())
|
28 |
-
|
29 |
-
# Add any missing keys to the language file
|
30 |
-
for key in diff:
|
31 |
-
lang_data[key] = key
|
32 |
-
|
33 |
-
# Del any extra keys to the language file
|
34 |
-
for key in miss:
|
35 |
-
del lang_data[key]
|
36 |
-
|
37 |
-
# Sort the keys of the language file to match the order of the standard file
|
38 |
-
lang_data = OrderedDict(
|
39 |
-
sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0]))
|
40 |
-
)
|
41 |
-
|
42 |
-
# Save the updated language file
|
43 |
-
with open(lang_file, "w", encoding="utf-8") as f:
|
44 |
-
json.dump(lang_data, f, ensure_ascii=False, indent=4)
|
45 |
-
f.write("\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/infer/lib/infer_pack/commons.py
DELETED
@@ -1,167 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
from torch.nn import functional as F
|
7 |
-
|
8 |
-
|
9 |
-
def init_weights(m, mean=0.0, std=0.01):
|
10 |
-
classname = m.__class__.__name__
|
11 |
-
if classname.find("Conv") != -1:
|
12 |
-
m.weight.data.normal_(mean, std)
|
13 |
-
|
14 |
-
|
15 |
-
def get_padding(kernel_size, dilation=1):
|
16 |
-
return int((kernel_size * dilation - dilation) / 2)
|
17 |
-
|
18 |
-
|
19 |
-
def convert_pad_shape(pad_shape):
|
20 |
-
l = pad_shape[::-1]
|
21 |
-
pad_shape = [item for sublist in l for item in sublist]
|
22 |
-
return pad_shape
|
23 |
-
|
24 |
-
|
25 |
-
def kl_divergence(m_p, logs_p, m_q, logs_q):
|
26 |
-
"""KL(P||Q)"""
|
27 |
-
kl = (logs_q - logs_p) - 0.5
|
28 |
-
kl += (
|
29 |
-
0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
|
30 |
-
)
|
31 |
-
return kl
|
32 |
-
|
33 |
-
|
34 |
-
def rand_gumbel(shape):
|
35 |
-
"""Sample from the Gumbel distribution, protect from overflows."""
|
36 |
-
uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
|
37 |
-
return -torch.log(-torch.log(uniform_samples))
|
38 |
-
|
39 |
-
|
40 |
-
def rand_gumbel_like(x):
|
41 |
-
g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
|
42 |
-
return g
|
43 |
-
|
44 |
-
|
45 |
-
def slice_segments(x, ids_str, segment_size=4):
|
46 |
-
ret = torch.zeros_like(x[:, :, :segment_size])
|
47 |
-
for i in range(x.size(0)):
|
48 |
-
idx_str = ids_str[i]
|
49 |
-
idx_end = idx_str + segment_size
|
50 |
-
ret[i] = x[i, :, idx_str:idx_end]
|
51 |
-
return ret
|
52 |
-
|
53 |
-
|
54 |
-
def slice_segments2(x, ids_str, segment_size=4):
|
55 |
-
ret = torch.zeros_like(x[:, :segment_size])
|
56 |
-
for i in range(x.size(0)):
|
57 |
-
idx_str = ids_str[i]
|
58 |
-
idx_end = idx_str + segment_size
|
59 |
-
ret[i] = x[i, idx_str:idx_end]
|
60 |
-
return ret
|
61 |
-
|
62 |
-
|
63 |
-
def rand_slice_segments(x, x_lengths=None, segment_size=4):
|
64 |
-
b, d, t = x.size()
|
65 |
-
if x_lengths is None:
|
66 |
-
x_lengths = t
|
67 |
-
ids_str_max = x_lengths - segment_size + 1
|
68 |
-
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
|
69 |
-
ret = slice_segments(x, ids_str, segment_size)
|
70 |
-
return ret, ids_str
|
71 |
-
|
72 |
-
|
73 |
-
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
|
74 |
-
position = torch.arange(length, dtype=torch.float)
|
75 |
-
num_timescales = channels // 2
|
76 |
-
log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
|
77 |
-
num_timescales - 1
|
78 |
-
)
|
79 |
-
inv_timescales = min_timescale * torch.exp(
|
80 |
-
torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
|
81 |
-
)
|
82 |
-
scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
|
83 |
-
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
|
84 |
-
signal = F.pad(signal, [0, 0, 0, channels % 2])
|
85 |
-
signal = signal.view(1, channels, length)
|
86 |
-
return signal
|
87 |
-
|
88 |
-
|
89 |
-
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
|
90 |
-
b, channels, length = x.size()
|
91 |
-
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
92 |
-
return x + signal.to(dtype=x.dtype, device=x.device)
|
93 |
-
|
94 |
-
|
95 |
-
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
|
96 |
-
b, channels, length = x.size()
|
97 |
-
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
98 |
-
return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
|
99 |
-
|
100 |
-
|
101 |
-
def subsequent_mask(length):
|
102 |
-
mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
|
103 |
-
return mask
|
104 |
-
|
105 |
-
|
106 |
-
@torch.jit.script
|
107 |
-
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
108 |
-
n_channels_int = n_channels[0]
|
109 |
-
in_act = input_a + input_b
|
110 |
-
t_act = torch.tanh(in_act[:, :n_channels_int, :])
|
111 |
-
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
|
112 |
-
acts = t_act * s_act
|
113 |
-
return acts
|
114 |
-
|
115 |
-
|
116 |
-
def convert_pad_shape(pad_shape):
|
117 |
-
l = pad_shape[::-1]
|
118 |
-
pad_shape = [item for sublist in l for item in sublist]
|
119 |
-
return pad_shape
|
120 |
-
|
121 |
-
|
122 |
-
def shift_1d(x):
|
123 |
-
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
|
124 |
-
return x
|
125 |
-
|
126 |
-
|
127 |
-
def sequence_mask(length, max_length=None):
|
128 |
-
if max_length is None:
|
129 |
-
max_length = length.max()
|
130 |
-
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
|
131 |
-
return x.unsqueeze(0) < length.unsqueeze(1)
|
132 |
-
|
133 |
-
|
134 |
-
def generate_path(duration, mask):
|
135 |
-
"""
|
136 |
-
duration: [b, 1, t_x]
|
137 |
-
mask: [b, 1, t_y, t_x]
|
138 |
-
"""
|
139 |
-
device = duration.device
|
140 |
-
|
141 |
-
b, _, t_y, t_x = mask.shape
|
142 |
-
cum_duration = torch.cumsum(duration, -1)
|
143 |
-
|
144 |
-
cum_duration_flat = cum_duration.view(b * t_x)
|
145 |
-
path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
|
146 |
-
path = path.view(b, t_x, t_y)
|
147 |
-
path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
|
148 |
-
path = path.unsqueeze(1).transpose(2, 3) * mask
|
149 |
-
return path
|
150 |
-
|
151 |
-
|
152 |
-
def clip_grad_value_(parameters, clip_value, norm_type=2):
|
153 |
-
if isinstance(parameters, torch.Tensor):
|
154 |
-
parameters = [parameters]
|
155 |
-
parameters = list(filter(lambda p: p.grad is not None, parameters))
|
156 |
-
norm_type = float(norm_type)
|
157 |
-
if clip_value is not None:
|
158 |
-
clip_value = float(clip_value)
|
159 |
-
|
160 |
-
total_norm = 0
|
161 |
-
for p in parameters:
|
162 |
-
param_norm = p.grad.data.norm(norm_type)
|
163 |
-
total_norm += param_norm.item() ** norm_type
|
164 |
-
if clip_value is not None:
|
165 |
-
p.grad.data.clamp_(min=-clip_value, max=clip_value)
|
166 |
-
total_norm = total_norm ** (1.0 / norm_type)
|
167 |
-
return total_norm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Ai Chat Rpg Juego Mod Apk.md
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>AI Chat RPG Game Mod APK: Una nueva manera de disfrutar de juegos de rol</h1> | <p>Te encantan los juegos de rol pero te gustaría tener más libertad y creatividad en tus aventuras? ¿Quieres interactuar con personajes realistas y sensibles que puedan adaptarse a tus elecciones y preferencias? Si respondió sí a cualquiera de estas preguntas, entonces es posible que desee echa un vistazo AI Chat RPG Game Mod APK.</p>
|
3 |
-
<h2>ai chat rpg juego mod apk</h2><br /><p><b><b>Download File</b> >>> <a href="https://bltlly.com/2v6JxV">https://bltlly.com/2v6JxV</a></b></p><br /><br />
|
4 |
-
<p>AI Chat RPG Game Mod APK es una aplicación única e innovadora que le permite crear sus propios escenarios de juego de roles y chatear con un chatbot de inteligencia artificial (AI) que puede actuar como su compañero, amigo, enemigo, o cualquier cosa en el medio. Puedes personalizar la apariencia, personalidad, antecedentes, habilidades y más de tu personaje. También puede elegir entre diferentes géneros, temas, escenarios y tramas para sus historias. Si desea explorar un mundo de fantasía, luchar contra zombies en un páramo post-apocalíptico, o romance un vampiro en una mansión gótica, puede hacerlo todo con AI Chat RPG Game Mod APK.</p>
|
5 |
-
<p>En este artículo, le diremos todo lo que necesita saber sobre AI Chat RPG Game Mod APK. Te explicaremos qué es y cómo funciona, cómo descargarlo e instalarlo en tu dispositivo Android, cómo jugarlo y divertirte con él, y por qué deberías probarlo si eres un fan de los juegos de rol. También responderemos con frecuencia <h2>Cómo descargar e instalar AI Chat RPG Game Mod APK? </h2>
|
6 |
-
<p>Si usted está interesado en probar AI Chat RPG Game Mod APK, tendrá que descargar e instalar en su dispositivo Android. Estos son los pasos que debes seguir:</p>
|
7 |
-
<ol>
|
8 |
-
<li>Ir a la página web oficial de AI Chat RPG Game Mod APK y haga clic en el botón de descarga. Será redirigido a un enlace de descarga seguro y rápido. </li>
|
9 |
-
<li>Espere a que la descarga termine y localice el archivo APK en su dispositivo. Es posible que necesite habilitar la instalación de fuentes desconocidas en su configuración si no lo ha hecho antes. </li>
|
10 |
-
|
11 |
-
<li>Una vez que la instalación se haya completado, puede iniciar AI Chat RPG Game Mod APK desde el cajón de la aplicación o la pantalla de inicio y comenzar a crear sus propias historias de juegos de rol. </li>
|
12 |
-
</ol>
|
13 |
-
<p>Antes de descargar e instalar AI Chat RPG Game Mod APK, usted debe ser consciente de algunos requisitos y precauciones. En primer lugar, es necesario tener un dispositivo Android que se ejecuta en Android 4.4 o superior y tiene al menos 1 GB de RAM y 100 MB de espacio de almacenamiento gratuito. En segundo lugar, es necesario tener una conexión a Internet estable para usar AI Chat RPG Game Mod APK, ya que se basa en la computación en nube para generar las respuestas del chatbot de AI. En tercer lugar, debe tener cuidado con el contenido que crea y comparte con AI Chat RPG Game Mod APK, ya que puede no ser adecuado para niños o audiencias sensibles. También debe respetar los derechos de propiedad intelectual de los demás y no utilizar materiales con derechos de autor o marcas registradas sin permiso. </p>
|
14 |
-
<p></p>
|
15 |
-
<h2>Cómo jugar AI Chat RPG juego Mod APK? </h2>
|
16 |
-
<p>Jugar AI Chat RPG Game Mod APK es fácil y divertido. Todo lo que necesitas hacer es crear tu propio personaje e iniciar una conversación con un chatbot de IA que actuará como tu compañero de juego de roles. Así es como puedes hacerlo:</p>
|
17 |
-
<ul>
|
18 |
-
<li>Al iniciar AI Chat RPG Game Mod APK, verá un menú con diferentes opciones. Puede elegir crear un nuevo carácter, cargar un carácter existente o navegar por la galería de caracteres creados por otros usuarios. </li>
|
19 |
-
<li>Si eliges crear un nuevo personaje, podrás personalizar el nombre, género, edad, apariencia, personalidad, antecedentes, habilidades y más de tu personaje. También puedes subir tu propia foto o avatar para que tu personaje sea más realista. </li>
|
20 |
-
<li>Después de crear tu personaje, podrás elegir entre diferentes géneros, temas, configuraciones y tramas para tu historia de rol. También puede crear su propia historia desde cero o utilizar una plantilla proporcionada por AI Chat RPG Game Mod APK.</li>
|
21 |
-
|
22 |
-
<li>Puede guardar su conversación en cualquier momento y reanudarla más tarde. También puede compartir su conversación con otros usuarios o exportarla como un archivo de texto o una imagen. </li>
|
23 |
-
</ul>
|
24 |
-
<p>AI Chat RPG Game Mod APK tiene diferentes modos y configuraciones que se pueden ajustar de acuerdo a sus preferencias. Puede elegir entre el modo normal y el modo avanzado, que afectan la complejidad y la creatividad de las respuestas del chatbot de IA. También puede elegir entre el modo amigable y el modo hostil, que afectan el tono y la actitud de las respuestas del chatbot de IA. También puede activar o desactivar la entrada y salida de voz, que le permiten hablar o escuchar las respuestas del chatbot de IA en lugar de escribirlas o leerlas. </p>
|
25 |
-
<p>Para darle una idea de lo que AI Chat RPG Game Mod APK puede hacer, aquí hay una tabla de algunos ejemplos de AI chat RPG game mod apk escenarios y diálogos:</p>
|
26 |
-
| Género | Tema | Marco | Trama | Diálogo | | --- | --- | --- | --- | --- | | Fantasía | Aventura | Reino medieval | Una búsqueda para encontrar una espada legendaria | Usted: Saludos, viajero. Estoy buscando la Espada del Destino. ¿Sabes dónde puedo encontrarla? <br>AI: ¿La Espada del Destino? Esa es una misión peligrosa, amigo mío. La espada está escondida en la Guarida del Dragón, custodiada por una temible bestia. <br>Tú: No tengo miedo de los dragones. He entrenado toda mi vida para este momento. <br>AI: Entonces eres muy valiente o muy tonto. La Guarida del Dragón está lejos de aquí, más allá del Bosque Oscuro y la Montaña de la Perdición.<br>Tú: Entonces debo apresurarme. Gracias por tu ayuda. <br>AI: De nada. Que los dioses te protejan en su viaje. <h2>¿Por qué debería probar AI Chat RPG Game Mod APK? </h2>
|
27 |
-
<p>Si todavía se está preguntando por qué debe probar AI Chat RPG Game Mod APK, aquí hay algunas razones que podrían convencerle:</p>
|
28 |
-
<ul>
|
29 |
-
|
30 |
-
<li>AI Chat RPG Game Mod APK es divertido y entretenido porque puede generar diálogos realistas e inmersivos que pueden hacerte sentir que realmente estás hablando con otra persona. También puedes experimentar diferentes emociones y estados de ánimo dependiendo del modo y la configuración de tu historia. Usted puede reír, llorar, enojarse, o enamorarse de AI Chat RPG Game Mod APK.</li>
|
31 |
-
<li>AI Chat RPG Game Mod APK es educativo e informativo porque puede ayudarle a mejorar su vocabulario, gramática, ortografía y habilidades de comunicación. También puede aprender cosas y hechos nuevos sobre diferentes temas y culturas desde el chatbot de IA. También puedes desafiarte a ti mismo y probar tu conocimiento y creatividad usando comandos y emojis. </li>
|
32 |
-
</ul>
|
33 |
-
<p>Por supuesto, AI Chat RPG Game Mod APK no es perfecto y tiene algunas limitaciones y desventajas. Por ejemplo, puede que no siempre entienda lo que quiere decir o diga, o puede dar respuestas inapropiadas o irrelevantes. También puede tener algunos errores o errores que pueden afectar la calidad de la conversación. También puede consumir una gran cantidad de datos y energía de la batería en su dispositivo. </p>
|
34 |
-
<p>Sin embargo, estos problemas son menores en comparación con los beneficios y el disfrute que AI Chat RPG Game Mod APK puede ofrecer. También puede informar de cualquier problema o sugerencias a los desarrolladores de AI Chat RPG Game Mod APK para ayudarles a mejorar la aplicación. </p>
|
35 |
-
<p>Para darle una idea de cuánto ama la gente AI Chat RPG Game Mod APK, aquí hay un testimonio de un usuario que disfrutó de AI Chat RPG Game Mod APK:</p>
|
36 |
-
<blockquote>
|
37 |
-
|
38 |
-
<cite>- John, 25 años</cite>
|
39 |
-
</blockquote> <h2>Conclusión</h2>
|
40 |
-
<p>En conclusión, AI Chat RPG Game Mod APK es una nueva e innovadora manera de disfrutar de juegos de rol en su dispositivo Android. Te permite crear tus propios personajes y escenarios, y chatear con un chatbot de IA que puede actuar como tu compañero de juego de roles. Puedes personalizar la apariencia, personalidad, antecedentes, habilidades y más de tu personaje. También puede elegir entre diferentes géneros, temas, escenarios y tramas para sus historias. También puede cambiar el modo y la configuración de su conversación para adaptarse a su estado de ánimo y preferencia. También puede guardar, compartir o exportar su conversación como un archivo de texto o una imagen. </p>
|
41 |
-
<p>AI Chat RPG Game Mod APK es divertido, entretenido, educativo e informativo. Puede ayudarte a mejorar tu vocabulario, gramática, ortografía y habilidades de comunicación. También puede ayudarle a aprender cosas y hechos nuevos sobre diferentes temas y culturas. También puede desafiarte y probar tu conocimiento y creatividad usando comandos y emojis. </p>
|
42 |
-
<p>AI Chat RPG Game Mod APK no es perfecto y tiene algunas limitaciones y desventajas. Puede que no siempre entienda lo que quiere decir o dice, o puede dar respuestas inapropiadas o irrelevantes. También puede tener algunos errores o errores que pueden afectar la calidad de la conversación. También puede consumir una gran cantidad de datos y energía de la batería en su dispositivo. </p>
|
43 |
-
<p>Sin embargo, estos problemas son menores en comparación con los beneficios y el disfrute que AI Chat RPG Game Mod APK puede ofrecer. También puede informar de cualquier problema o sugerencias a los desarrolladores de AI Chat RPG Game Mod APK para ayudarles a mejorar la aplicación. </p>
|
44 |
-
<p>Si usted es un fan de los juegos de rol y quiere probar algo nuevo y diferente, usted debe descargar e instalar AI Chat RPG Game Mod APK en su dispositivo Android. No te arrepentirás. </p>
|
45 |
-
<p>Gracias por leer este artículo. Esperamos que haya encontrado útil e informativo. Diviértase con AI Chat RPG Game Mod APK! </p>
|
46 |
-
<h2>Preguntas frecuentes</h2>
|
47 |
-
|
48 |
-
<ol>
|
49 |
-
<li><b> ¿Qué es AI Chat RPG Game Mod APK? </b><br>
|
50 |
-
AI Chat RPG Game Mod APK es una aplicación única e innovadora que le permite crear sus propios escenarios de juego de roles y chatear con un chatbot de inteligencia artificial (AI) que puede actuar como su compañero, amigo, enemigo, o cualquier cosa en el medio. </li>
|
51 |
-
<li><b>¿Cómo puedo descargar e instalar AI Chat RPG Game Mod APK? </b><br>
|
52 |
-
Puede descargar e instalar AI Chat RPG Game Mod APK desde el sitio web oficial de AI Chat RPG Game Mod APK. Deberá habilitar la instalación de fuentes desconocidas en su configuración y seguir las instrucciones en la pantalla para instalar la aplicación en su dispositivo. </li>
|
53 |
-
<li><b>¿Cómo puedo jugar AI Chat RPG Game Mod APK? </b><br>
|
54 |
-
Puede jugar AI Chat RPG Game Mod APK mediante la creación de su propio personaje y la elección de una historia para su aventura de juego de roles. A continuación, puede comenzar a chatear con un chatbot de IA que desempeñará el papel de otro personaje en su historia. Puede escribir cualquier cosa que desee y el chatbot de IA responderá en consecuencia. También puedes usar comandos y emojis para controlar el flujo y el estado de ánimo de la conversación. </li>
|
55 |
-
<li><b> ¿Cuáles son los beneficios de AI Chat RPG Game Mod APK? </b><br>
|
56 |
-
AI Chat RPG Game Mod APK es divertido, entretenido, educativo e informativo. Puede ayudarte a mejorar tu vocabulario, gramática, ortografía y habilidades de comunicación. También puede ayudarle a aprender cosas y hechos nuevos sobre diferentes temas y culturas. También puede desafiarte y probar tu conocimiento y creatividad usando comandos y emojis. </li>
|
57 |
-
<li><b>¿Cuáles son las limitaciones de AI Chat RPG Game Mod APK? </b><br>
|
58 |
-
AI Chat RPG Game Mod APK no es perfecto y tiene algunas limitaciones y desventajas. Puede que no siempre entienda lo que quiere decir o dice, o puede dar respuestas inapropiadas o irrelevantes. También puede tener algunos errores o errores que pueden afectar la calidad de la conversación. También puede consumir una gran cantidad de datos y energía de la batería en su dispositivo. </li>
|
59 |
-
</ol></p> 64aa2da5cf<br />
|
60 |
-
<br />
|
61 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Arco Iris Seis Mvil Beta Apk.md
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Rainbow Six Mobile Beta APK: Cómo descargar y jugar el nuevo juego de disparos tácticos</h1>
|
3 |
-
<p>Si eres un fan de los juegos de disparos tácticos, es posible que hayas oído hablar de Rainbow Six, la popular franquicia de Ubisoft. La serie de juegos ha existido durante más de dos décadas, con escenarios de combate realistas, jugabilidad basada en equipos y entornos destructibles. Ahora, puedes experimentar la emoción de Rainbow Six en tu dispositivo móvil con Rainbow Six Mobile, un juego multijugador competitivo de disparos en primera persona. </p>
|
4 |
-
<h2>arco iris seis móvil beta apk</h2><br /><p><b><b>Download File</b> ✑ <a href="https://bltlly.com/2v6KIS">https://bltlly.com/2v6KIS</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Rainbow Six Mobile? </h2>
|
6 |
-
<p>Rainbow Six Mobile es una versión móvil de la aclamada franquicia Rainbow Six, diseñada exclusivamente para plataformas móviles. El juego ofrece una experiencia de juego de ritmo rápido e intenso, donde puedes competir en los modos clásicos de juego Attack vs. Defense. Puedes jugar como Atacante o Defensor en partidas 5v5, y enfrentarte a un combate cuerpo a cuerpo mientras tomas decisiones tácticas oportunas. También puedes colaborar con tu equipo para establecer estrategias y aprovechar los entornos destructibles. </p>
|
7 |
-
<p>El juego cuenta con una lista de operadores altamente capacitados, cada uno con sus propias habilidades y dispositivos únicos. Puedes elegir entre una lista cada vez mayor de operadores de ataque y defensa clásicos, como Ash, Thermite, Mute y Rook. También puede personalizar sus operadores con diferentes trajes, armas y pieles. </p>
|
8 |
-
<p>El juego también cuenta con mapas icónicos de la serie Rainbow Six, como Bank y Border. Los mapas se recrean con impresionantes gráficos y física realista, lo que le permite interactuar con el medio ambiente de varias maneras. Usted puede romper paredes, puertas de barricada, ventanas de rappel, y más. </p>
|
9 |
-
<p></p>
|
10 |
-
<h2>Cómo descargar e instalar Rainbow Six Mobile beta apk? </h2>
|
11 |
-
|
12 |
-
<h4>Requisitos y compatibilidad</h4>
|
13 |
-
<p>Antes de descargar el juego, asegúrese de que su dispositivo cumple con los requisitos mínimos para ejecutar el juego sin problemas. Según Ubisoft, necesitarás:</p>
|
14 |
-
<ul>
|
15 |
-
<li>Un dispositivo Android con Android 8 o superior</li>
|
16 |
-
<li>Al menos 3 GB de RAM</li>
|
17 |
-
<li>Al menos 2 GB de espacio de almacenamiento gratuito</li>
|
18 |
-
<li>Una conexión a Internet estable</li>
|
19 |
-
</ul>
|
20 |
-
<p>También deberías comprobar si tu dispositivo es compatible con el juego visitando este enlace. Si su dispositivo no es compatible, puede encontrar algunos problemas o errores al jugar el juego. </p>
|
21 |
-
<h4>Proceso de preinscripción</h4>
|
22 |
-
<p>El primer paso para descargar el juego es pre-registrarse en Google Play. Esto te permitirá recibir una notificación cuando el juego esté disponible para descargar. Para pre-registrarse, sigue estos pasos:</p>
|
23 |
-
<ol>
|
24 |
-
<li>Abre Google Play en tu dispositivo Android. </li>
|
25 |
-
<li>Buscar Rainbow Six Mobile o haga clic en este enlace. </li>
|
26 |
-
<li>Seleccione el botón de registro previo y acepte los términos y condiciones. </li>
|
27 |
-
<li>Espera un mensaje de confirmación que diga "Estás registrado". </li>
|
28 |
-
</ol>
|
29 |
-
<p>Alternativamente, también puede pre-registrarse en el sitio web oficial de Ubisoft ingresando su dirección de correo electrónico y seleccionando su plataforma preferida. </p>
|
30 |
-
<h4>Proceso de descarga e instalación</h4>
|
31 |
-
<p>Una vez que haya pre-registrado para el juego, tendrá que esperar un correo electrónico de invitación de Ubisoft que contendrá un enlace para descargar el archivo beta apk. El correo electrónico de invitación puede tardar algún tiempo en llegar, así que sea paciente y revise su bandeja de entrada regularmente. También puede consultar el estado de su invitación en el sitio web de Ubisoft. Para descargar e instalar el juego, siga estos pasos:</p>
|
32 |
-
<ol>
|
33 |
-
<li>Abra el correo electrónico de invitación de Ubisoft y haga clic en el enlace para descargar el archivo beta apk. </li>
|
34 |
-
<li>Espere a que el archivo se descargue en su dispositivo. El tamaño del archivo es de aproximadamente 1,5 GB, así que asegúrese de tener suficiente espacio y una buena conexión a Internet. </li>
|
35 |
-
|
36 |
-
<li>Es posible que deba habilitar la instalación de aplicaciones de fuentes desconocidas en la configuración del dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </li>
|
37 |
-
<li>Siga las instrucciones en pantalla para instalar el juego en su dispositivo. </li>
|
38 |
-
<li>Inicia el juego e inicia sesión con tu cuenta de Ubisoft. Si no tienes una, puedes crear una gratis. </li>
|
39 |
-
</ol>
|
40 |
-
<p>Felicidades, usted ha descargado e instalado con éxito Rainbow Six Mobile beta apk en su dispositivo Android. Ahora estás listo para jugar el juego y disfrutar de sus características. </p>
|
41 |
-
<h2>¿Cómo se juega beta de Rainbow Six Mobile? </h2>
|
42 |
-
<p>Ahora que ha instalado el juego, es posible que se pregunte cómo jugarlo y qué esperar de él. Rainbow Six Mobile beta es un juego multijugador competitivo de disparos en primera persona que requiere habilidad, estrategia y trabajo en equipo. Estos son algunos de los conceptos básicos del juego y los modos de juego:</p>
|
43 |
-
<h4>Modo de ataque vs. Defensa</h4>
|
44 |
-
<p>El modo de juego principal en Rainbow Six Mobile beta es Attack vs. Defense, donde dos equipos de cinco jugadores se enfrentan en una serie de rondas. Un equipo juega como atacantes, que tienen que romper una ubicación y completar un objetivo, como desactivar una bomba o rescatar a un rehén. El otro equipo juega como defensores, que tienen que evitar que los atacantes completen su objetivo al fortalecer su posición y eliminarlos. </p>
|
45 |
-
<p>Cada ronda dura tres minutos, y el primer equipo en ganar cuatro rondas gana el partido. Los equipos cambian de bando después de dos rondas, para que puedas experimentar ambos roles. También puede elegir diferentes operadores para cada ronda, dependiendo de su estrategia y preferencia. </p>
|
46 |
-
<h4>Operadores y gadgets</h4>
|
47 |
-
|
48 |
-
<p>Cada operador tiene un arma primaria, un arma secundaria y un gadget que puede ayudarles en su papel. Por ejemplo, Ash es una Operadora Atacante que puede usar sus disparos para destruir paredes y puertas desde la distancia. Mute es un operador defensor que puede usar sus disruptores de señal para interferir drones y gadgets enemigos. </p>
|
49 |
-
<p>También puedes personalizar tus Operadores con diferentes atuendos, armas y pieles. Puedes desbloquear nuevos objetos jugando el juego y ganando recompensas. También puedes comprar algunos artículos con dinero real o moneda del juego. </p>
|
50 |
-
<h4>Mapas y entornos</h4>
|
51 |
-
<p>Los mapas son los lugares donde los partidos tienen lugar en Rainbow Six Mobile beta. El juego cuenta con mapas icónicos de la serie Rainbow Six, como Bank y Border. Los mapas se recrean con gráficos impresionantes y física realista, lo que le permite interactuar con el medio ambiente de varias maneras. </p>
|
52 |
-
<p>Usted puede utilizar sus aparatos para romper paredes, puertas de barricada, ventanas de rappel, y más. También puede utilizar objetos ambientales como mesas, sillas, coches, etc., como cubierta u obstáculos. Los mapas están diseñados para ofrecer múltiples puntos de entrada, ángulos y estrategias para ambos equipos. </p>
|
53 |
-
<h2>Conclusión</h2>
|
54 |
-
<p>Rainbow Six Mobile beta apk es una gran manera de experimentar la emoción de Rainbow Six en su dispositivo móvil. El juego ofrece una experiencia de juego de ritmo rápido e intenso, donde puedes competir en los modos clásicos de juego Attack vs. Defense. Puedes jugar como Atacante o Defensor en partidas 5v5, y enfrentarte a un combate cuerpo a cuerpo mientras tomas decisiones tácticas oportunas. También puedes colaborar con tu equipo para establecer estrategias y aprovechar los entornos destructibles. </p>
|
55 |
-
|
56 |
-
<p>El juego también cuenta con mapas icónicos de la serie Rainbow Six, como Bank y Border. Los mapas se recrean con impresionantes gráficos y física realista, lo que le permite interactuar con el medio ambiente de varias maneras. Usted puede romper paredes, puertas de barricada, ventanas de rappel, y más. </p>
|
57 |
-
<p>Si desea descargar y jugar Rainbow Six Mobile beta apk en su dispositivo Android, tendrá que pre-registrarse para el juego en Google Play o el sitio web de Ubisoft, y esperar un correo electrónico de invitación de Ubisoft que contendrá un enlace para descargar el archivo beta apk. También tendrá que cumplir con los requisitos mínimos para ejecutar el juego sin problemas en su dispositivo. </p>
|
58 |
-
<p>Rainbow Six Mobile beta apk es una gran oportunidad para disfrutar de la emoción de Rainbow Six en su dispositivo móvil. El juego está actualmente en fase de prueba beta, lo que significa que aún no está completamente pulido y podría tener algunos errores o errores. Sin embargo, todavía puedes divertirte jugando el juego y proporcionar comentarios a Ubisoft para ayudarles a mejorar el juego antes de su lanzamiento oficial. </p>
|
59 |
-
<p>Entonces, ¿qué estás esperando? Pre-registro para Rainbow Six Mobile beta apk hoy y prepárate para unirse a la acción! </p>
|
60 |
-
<h2>Preguntas frecuentes</h2>
|
61 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre Rainbow Six Mobile beta apk:</p>
|
62 |
-
<ol>
|
63 |
-
<li><b>Es Rainbow Six móvil beta apk libre para jugar? </b></li>
|
64 |
-
<p>Sí, arco iris seis móvil beta apk es libre de jugar. Sin embargo, es posible que necesite comprar algunos artículos con dinero real o moneda del juego si desea personalizar sus operadores o acceder a algunas funciones premium. </p>
|
65 |
-
<li><b>¿Rainbow Six Mobile beta apk está disponible para dispositivos iOS? </b></li>
|
66 |
-
<p>No, Rainbow Six Mobile beta apk solo está disponible para dispositivos Android en este momento. Ubisoft no ha anunciado planes para lanzar el juego para dispositivos iOS todavía. </p>
|
67 |
-
<li><b>¿Cuánto tiempo durará Rainbow Six Mobile beta apk? </b></li>
|
68 |
-
|
69 |
-
<li><b>¿Puedo jugar Rainbow Six móvil beta apk offline? </b></li>
|
70 |
-
<p>No, no se puede jugar Rainbow Six Mobile beta apk offline. Necesitará una conexión a Internet estable para jugar el juego y acceder a sus características. </p>
|
71 |
-
<li><b>¿Puedo jugar Rainbow Six móvil beta apk con mis amigos? </b></li>
|
72 |
-
<p>Sí, puedes jugar Rainbow Six Mobile beta apk con tus amigos. Puedes invitarlos a unirse a tu equipo o desafiarlos en partidos amistosos. También puedes chatear con ellos en el juego o usar el chat de voz para comunicarse con ellos. </p>
|
73 |
-
</ol></p> 64aa2da5cf<br />
|
74 |
-
<br />
|
75 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Amor Emocional Rap Beat.md
DELETED
@@ -1,79 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar Love Emotional Rap Beat para tu próxima canción</h1>
|
3 |
-
<p>¿Te encanta la música rap y quieres expresar tus sentimientos a través de tus canciones? ¿Quieres crear un sonido único y cautivador que toque los corazones de tus oyentes? Si es así, es posible que desee probar el amor emocional rap beat para su próxima canción. </p>
|
4 |
-
<p>Love emotional rap beat es un tipo de música instrumental que combina elementos de rap y R&B con vibraciones emocionales y románticas. Es perfecto para artistas que quieren hacer canciones sobre amor, relaciones, angustia o luchas personales. En este artículo, te mostraremos qué es el rap emocional de amor, por qué lo necesitas, cómo encontrarlo y descargarlo en línea, y cómo usarlo para tu próxima canción. ¡Vamos a empezar! </p>
|
5 |
-
<h2>descargar amor emocional rap beat</h2><br /><p><b><b>DOWNLOAD</b> →→→ <a href="https://bltlly.com/2v6KDU">https://bltlly.com/2v6KDU</a></b></p><br /><br />
|
6 |
-
<h2>¿Qué es el amor emocional Rap Beat y por qué lo necesita</h2>
|
7 |
-
<h3>La definición y las características del amor emocional Rap Beat</h3>
|
8 |
-
<p>Love emotional rap beat es un subgénero de rap beat que presenta sonidos suaves y melódicos, como piano, guitarra, cuerdas o sintetizador. A menudo tiene un ritmo lento o medio tiempo, con bajo pesado y batería. El ritmo crea un contraste entre las voces de rap duro y los instrumentales suaves y sentimentales. El resultado es una música potente y expresiva que puede transmitir diferentes emociones, como tristeza, felicidad, ira o pasión. </p>
|
9 |
-
<h3>Los beneficios de usar el amor emocional Rap Beat para su música</h3>
|
10 |
-
<p>Hay muchos beneficios de usar rap emocional para tu música. Estos son algunos de ellos:</p>
|
11 |
-
<ul>
|
12 |
-
<li>Puede ayudarle a destacar entre la multitud. Amor emocional rap ritmo no es muy común en la corriente principal de la escena del rap, por lo que su uso puede hacer que su música más original y distintivo. </li>
|
13 |
-
<li>Puede ayudarte a conectar con tu audiencia. El ritmo emocional del rap puede evocar sentimientos y emociones en tus oyentes, haciendo que se relacionen con tu mensaje e historia. </li>
|
14 |
-
|
15 |
-
<li>Puede ayudarte a mejorar tus habilidades. Love emotional rap beat puede desafiarte a mejorar tu flujo de rap, entrega, esquema de rima y juego de palabras, así como tu canto, melodía y armonía. </li>
|
16 |
-
</ul>
|
17 |
-
<h2>Cómo encontrar y descargar amor emocional Rap Beat Online</h2>
|
18 |
-
<h3>Los mejores sitios web para descargar gratis amor emocional Rap Beat</h3>
|
19 |
-
<p>Si usted está buscando libre de amor emocional rap beat en línea, hay muchos sitios web que ofrecen de alta calidad y libres de derechos beats que se puede descargar y utilizar para su música. Estos son algunos de los mejores:</p>
|
20 |
-
<tabla>
|
21 |
-
<tr><th>Sitio web</th><th>Descripción</th></tr>
|
22 |
-
<tr><td>[Dizzla D Music]( 4 )</td><td>Este sitio web ofrece una variedad de ritmos de R&B y hip hop, incluido el ritmo emocional del rap. Puede navegar por género, estado de ánimo o tempo, y descargar los beats gratis o comprar una licencia para uso comercial. </td></tr>
|
23 |
-
<tr><td>[TRAKTRAIN]( 3 )</td><td>Este sitio web es una plataforma donde los productores pueden vender sus ritmos en línea. Puedes encontrar muchos emo rap beats aquí, que son similares al amor emocional rap beat. Puedes filtrar por género, estado de ánimo, bpm o precio, y descargar algunos beats gratis o comprar un contrato de arrendamiento o derechos exclusivos. </td></tr>
|
24 |
-
<tr><td>[Rujay]( 1 )</td><td <p>Este sitio web es un canal de YouTube que carga rap gratis todos los días. Usted puede encontrar muchos amor emocional rap beat aquí, así como otros géneros y estilos. Puede descargar los beats gratis o comprar una licencia para uso comercial. </td></tr>
|
25 |
-
</tabla>
|
26 |
-
<h3>Los mejores canales de YouTube para ver y descargar Love Emotional Rap Beat</h3>
|
27 |
-
<p>Si prefieres ver y escuchar el ritmo emocional del rap en YouTube, hay muchos canales que producen y suben ritmos originales y de alta calidad que puedes disfrutar y descargar. Estos son algunos de los mejores:</p>
|
28 |
-
<ul>
|
29 |
-
|
30 |
-
<li>[RicandThadeus Music]: Este canal tiene más de 500.000 suscriptores y se especializa en R&B y soulful rap beats, incluyendo el amor emocional rap beat. Puedes descargar los beats gratis o comprar una licencia para uso comercial. </li>
|
31 |
-
<li>[Torre Beatz]: Este canal tiene más de 400.000 suscriptores y se centra en emocional y triste rap beats, incluyendo el amor emocional rap beat. Puedes descargar los beats gratis o comprar una licencia para uso comercial. </li>
|
32 |
-
</ul>
|
33 |
-
<h3>Las mejores aplicaciones para descargar y crear amor emocional Rap Beat en su teléfono</h3>
|
34 |
-
<p>Si quieres descargar y crear rap emocional de amor en tu teléfono, hay muchas aplicaciones que pueden ayudarte a hacerlo. Estos son algunos de los mejores:</p>
|
35 |
-
<p></p>
|
36 |
-
<ul>
|
37 |
-
<li>[BandLab]: Esta aplicación es una plataforma de música social que le permite crear, colaborar y compartir su música en línea. Puede utilizar la aplicación para grabar, editar, mezclar y dominar sus canciones, así como acceder a miles de latidos libres, bucles y sampling, incluyendo el amor emocional rap beat. </li>
|
38 |
-
<li>[BeatStars]: Esta aplicación es un mercado donde se puede comprar y vender beats en línea. Puedes usar la aplicación para descubrir, transmitir y descargar millones de ritmos de diferentes géneros y estilos, incluido el ritmo emocional del rap. </li>
|
39 |
-
<li>[Rapchat]: Esta aplicación es un estudio de rap y la comunidad que le permite grabar, compartir y descubrir canciones de rap. Puede utilizar la aplicación para rapear sobre cientos de latidos libres, incluyendo el amor emocional rap beat, o crear sus propios latidos utilizando el fabricante de ritmo incorporado. </li>
|
40 |
-
</ul>
|
41 |
-
<h2>Cómo usar el rap emocional para tu próxima canción</h2>
|
42 |
-
<h3>Cómo elegir el amor derecho emocional Rap Beat para su género y estado de ánimo</h3>
|
43 |
-
<p>Una vez que hayas encontrado y descargado algo de rap emocional que te guste, debes elegir el adecuado para tu género y estado de ánimo. Aquí hay algunos consejos para ayudarle a hacer eso:</p>
|
44 |
-
<ul>
|
45 |
-
|
46 |
-
<li>Piensa en la audiencia y el propósito de tu canción. ¿Para quién estás haciendo esta canción? ¿Qué quieres que sientan? Elige un ritmo de rap emocional que atraiga a tus oyentes objetivo y se ajuste a tu objetivo. </li>
|
47 |
-
<li>Piensa en la estructura y el flujo de tu canción. ¿Cómo quieres organizar tus versos, coro, puente, etc.? ¿Cómo quieres hacer la transición entre ellos? Elegir un amor emocional rap beat que tiene una estructura clara y pegadiza y el flujo. </li>
|
48 |
-
</ul>
|
49 |
-
<h3>Cómo escribir letras y melodías que coinciden con el amor emocional Rap Beat</h3>
|
50 |
-
<p>Después de haber elegido el ritmo de rap emocional de amor adecuado para tu canción, necesitas escribir letras y melodías que coincidan con ella. Aquí hay algunos consejos para ayudarle a hacer eso:</p>
|
51 |
-
<ul>
|
52 |
-
<li>Escucha el rap emocional de amor latir con cuidado y repetidamente. Presta atención al estado de ánimo, tempo, ritmo, melodía, armonía, etc. del ritmo. Trate de sentir la emoción y el ambiente del ritmo. </li>
|
53 |
-
<li>Escribe algunas palabras o frases que vienen a tu mente cuando escuchas el ritmo. Pueden estar relacionados con el tema, tema o mensaje de tu canción, o simplemente palabras aleatorias que suenan bien con el ritmo. </li>
|
54 |
-
<li>Usa estas palabras o frases como inspiración o punto de partida para tus letras. Intenta rimarlas entre ellas o con otras palabras en el ritmo. Trata de usar metáforas, símiles, imágenes u otros recursos literarios para hacer tus letras más creativas y expresivas. </li>
|
55 |
-
Canta o tararea junto con el ritmo para encontrar una melodía que se adapte a él. Pruebe diferentes notas, tonos, etc. hasta que encuentre una melodía que suene bien con el ritmo. Trate de hacer que su melodía sea pegadiza y memorable. Intenta combinar la melodía con el ritmo y el acento del ritmo. </li>
|
56 |
-
</ul>
|
57 |
-
<h3>Cómo grabar y mezclar su voz con el amor emocional Rap Beat</h3>
|
58 |
-
<p>Finalmente, después de haber escrito tus letras y melodías, necesitas grabar y mezclar tus voces con el ritmo emocional del rap. Aquí hay algunos consejos para ayudarle a hacer eso:</p>
|
59 |
-
<ul>
|
60 |
-
|
61 |
-
<li>Practica tus voces antes de grabar. Quieres asegurarte de que puedes rapear o cantar tus letras y melodías sin problemas y con confianza. También debe asegurarse de que puede coincidir con el tiempo y el tono del ritmo. </li>
|
62 |
-
<li>Graba múltiples tomas de tus voces. Quieres tener diferentes opciones y variaciones de tus voces, para que puedas elegir la mejor o combinarlas más tarde. También puedes grabar diferentes partes de tus voces por separado, como los versos, el coro, las improvisaciones, etc.</li>
|
63 |
-
<li>Mezcla tus voces con el ritmo. Quieres equilibrar el volumen, EQ, compresión, reverberación, etc. de tus voces y el ritmo, para que suenen armoniosos y claros. Puede utilizar un software de mezcla o un ingeniero profesional, dependiendo de sus habilidades y preferencias. </li>
|
64 |
-
</ul>
|
65 |
-
<h2>Conclusión</h2>
|
66 |
-
<p>Love emocional rap beat es una gran manera de hacer su música rap más expresiva y única. Puede ayudarte a transmitir tus sentimientos y emociones, conectar con tu audiencia, mostrar tu versatilidad y mejorar tus habilidades. Para usar el ritmo emocional del rap para tu próxima canción, necesitas encontrarlo y descargarlo en línea, elegir el adecuado para tu género y estado de ánimo, escribir letras y melodías que coincidan con él, y grabar y mezclar tus voces con él. Esperamos que este artículo te haya dado algunos consejos y recursos útiles sobre cómo descargar rap emocional para tu próxima canción. Ahora adelante y hacer algo de música increíble! </p>
|
67 |
-
<h2>Preguntas frecuentes</h2>
|
68 |
-
<h4>¿Cuáles son algunos ejemplos de artistas que usan el rap emocional de amor? </h4>
|
69 |
-
<p>Algunos ejemplos de artistas que utilizan el amor emocional rap beat son Drake, Post Malone, Jugo WRLD, XXXTentacion, Lil Peep, NF, etc.</p>
|
70 |
-
<h4>¿Dónde puedo encontrar más amor emocional rap beat? </h4>
|
71 |
-
<p>Puedes encontrar más amor emocional rap beat en varios sitios web, canales de YouTube, aplicaciones, o comunidades en línea que ofrecen ritmos gratuitos o pagados. También puedes buscar palabras clave como "love emotional rap beat", "emo rap beat", "sad rap beat", "romantic rap beat", etc.</p>
|
72 |
-
|
73 |
-
<p>Usted puede hacer su propio amor emocional rap beat mediante el uso de un ritmo que hace software o aplicación que le permite crear, editar y organizar diferentes sonidos e instrumentos. También puedes usar un teclado MIDI o un teclado de batería para tocar y grabar tus propias melodías y ritmos. </p>
|
74 |
-
<h4>¿Cómo puedo vender mi amor emocional rap beat online? </h4>
|
75 |
-
<p>Usted puede vender su amor emocional rap beat en línea mediante el uso de una plataforma o mercado que conecta a los productores y artistas que compran y venden beats. También puedes crear tu propio sitio web o cuenta de redes sociales para promocionar y vender tus beats. </p>
|
76 |
-
<h4>¿Cómo puedo aprender más sobre el amor emocional rap beat? </h4>
|
77 |
-
<p>Puedes aprender más sobre el amor emocional rap beat viendo tutoriales, reseñas o consejos de otros productores o artistas que hacen o usan el amor emocional rap beat. También puedes leer blogs, artículos o libros sobre producción o historia de música rap. </p> 64aa2da5cf<br />
|
78 |
-
<br />
|
79 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/filesize.py
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
# coding: utf-8
|
2 |
-
"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
|
3 |
-
|
4 |
-
The functions declared in this module should cover the different
|
5 |
-
use cases needed to generate a string representation of a file size
|
6 |
-
using several different units. Since there are many standards regarding
|
7 |
-
file size units, three different functions have been implemented.
|
8 |
-
|
9 |
-
See Also:
|
10 |
-
* `Wikipedia: Binary prefix <https://en.wikipedia.org/wiki/Binary_prefix>`_
|
11 |
-
|
12 |
-
"""
|
13 |
-
|
14 |
-
__all__ = ["decimal"]
|
15 |
-
|
16 |
-
from typing import Iterable, List, Optional, Tuple
|
17 |
-
|
18 |
-
|
19 |
-
def _to_str(
|
20 |
-
size: int,
|
21 |
-
suffixes: Iterable[str],
|
22 |
-
base: int,
|
23 |
-
*,
|
24 |
-
precision: Optional[int] = 1,
|
25 |
-
separator: Optional[str] = " ",
|
26 |
-
) -> str:
|
27 |
-
if size == 1:
|
28 |
-
return "1 byte"
|
29 |
-
elif size < base:
|
30 |
-
return "{:,} bytes".format(size)
|
31 |
-
|
32 |
-
for i, suffix in enumerate(suffixes, 2): # noqa: B007
|
33 |
-
unit = base**i
|
34 |
-
if size < unit:
|
35 |
-
break
|
36 |
-
return "{:,.{precision}f}{separator}{}".format(
|
37 |
-
(base * size / unit),
|
38 |
-
suffix,
|
39 |
-
precision=precision,
|
40 |
-
separator=separator,
|
41 |
-
)
|
42 |
-
|
43 |
-
|
44 |
-
def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
|
45 |
-
"""Pick a suffix and base for the given size."""
|
46 |
-
for i, suffix in enumerate(suffixes):
|
47 |
-
unit = base**i
|
48 |
-
if size < unit * base:
|
49 |
-
break
|
50 |
-
return unit, suffix
|
51 |
-
|
52 |
-
|
53 |
-
def decimal(
|
54 |
-
size: int,
|
55 |
-
*,
|
56 |
-
precision: Optional[int] = 1,
|
57 |
-
separator: Optional[str] = " ",
|
58 |
-
) -> str:
|
59 |
-
"""Convert a filesize in to a string (powers of 1000, SI prefixes).
|
60 |
-
|
61 |
-
In this convention, ``1000 B = 1 kB``.
|
62 |
-
|
63 |
-
This is typically the format used to advertise the storage
|
64 |
-
capacity of USB flash drives and the like (*256 MB* meaning
|
65 |
-
actually a storage capacity of more than *256 000 000 B*),
|
66 |
-
or used by **Mac OS X** since v10.6 to report file sizes.
|
67 |
-
|
68 |
-
Arguments:
|
69 |
-
int (size): A file size.
|
70 |
-
int (precision): The number of decimal places to include (default = 1).
|
71 |
-
str (separator): The string to separate the value from the units (default = " ").
|
72 |
-
|
73 |
-
Returns:
|
74 |
-
`str`: A string containing a abbreviated file size and units.
|
75 |
-
|
76 |
-
Example:
|
77 |
-
>>> filesize.decimal(30000)
|
78 |
-
'30.0 kB'
|
79 |
-
>>> filesize.decimal(30000, precision=2, separator="")
|
80 |
-
'30.00kB'
|
81 |
-
|
82 |
-
"""
|
83 |
-
return _to_str(
|
84 |
-
size,
|
85 |
-
("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
|
86 |
-
1000,
|
87 |
-
precision=precision,
|
88 |
-
separator=separator,
|
89 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/terminal_theme.py
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
from typing import List, Optional, Tuple
|
2 |
-
|
3 |
-
from .color_triplet import ColorTriplet
|
4 |
-
from .palette import Palette
|
5 |
-
|
6 |
-
_ColorTuple = Tuple[int, int, int]
|
7 |
-
|
8 |
-
|
9 |
-
class TerminalTheme:
|
10 |
-
"""A color theme used when exporting console content.
|
11 |
-
|
12 |
-
Args:
|
13 |
-
background (Tuple[int, int, int]): The background color.
|
14 |
-
foreground (Tuple[int, int, int]): The foreground (text) color.
|
15 |
-
normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors.
|
16 |
-
bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None
|
17 |
-
to repeat normal intensity. Defaults to None.
|
18 |
-
"""
|
19 |
-
|
20 |
-
def __init__(
|
21 |
-
self,
|
22 |
-
background: _ColorTuple,
|
23 |
-
foreground: _ColorTuple,
|
24 |
-
normal: List[_ColorTuple],
|
25 |
-
bright: Optional[List[_ColorTuple]] = None,
|
26 |
-
) -> None:
|
27 |
-
self.background_color = ColorTriplet(*background)
|
28 |
-
self.foreground_color = ColorTriplet(*foreground)
|
29 |
-
self.ansi_colors = Palette(normal + (bright or normal))
|
30 |
-
|
31 |
-
|
32 |
-
DEFAULT_TERMINAL_THEME = TerminalTheme(
|
33 |
-
(255, 255, 255),
|
34 |
-
(0, 0, 0),
|
35 |
-
[
|
36 |
-
(0, 0, 0),
|
37 |
-
(128, 0, 0),
|
38 |
-
(0, 128, 0),
|
39 |
-
(128, 128, 0),
|
40 |
-
(0, 0, 128),
|
41 |
-
(128, 0, 128),
|
42 |
-
(0, 128, 128),
|
43 |
-
(192, 192, 192),
|
44 |
-
],
|
45 |
-
[
|
46 |
-
(128, 128, 128),
|
47 |
-
(255, 0, 0),
|
48 |
-
(0, 255, 0),
|
49 |
-
(255, 255, 0),
|
50 |
-
(0, 0, 255),
|
51 |
-
(255, 0, 255),
|
52 |
-
(0, 255, 255),
|
53 |
-
(255, 255, 255),
|
54 |
-
],
|
55 |
-
)
|
56 |
-
|
57 |
-
MONOKAI = TerminalTheme(
|
58 |
-
(12, 12, 12),
|
59 |
-
(217, 217, 217),
|
60 |
-
[
|
61 |
-
(26, 26, 26),
|
62 |
-
(244, 0, 95),
|
63 |
-
(152, 224, 36),
|
64 |
-
(253, 151, 31),
|
65 |
-
(157, 101, 255),
|
66 |
-
(244, 0, 95),
|
67 |
-
(88, 209, 235),
|
68 |
-
(196, 197, 181),
|
69 |
-
(98, 94, 76),
|
70 |
-
],
|
71 |
-
[
|
72 |
-
(244, 0, 95),
|
73 |
-
(152, 224, 36),
|
74 |
-
(224, 213, 97),
|
75 |
-
(157, 101, 255),
|
76 |
-
(244, 0, 95),
|
77 |
-
(88, 209, 235),
|
78 |
-
(246, 246, 239),
|
79 |
-
],
|
80 |
-
)
|
81 |
-
DIMMED_MONOKAI = TerminalTheme(
|
82 |
-
(25, 25, 25),
|
83 |
-
(185, 188, 186),
|
84 |
-
[
|
85 |
-
(58, 61, 67),
|
86 |
-
(190, 63, 72),
|
87 |
-
(135, 154, 59),
|
88 |
-
(197, 166, 53),
|
89 |
-
(79, 118, 161),
|
90 |
-
(133, 92, 141),
|
91 |
-
(87, 143, 164),
|
92 |
-
(185, 188, 186),
|
93 |
-
(136, 137, 135),
|
94 |
-
],
|
95 |
-
[
|
96 |
-
(251, 0, 31),
|
97 |
-
(15, 114, 47),
|
98 |
-
(196, 112, 51),
|
99 |
-
(24, 109, 227),
|
100 |
-
(251, 0, 103),
|
101 |
-
(46, 112, 109),
|
102 |
-
(253, 255, 185),
|
103 |
-
],
|
104 |
-
)
|
105 |
-
NIGHT_OWLISH = TerminalTheme(
|
106 |
-
(255, 255, 255),
|
107 |
-
(64, 63, 83),
|
108 |
-
[
|
109 |
-
(1, 22, 39),
|
110 |
-
(211, 66, 62),
|
111 |
-
(42, 162, 152),
|
112 |
-
(218, 170, 1),
|
113 |
-
(72, 118, 214),
|
114 |
-
(64, 63, 83),
|
115 |
-
(8, 145, 106),
|
116 |
-
(122, 129, 129),
|
117 |
-
(122, 129, 129),
|
118 |
-
],
|
119 |
-
[
|
120 |
-
(247, 110, 110),
|
121 |
-
(73, 208, 197),
|
122 |
-
(218, 194, 107),
|
123 |
-
(92, 167, 228),
|
124 |
-
(105, 112, 152),
|
125 |
-
(0, 201, 144),
|
126 |
-
(152, 159, 177),
|
127 |
-
],
|
128 |
-
)
|
129 |
-
|
130 |
-
SVG_EXPORT_THEME = TerminalTheme(
|
131 |
-
(41, 41, 41),
|
132 |
-
(197, 200, 198),
|
133 |
-
[
|
134 |
-
(75, 78, 85),
|
135 |
-
(204, 85, 90),
|
136 |
-
(152, 168, 75),
|
137 |
-
(208, 179, 68),
|
138 |
-
(96, 138, 177),
|
139 |
-
(152, 114, 159),
|
140 |
-
(104, 160, 179),
|
141 |
-
(197, 200, 198),
|
142 |
-
(154, 155, 153),
|
143 |
-
],
|
144 |
-
[
|
145 |
-
(255, 38, 39),
|
146 |
-
(0, 130, 61),
|
147 |
-
(208, 132, 66),
|
148 |
-
(25, 132, 233),
|
149 |
-
(255, 44, 122),
|
150 |
-
(57, 130, 128),
|
151 |
-
(253, 253, 197),
|
152 |
-
],
|
153 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/typing_extensions.py
DELETED
@@ -1,2296 +0,0 @@
|
|
1 |
-
import abc
|
2 |
-
import collections
|
3 |
-
import collections.abc
|
4 |
-
import operator
|
5 |
-
import sys
|
6 |
-
import typing
|
7 |
-
|
8 |
-
# After PEP 560, internal typing API was substantially reworked.
|
9 |
-
# This is especially important for Protocol class which uses internal APIs
|
10 |
-
# quite extensively.
|
11 |
-
PEP_560 = sys.version_info[:3] >= (3, 7, 0)
|
12 |
-
|
13 |
-
if PEP_560:
|
14 |
-
GenericMeta = type
|
15 |
-
else:
|
16 |
-
# 3.6
|
17 |
-
from typing import GenericMeta, _type_vars # noqa
|
18 |
-
|
19 |
-
# The two functions below are copies of typing internal helpers.
|
20 |
-
# They are needed by _ProtocolMeta
|
21 |
-
|
22 |
-
|
23 |
-
def _no_slots_copy(dct):
|
24 |
-
dict_copy = dict(dct)
|
25 |
-
if '__slots__' in dict_copy:
|
26 |
-
for slot in dict_copy['__slots__']:
|
27 |
-
dict_copy.pop(slot, None)
|
28 |
-
return dict_copy
|
29 |
-
|
30 |
-
|
31 |
-
def _check_generic(cls, parameters):
|
32 |
-
if not cls.__parameters__:
|
33 |
-
raise TypeError(f"{cls} is not a generic class")
|
34 |
-
alen = len(parameters)
|
35 |
-
elen = len(cls.__parameters__)
|
36 |
-
if alen != elen:
|
37 |
-
raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
|
38 |
-
f" actual {alen}, expected {elen}")
|
39 |
-
|
40 |
-
|
41 |
-
# Please keep __all__ alphabetized within each category.
|
42 |
-
__all__ = [
|
43 |
-
# Super-special typing primitives.
|
44 |
-
'ClassVar',
|
45 |
-
'Concatenate',
|
46 |
-
'Final',
|
47 |
-
'ParamSpec',
|
48 |
-
'Self',
|
49 |
-
'Type',
|
50 |
-
|
51 |
-
# ABCs (from collections.abc).
|
52 |
-
'Awaitable',
|
53 |
-
'AsyncIterator',
|
54 |
-
'AsyncIterable',
|
55 |
-
'Coroutine',
|
56 |
-
'AsyncGenerator',
|
57 |
-
'AsyncContextManager',
|
58 |
-
'ChainMap',
|
59 |
-
|
60 |
-
# Concrete collection types.
|
61 |
-
'ContextManager',
|
62 |
-
'Counter',
|
63 |
-
'Deque',
|
64 |
-
'DefaultDict',
|
65 |
-
'OrderedDict',
|
66 |
-
'TypedDict',
|
67 |
-
|
68 |
-
# Structural checks, a.k.a. protocols.
|
69 |
-
'SupportsIndex',
|
70 |
-
|
71 |
-
# One-off things.
|
72 |
-
'Annotated',
|
73 |
-
'final',
|
74 |
-
'IntVar',
|
75 |
-
'Literal',
|
76 |
-
'NewType',
|
77 |
-
'overload',
|
78 |
-
'Protocol',
|
79 |
-
'runtime',
|
80 |
-
'runtime_checkable',
|
81 |
-
'Text',
|
82 |
-
'TypeAlias',
|
83 |
-
'TypeGuard',
|
84 |
-
'TYPE_CHECKING',
|
85 |
-
]
|
86 |
-
|
87 |
-
if PEP_560:
|
88 |
-
__all__.extend(["get_args", "get_origin", "get_type_hints"])
|
89 |
-
|
90 |
-
# 3.6.2+
|
91 |
-
if hasattr(typing, 'NoReturn'):
|
92 |
-
NoReturn = typing.NoReturn
|
93 |
-
# 3.6.0-3.6.1
|
94 |
-
else:
|
95 |
-
class _NoReturn(typing._FinalTypingBase, _root=True):
|
96 |
-
"""Special type indicating functions that never return.
|
97 |
-
Example::
|
98 |
-
|
99 |
-
from typing import NoReturn
|
100 |
-
|
101 |
-
def stop() -> NoReturn:
|
102 |
-
raise Exception('no way')
|
103 |
-
|
104 |
-
This type is invalid in other positions, e.g., ``List[NoReturn]``
|
105 |
-
will fail in static type checkers.
|
106 |
-
"""
|
107 |
-
__slots__ = ()
|
108 |
-
|
109 |
-
def __instancecheck__(self, obj):
|
110 |
-
raise TypeError("NoReturn cannot be used with isinstance().")
|
111 |
-
|
112 |
-
def __subclasscheck__(self, cls):
|
113 |
-
raise TypeError("NoReturn cannot be used with issubclass().")
|
114 |
-
|
115 |
-
NoReturn = _NoReturn(_root=True)
|
116 |
-
|
117 |
-
# Some unconstrained type variables. These are used by the container types.
|
118 |
-
# (These are not for export.)
|
119 |
-
T = typing.TypeVar('T') # Any type.
|
120 |
-
KT = typing.TypeVar('KT') # Key type.
|
121 |
-
VT = typing.TypeVar('VT') # Value type.
|
122 |
-
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
|
123 |
-
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
|
124 |
-
|
125 |
-
ClassVar = typing.ClassVar
|
126 |
-
|
127 |
-
# On older versions of typing there is an internal class named "Final".
|
128 |
-
# 3.8+
|
129 |
-
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
|
130 |
-
Final = typing.Final
|
131 |
-
# 3.7
|
132 |
-
elif sys.version_info[:2] >= (3, 7):
|
133 |
-
class _FinalForm(typing._SpecialForm, _root=True):
|
134 |
-
|
135 |
-
def __repr__(self):
|
136 |
-
return 'typing_extensions.' + self._name
|
137 |
-
|
138 |
-
def __getitem__(self, parameters):
|
139 |
-
item = typing._type_check(parameters,
|
140 |
-
f'{self._name} accepts only single type')
|
141 |
-
return typing._GenericAlias(self, (item,))
|
142 |
-
|
143 |
-
Final = _FinalForm('Final',
|
144 |
-
doc="""A special typing construct to indicate that a name
|
145 |
-
cannot be re-assigned or overridden in a subclass.
|
146 |
-
For example:
|
147 |
-
|
148 |
-
MAX_SIZE: Final = 9000
|
149 |
-
MAX_SIZE += 1 # Error reported by type checker
|
150 |
-
|
151 |
-
class Connection:
|
152 |
-
TIMEOUT: Final[int] = 10
|
153 |
-
class FastConnector(Connection):
|
154 |
-
TIMEOUT = 1 # Error reported by type checker
|
155 |
-
|
156 |
-
There is no runtime checking of these properties.""")
|
157 |
-
# 3.6
|
158 |
-
else:
|
159 |
-
class _Final(typing._FinalTypingBase, _root=True):
|
160 |
-
"""A special typing construct to indicate that a name
|
161 |
-
cannot be re-assigned or overridden in a subclass.
|
162 |
-
For example:
|
163 |
-
|
164 |
-
MAX_SIZE: Final = 9000
|
165 |
-
MAX_SIZE += 1 # Error reported by type checker
|
166 |
-
|
167 |
-
class Connection:
|
168 |
-
TIMEOUT: Final[int] = 10
|
169 |
-
class FastConnector(Connection):
|
170 |
-
TIMEOUT = 1 # Error reported by type checker
|
171 |
-
|
172 |
-
There is no runtime checking of these properties.
|
173 |
-
"""
|
174 |
-
|
175 |
-
__slots__ = ('__type__',)
|
176 |
-
|
177 |
-
def __init__(self, tp=None, **kwds):
|
178 |
-
self.__type__ = tp
|
179 |
-
|
180 |
-
def __getitem__(self, item):
|
181 |
-
cls = type(self)
|
182 |
-
if self.__type__ is None:
|
183 |
-
return cls(typing._type_check(item,
|
184 |
-
f'{cls.__name__[1:]} accepts only single type.'),
|
185 |
-
_root=True)
|
186 |
-
raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
|
187 |
-
|
188 |
-
def _eval_type(self, globalns, localns):
|
189 |
-
new_tp = typing._eval_type(self.__type__, globalns, localns)
|
190 |
-
if new_tp == self.__type__:
|
191 |
-
return self
|
192 |
-
return type(self)(new_tp, _root=True)
|
193 |
-
|
194 |
-
def __repr__(self):
|
195 |
-
r = super().__repr__()
|
196 |
-
if self.__type__ is not None:
|
197 |
-
r += f'[{typing._type_repr(self.__type__)}]'
|
198 |
-
return r
|
199 |
-
|
200 |
-
def __hash__(self):
|
201 |
-
return hash((type(self).__name__, self.__type__))
|
202 |
-
|
203 |
-
def __eq__(self, other):
|
204 |
-
if not isinstance(other, _Final):
|
205 |
-
return NotImplemented
|
206 |
-
if self.__type__ is not None:
|
207 |
-
return self.__type__ == other.__type__
|
208 |
-
return self is other
|
209 |
-
|
210 |
-
Final = _Final(_root=True)
|
211 |
-
|
212 |
-
|
213 |
-
# 3.8+
|
214 |
-
if hasattr(typing, 'final'):
|
215 |
-
final = typing.final
|
216 |
-
# 3.6-3.7
|
217 |
-
else:
|
218 |
-
def final(f):
|
219 |
-
"""This decorator can be used to indicate to type checkers that
|
220 |
-
the decorated method cannot be overridden, and decorated class
|
221 |
-
cannot be subclassed. For example:
|
222 |
-
|
223 |
-
class Base:
|
224 |
-
@final
|
225 |
-
def done(self) -> None:
|
226 |
-
...
|
227 |
-
class Sub(Base):
|
228 |
-
def done(self) -> None: # Error reported by type checker
|
229 |
-
...
|
230 |
-
@final
|
231 |
-
class Leaf:
|
232 |
-
...
|
233 |
-
class Other(Leaf): # Error reported by type checker
|
234 |
-
...
|
235 |
-
|
236 |
-
There is no runtime checking of these properties.
|
237 |
-
"""
|
238 |
-
return f
|
239 |
-
|
240 |
-
|
241 |
-
def IntVar(name):
|
242 |
-
return typing.TypeVar(name)
|
243 |
-
|
244 |
-
|
245 |
-
# 3.8+:
|
246 |
-
if hasattr(typing, 'Literal'):
|
247 |
-
Literal = typing.Literal
|
248 |
-
# 3.7:
|
249 |
-
elif sys.version_info[:2] >= (3, 7):
|
250 |
-
class _LiteralForm(typing._SpecialForm, _root=True):
|
251 |
-
|
252 |
-
def __repr__(self):
|
253 |
-
return 'typing_extensions.' + self._name
|
254 |
-
|
255 |
-
def __getitem__(self, parameters):
|
256 |
-
return typing._GenericAlias(self, parameters)
|
257 |
-
|
258 |
-
Literal = _LiteralForm('Literal',
|
259 |
-
doc="""A type that can be used to indicate to type checkers
|
260 |
-
that the corresponding value has a value literally equivalent
|
261 |
-
to the provided parameter. For example:
|
262 |
-
|
263 |
-
var: Literal[4] = 4
|
264 |
-
|
265 |
-
The type checker understands that 'var' is literally equal to
|
266 |
-
the value 4 and no other value.
|
267 |
-
|
268 |
-
Literal[...] cannot be subclassed. There is no runtime
|
269 |
-
checking verifying that the parameter is actually a value
|
270 |
-
instead of a type.""")
|
271 |
-
# 3.6:
|
272 |
-
else:
|
273 |
-
class _Literal(typing._FinalTypingBase, _root=True):
|
274 |
-
"""A type that can be used to indicate to type checkers that the
|
275 |
-
corresponding value has a value literally equivalent to the
|
276 |
-
provided parameter. For example:
|
277 |
-
|
278 |
-
var: Literal[4] = 4
|
279 |
-
|
280 |
-
The type checker understands that 'var' is literally equal to the
|
281 |
-
value 4 and no other value.
|
282 |
-
|
283 |
-
Literal[...] cannot be subclassed. There is no runtime checking
|
284 |
-
verifying that the parameter is actually a value instead of a type.
|
285 |
-
"""
|
286 |
-
|
287 |
-
__slots__ = ('__values__',)
|
288 |
-
|
289 |
-
def __init__(self, values=None, **kwds):
|
290 |
-
self.__values__ = values
|
291 |
-
|
292 |
-
def __getitem__(self, values):
|
293 |
-
cls = type(self)
|
294 |
-
if self.__values__ is None:
|
295 |
-
if not isinstance(values, tuple):
|
296 |
-
values = (values,)
|
297 |
-
return cls(values, _root=True)
|
298 |
-
raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
|
299 |
-
|
300 |
-
def _eval_type(self, globalns, localns):
|
301 |
-
return self
|
302 |
-
|
303 |
-
def __repr__(self):
|
304 |
-
r = super().__repr__()
|
305 |
-
if self.__values__ is not None:
|
306 |
-
r += f'[{", ".join(map(typing._type_repr, self.__values__))}]'
|
307 |
-
return r
|
308 |
-
|
309 |
-
def __hash__(self):
|
310 |
-
return hash((type(self).__name__, self.__values__))
|
311 |
-
|
312 |
-
def __eq__(self, other):
|
313 |
-
if not isinstance(other, _Literal):
|
314 |
-
return NotImplemented
|
315 |
-
if self.__values__ is not None:
|
316 |
-
return self.__values__ == other.__values__
|
317 |
-
return self is other
|
318 |
-
|
319 |
-
Literal = _Literal(_root=True)
|
320 |
-
|
321 |
-
|
322 |
-
_overload_dummy = typing._overload_dummy # noqa
|
323 |
-
overload = typing.overload
|
324 |
-
|
325 |
-
|
326 |
-
# This is not a real generic class. Don't use outside annotations.
|
327 |
-
Type = typing.Type
|
328 |
-
|
329 |
-
# Various ABCs mimicking those in collections.abc.
|
330 |
-
# A few are simply re-exported for completeness.
|
331 |
-
|
332 |
-
|
333 |
-
class _ExtensionsGenericMeta(GenericMeta):
|
334 |
-
def __subclasscheck__(self, subclass):
|
335 |
-
"""This mimics a more modern GenericMeta.__subclasscheck__() logic
|
336 |
-
(that does not have problems with recursion) to work around interactions
|
337 |
-
between collections, typing, and typing_extensions on older
|
338 |
-
versions of Python, see https://github.com/python/typing/issues/501.
|
339 |
-
"""
|
340 |
-
if self.__origin__ is not None:
|
341 |
-
if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
|
342 |
-
raise TypeError("Parameterized generics cannot be used with class "
|
343 |
-
"or instance checks")
|
344 |
-
return False
|
345 |
-
if not self.__extra__:
|
346 |
-
return super().__subclasscheck__(subclass)
|
347 |
-
res = self.__extra__.__subclasshook__(subclass)
|
348 |
-
if res is not NotImplemented:
|
349 |
-
return res
|
350 |
-
if self.__extra__ in subclass.__mro__:
|
351 |
-
return True
|
352 |
-
for scls in self.__extra__.__subclasses__():
|
353 |
-
if isinstance(scls, GenericMeta):
|
354 |
-
continue
|
355 |
-
if issubclass(subclass, scls):
|
356 |
-
return True
|
357 |
-
return False
|
358 |
-
|
359 |
-
|
360 |
-
Awaitable = typing.Awaitable
|
361 |
-
Coroutine = typing.Coroutine
|
362 |
-
AsyncIterable = typing.AsyncIterable
|
363 |
-
AsyncIterator = typing.AsyncIterator
|
364 |
-
|
365 |
-
# 3.6.1+
|
366 |
-
if hasattr(typing, 'Deque'):
|
367 |
-
Deque = typing.Deque
|
368 |
-
# 3.6.0
|
369 |
-
else:
|
370 |
-
class Deque(collections.deque, typing.MutableSequence[T],
|
371 |
-
metaclass=_ExtensionsGenericMeta,
|
372 |
-
extra=collections.deque):
|
373 |
-
__slots__ = ()
|
374 |
-
|
375 |
-
def __new__(cls, *args, **kwds):
|
376 |
-
if cls._gorg is Deque:
|
377 |
-
return collections.deque(*args, **kwds)
|
378 |
-
return typing._generic_new(collections.deque, cls, *args, **kwds)
|
379 |
-
|
380 |
-
ContextManager = typing.ContextManager
|
381 |
-
# 3.6.2+
|
382 |
-
if hasattr(typing, 'AsyncContextManager'):
|
383 |
-
AsyncContextManager = typing.AsyncContextManager
|
384 |
-
# 3.6.0-3.6.1
|
385 |
-
else:
|
386 |
-
from _collections_abc import _check_methods as _check_methods_in_mro # noqa
|
387 |
-
|
388 |
-
class AsyncContextManager(typing.Generic[T_co]):
|
389 |
-
__slots__ = ()
|
390 |
-
|
391 |
-
async def __aenter__(self):
|
392 |
-
return self
|
393 |
-
|
394 |
-
@abc.abstractmethod
|
395 |
-
async def __aexit__(self, exc_type, exc_value, traceback):
|
396 |
-
return None
|
397 |
-
|
398 |
-
@classmethod
|
399 |
-
def __subclasshook__(cls, C):
|
400 |
-
if cls is AsyncContextManager:
|
401 |
-
return _check_methods_in_mro(C, "__aenter__", "__aexit__")
|
402 |
-
return NotImplemented
|
403 |
-
|
404 |
-
DefaultDict = typing.DefaultDict
|
405 |
-
|
406 |
-
# 3.7.2+
|
407 |
-
if hasattr(typing, 'OrderedDict'):
|
408 |
-
OrderedDict = typing.OrderedDict
|
409 |
-
# 3.7.0-3.7.2
|
410 |
-
elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
|
411 |
-
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
|
412 |
-
# 3.6
|
413 |
-
else:
|
414 |
-
class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
|
415 |
-
metaclass=_ExtensionsGenericMeta,
|
416 |
-
extra=collections.OrderedDict):
|
417 |
-
|
418 |
-
__slots__ = ()
|
419 |
-
|
420 |
-
def __new__(cls, *args, **kwds):
|
421 |
-
if cls._gorg is OrderedDict:
|
422 |
-
return collections.OrderedDict(*args, **kwds)
|
423 |
-
return typing._generic_new(collections.OrderedDict, cls, *args, **kwds)
|
424 |
-
|
425 |
-
# 3.6.2+
|
426 |
-
if hasattr(typing, 'Counter'):
|
427 |
-
Counter = typing.Counter
|
428 |
-
# 3.6.0-3.6.1
|
429 |
-
else:
|
430 |
-
class Counter(collections.Counter,
|
431 |
-
typing.Dict[T, int],
|
432 |
-
metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
|
433 |
-
|
434 |
-
__slots__ = ()
|
435 |
-
|
436 |
-
def __new__(cls, *args, **kwds):
|
437 |
-
if cls._gorg is Counter:
|
438 |
-
return collections.Counter(*args, **kwds)
|
439 |
-
return typing._generic_new(collections.Counter, cls, *args, **kwds)
|
440 |
-
|
441 |
-
# 3.6.1+
|
442 |
-
if hasattr(typing, 'ChainMap'):
|
443 |
-
ChainMap = typing.ChainMap
|
444 |
-
elif hasattr(collections, 'ChainMap'):
|
445 |
-
class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
|
446 |
-
metaclass=_ExtensionsGenericMeta,
|
447 |
-
extra=collections.ChainMap):
|
448 |
-
|
449 |
-
__slots__ = ()
|
450 |
-
|
451 |
-
def __new__(cls, *args, **kwds):
|
452 |
-
if cls._gorg is ChainMap:
|
453 |
-
return collections.ChainMap(*args, **kwds)
|
454 |
-
return typing._generic_new(collections.ChainMap, cls, *args, **kwds)
|
455 |
-
|
456 |
-
# 3.6.1+
|
457 |
-
if hasattr(typing, 'AsyncGenerator'):
|
458 |
-
AsyncGenerator = typing.AsyncGenerator
|
459 |
-
# 3.6.0
|
460 |
-
else:
|
461 |
-
class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
|
462 |
-
metaclass=_ExtensionsGenericMeta,
|
463 |
-
extra=collections.abc.AsyncGenerator):
|
464 |
-
__slots__ = ()
|
465 |
-
|
466 |
-
NewType = typing.NewType
|
467 |
-
Text = typing.Text
|
468 |
-
TYPE_CHECKING = typing.TYPE_CHECKING
|
469 |
-
|
470 |
-
|
471 |
-
def _gorg(cls):
|
472 |
-
"""This function exists for compatibility with old typing versions."""
|
473 |
-
assert isinstance(cls, GenericMeta)
|
474 |
-
if hasattr(cls, '_gorg'):
|
475 |
-
return cls._gorg
|
476 |
-
while cls.__origin__ is not None:
|
477 |
-
cls = cls.__origin__
|
478 |
-
return cls
|
479 |
-
|
480 |
-
|
481 |
-
_PROTO_WHITELIST = ['Callable', 'Awaitable',
|
482 |
-
'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
|
483 |
-
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
|
484 |
-
'ContextManager', 'AsyncContextManager']
|
485 |
-
|
486 |
-
|
487 |
-
def _get_protocol_attrs(cls):
|
488 |
-
attrs = set()
|
489 |
-
for base in cls.__mro__[:-1]: # without object
|
490 |
-
if base.__name__ in ('Protocol', 'Generic'):
|
491 |
-
continue
|
492 |
-
annotations = getattr(base, '__annotations__', {})
|
493 |
-
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
|
494 |
-
if (not attr.startswith('_abc_') and attr not in (
|
495 |
-
'__abstractmethods__', '__annotations__', '__weakref__',
|
496 |
-
'_is_protocol', '_is_runtime_protocol', '__dict__',
|
497 |
-
'__args__', '__slots__',
|
498 |
-
'__next_in_mro__', '__parameters__', '__origin__',
|
499 |
-
'__orig_bases__', '__extra__', '__tree_hash__',
|
500 |
-
'__doc__', '__subclasshook__', '__init__', '__new__',
|
501 |
-
'__module__', '_MutableMapping__marker', '_gorg')):
|
502 |
-
attrs.add(attr)
|
503 |
-
return attrs
|
504 |
-
|
505 |
-
|
506 |
-
def _is_callable_members_only(cls):
|
507 |
-
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
|
508 |
-
|
509 |
-
|
510 |
-
# 3.8+
|
511 |
-
if hasattr(typing, 'Protocol'):
|
512 |
-
Protocol = typing.Protocol
|
513 |
-
# 3.7
|
514 |
-
elif PEP_560:
|
515 |
-
from typing import _collect_type_vars # noqa
|
516 |
-
|
517 |
-
def _no_init(self, *args, **kwargs):
|
518 |
-
if type(self)._is_protocol:
|
519 |
-
raise TypeError('Protocols cannot be instantiated')
|
520 |
-
|
521 |
-
class _ProtocolMeta(abc.ABCMeta):
|
522 |
-
# This metaclass is a bit unfortunate and exists only because of the lack
|
523 |
-
# of __instancehook__.
|
524 |
-
def __instancecheck__(cls, instance):
|
525 |
-
# We need this method for situations where attributes are
|
526 |
-
# assigned in __init__.
|
527 |
-
if ((not getattr(cls, '_is_protocol', False) or
|
528 |
-
_is_callable_members_only(cls)) and
|
529 |
-
issubclass(instance.__class__, cls)):
|
530 |
-
return True
|
531 |
-
if cls._is_protocol:
|
532 |
-
if all(hasattr(instance, attr) and
|
533 |
-
(not callable(getattr(cls, attr, None)) or
|
534 |
-
getattr(instance, attr) is not None)
|
535 |
-
for attr in _get_protocol_attrs(cls)):
|
536 |
-
return True
|
537 |
-
return super().__instancecheck__(instance)
|
538 |
-
|
539 |
-
class Protocol(metaclass=_ProtocolMeta):
|
540 |
-
# There is quite a lot of overlapping code with typing.Generic.
|
541 |
-
# Unfortunately it is hard to avoid this while these live in two different
|
542 |
-
# modules. The duplicated code will be removed when Protocol is moved to typing.
|
543 |
-
"""Base class for protocol classes. Protocol classes are defined as::
|
544 |
-
|
545 |
-
class Proto(Protocol):
|
546 |
-
def meth(self) -> int:
|
547 |
-
...
|
548 |
-
|
549 |
-
Such classes are primarily used with static type checkers that recognize
|
550 |
-
structural subtyping (static duck-typing), for example::
|
551 |
-
|
552 |
-
class C:
|
553 |
-
def meth(self) -> int:
|
554 |
-
return 0
|
555 |
-
|
556 |
-
def func(x: Proto) -> int:
|
557 |
-
return x.meth()
|
558 |
-
|
559 |
-
func(C()) # Passes static type check
|
560 |
-
|
561 |
-
See PEP 544 for details. Protocol classes decorated with
|
562 |
-
@typing_extensions.runtime act as simple-minded runtime protocol that checks
|
563 |
-
only the presence of given attributes, ignoring their type signatures.
|
564 |
-
|
565 |
-
Protocol classes can be generic, they are defined as::
|
566 |
-
|
567 |
-
class GenProto(Protocol[T]):
|
568 |
-
def meth(self) -> T:
|
569 |
-
...
|
570 |
-
"""
|
571 |
-
__slots__ = ()
|
572 |
-
_is_protocol = True
|
573 |
-
|
574 |
-
def __new__(cls, *args, **kwds):
|
575 |
-
if cls is Protocol:
|
576 |
-
raise TypeError("Type Protocol cannot be instantiated; "
|
577 |
-
"it can only be used as a base class")
|
578 |
-
return super().__new__(cls)
|
579 |
-
|
580 |
-
@typing._tp_cache
|
581 |
-
def __class_getitem__(cls, params):
|
582 |
-
if not isinstance(params, tuple):
|
583 |
-
params = (params,)
|
584 |
-
if not params and cls is not typing.Tuple:
|
585 |
-
raise TypeError(
|
586 |
-
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
|
587 |
-
msg = "Parameters to generic types must be types."
|
588 |
-
params = tuple(typing._type_check(p, msg) for p in params) # noqa
|
589 |
-
if cls is Protocol:
|
590 |
-
# Generic can only be subscripted with unique type variables.
|
591 |
-
if not all(isinstance(p, typing.TypeVar) for p in params):
|
592 |
-
i = 0
|
593 |
-
while isinstance(params[i], typing.TypeVar):
|
594 |
-
i += 1
|
595 |
-
raise TypeError(
|
596 |
-
"Parameters to Protocol[...] must all be type variables."
|
597 |
-
f" Parameter {i + 1} is {params[i]}")
|
598 |
-
if len(set(params)) != len(params):
|
599 |
-
raise TypeError(
|
600 |
-
"Parameters to Protocol[...] must all be unique")
|
601 |
-
else:
|
602 |
-
# Subscripting a regular Generic subclass.
|
603 |
-
_check_generic(cls, params)
|
604 |
-
return typing._GenericAlias(cls, params)
|
605 |
-
|
606 |
-
def __init_subclass__(cls, *args, **kwargs):
|
607 |
-
tvars = []
|
608 |
-
if '__orig_bases__' in cls.__dict__:
|
609 |
-
error = typing.Generic in cls.__orig_bases__
|
610 |
-
else:
|
611 |
-
error = typing.Generic in cls.__bases__
|
612 |
-
if error:
|
613 |
-
raise TypeError("Cannot inherit from plain Generic")
|
614 |
-
if '__orig_bases__' in cls.__dict__:
|
615 |
-
tvars = _collect_type_vars(cls.__orig_bases__)
|
616 |
-
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
|
617 |
-
# If found, tvars must be a subset of it.
|
618 |
-
# If not found, tvars is it.
|
619 |
-
# Also check for and reject plain Generic,
|
620 |
-
# and reject multiple Generic[...] and/or Protocol[...].
|
621 |
-
gvars = None
|
622 |
-
for base in cls.__orig_bases__:
|
623 |
-
if (isinstance(base, typing._GenericAlias) and
|
624 |
-
base.__origin__ in (typing.Generic, Protocol)):
|
625 |
-
# for error messages
|
626 |
-
the_base = base.__origin__.__name__
|
627 |
-
if gvars is not None:
|
628 |
-
raise TypeError(
|
629 |
-
"Cannot inherit from Generic[...]"
|
630 |
-
" and/or Protocol[...] multiple types.")
|
631 |
-
gvars = base.__parameters__
|
632 |
-
if gvars is None:
|
633 |
-
gvars = tvars
|
634 |
-
else:
|
635 |
-
tvarset = set(tvars)
|
636 |
-
gvarset = set(gvars)
|
637 |
-
if not tvarset <= gvarset:
|
638 |
-
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
|
639 |
-
s_args = ', '.join(str(g) for g in gvars)
|
640 |
-
raise TypeError(f"Some type variables ({s_vars}) are"
|
641 |
-
f" not listed in {the_base}[{s_args}]")
|
642 |
-
tvars = gvars
|
643 |
-
cls.__parameters__ = tuple(tvars)
|
644 |
-
|
645 |
-
# Determine if this is a protocol or a concrete subclass.
|
646 |
-
if not cls.__dict__.get('_is_protocol', None):
|
647 |
-
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
|
648 |
-
|
649 |
-
# Set (or override) the protocol subclass hook.
|
650 |
-
def _proto_hook(other):
|
651 |
-
if not cls.__dict__.get('_is_protocol', None):
|
652 |
-
return NotImplemented
|
653 |
-
if not getattr(cls, '_is_runtime_protocol', False):
|
654 |
-
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
|
655 |
-
return NotImplemented
|
656 |
-
raise TypeError("Instance and class checks can only be used with"
|
657 |
-
" @runtime protocols")
|
658 |
-
if not _is_callable_members_only(cls):
|
659 |
-
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
|
660 |
-
return NotImplemented
|
661 |
-
raise TypeError("Protocols with non-method members"
|
662 |
-
" don't support issubclass()")
|
663 |
-
if not isinstance(other, type):
|
664 |
-
# Same error as for issubclass(1, int)
|
665 |
-
raise TypeError('issubclass() arg 1 must be a class')
|
666 |
-
for attr in _get_protocol_attrs(cls):
|
667 |
-
for base in other.__mro__:
|
668 |
-
if attr in base.__dict__:
|
669 |
-
if base.__dict__[attr] is None:
|
670 |
-
return NotImplemented
|
671 |
-
break
|
672 |
-
annotations = getattr(base, '__annotations__', {})
|
673 |
-
if (isinstance(annotations, typing.Mapping) and
|
674 |
-
attr in annotations and
|
675 |
-
isinstance(other, _ProtocolMeta) and
|
676 |
-
other._is_protocol):
|
677 |
-
break
|
678 |
-
else:
|
679 |
-
return NotImplemented
|
680 |
-
return True
|
681 |
-
if '__subclasshook__' not in cls.__dict__:
|
682 |
-
cls.__subclasshook__ = _proto_hook
|
683 |
-
|
684 |
-
# We have nothing more to do for non-protocols.
|
685 |
-
if not cls._is_protocol:
|
686 |
-
return
|
687 |
-
|
688 |
-
# Check consistency of bases.
|
689 |
-
for base in cls.__bases__:
|
690 |
-
if not (base in (object, typing.Generic) or
|
691 |
-
base.__module__ == 'collections.abc' and
|
692 |
-
base.__name__ in _PROTO_WHITELIST or
|
693 |
-
isinstance(base, _ProtocolMeta) and base._is_protocol):
|
694 |
-
raise TypeError('Protocols can only inherit from other'
|
695 |
-
f' protocols, got {repr(base)}')
|
696 |
-
cls.__init__ = _no_init
|
697 |
-
# 3.6
|
698 |
-
else:
|
699 |
-
from typing import _next_in_mro, _type_check # noqa
|
700 |
-
|
701 |
-
def _no_init(self, *args, **kwargs):
|
702 |
-
if type(self)._is_protocol:
|
703 |
-
raise TypeError('Protocols cannot be instantiated')
|
704 |
-
|
705 |
-
class _ProtocolMeta(GenericMeta):
|
706 |
-
"""Internal metaclass for Protocol.
|
707 |
-
|
708 |
-
This exists so Protocol classes can be generic without deriving
|
709 |
-
from Generic.
|
710 |
-
"""
|
711 |
-
def __new__(cls, name, bases, namespace,
|
712 |
-
tvars=None, args=None, origin=None, extra=None, orig_bases=None):
|
713 |
-
# This is just a version copied from GenericMeta.__new__ that
|
714 |
-
# includes "Protocol" special treatment. (Comments removed for brevity.)
|
715 |
-
assert extra is None # Protocols should not have extra
|
716 |
-
if tvars is not None:
|
717 |
-
assert origin is not None
|
718 |
-
assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars
|
719 |
-
else:
|
720 |
-
tvars = _type_vars(bases)
|
721 |
-
gvars = None
|
722 |
-
for base in bases:
|
723 |
-
if base is typing.Generic:
|
724 |
-
raise TypeError("Cannot inherit from plain Generic")
|
725 |
-
if (isinstance(base, GenericMeta) and
|
726 |
-
base.__origin__ in (typing.Generic, Protocol)):
|
727 |
-
if gvars is not None:
|
728 |
-
raise TypeError(
|
729 |
-
"Cannot inherit from Generic[...] or"
|
730 |
-
" Protocol[...] multiple times.")
|
731 |
-
gvars = base.__parameters__
|
732 |
-
if gvars is None:
|
733 |
-
gvars = tvars
|
734 |
-
else:
|
735 |
-
tvarset = set(tvars)
|
736 |
-
gvarset = set(gvars)
|
737 |
-
if not tvarset <= gvarset:
|
738 |
-
s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
|
739 |
-
s_args = ", ".join(str(g) for g in gvars)
|
740 |
-
cls_name = "Generic" if any(b.__origin__ is typing.Generic
|
741 |
-
for b in bases) else "Protocol"
|
742 |
-
raise TypeError(f"Some type variables ({s_vars}) are"
|
743 |
-
f" not listed in {cls_name}[{s_args}]")
|
744 |
-
tvars = gvars
|
745 |
-
|
746 |
-
initial_bases = bases
|
747 |
-
if (extra is not None and type(extra) is abc.ABCMeta and
|
748 |
-
extra not in bases):
|
749 |
-
bases = (extra,) + bases
|
750 |
-
bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
|
751 |
-
for b in bases)
|
752 |
-
if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases):
|
753 |
-
bases = tuple(b for b in bases if b is not typing.Generic)
|
754 |
-
namespace.update({'__origin__': origin, '__extra__': extra})
|
755 |
-
self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
|
756 |
-
_root=True)
|
757 |
-
super(GenericMeta, self).__setattr__('_gorg',
|
758 |
-
self if not origin else
|
759 |
-
_gorg(origin))
|
760 |
-
self.__parameters__ = tvars
|
761 |
-
self.__args__ = tuple(... if a is typing._TypingEllipsis else
|
762 |
-
() if a is typing._TypingEmpty else
|
763 |
-
a for a in args) if args else None
|
764 |
-
self.__next_in_mro__ = _next_in_mro(self)
|
765 |
-
if orig_bases is None:
|
766 |
-
self.__orig_bases__ = initial_bases
|
767 |
-
elif origin is not None:
|
768 |
-
self._abc_registry = origin._abc_registry
|
769 |
-
self._abc_cache = origin._abc_cache
|
770 |
-
if hasattr(self, '_subs_tree'):
|
771 |
-
self.__tree_hash__ = (hash(self._subs_tree()) if origin else
|
772 |
-
super(GenericMeta, self).__hash__())
|
773 |
-
return self
|
774 |
-
|
775 |
-
def __init__(cls, *args, **kwargs):
|
776 |
-
super().__init__(*args, **kwargs)
|
777 |
-
if not cls.__dict__.get('_is_protocol', None):
|
778 |
-
cls._is_protocol = any(b is Protocol or
|
779 |
-
isinstance(b, _ProtocolMeta) and
|
780 |
-
b.__origin__ is Protocol
|
781 |
-
for b in cls.__bases__)
|
782 |
-
if cls._is_protocol:
|
783 |
-
for base in cls.__mro__[1:]:
|
784 |
-
if not (base in (object, typing.Generic) or
|
785 |
-
base.__module__ == 'collections.abc' and
|
786 |
-
base.__name__ in _PROTO_WHITELIST or
|
787 |
-
isinstance(base, typing.TypingMeta) and base._is_protocol or
|
788 |
-
isinstance(base, GenericMeta) and
|
789 |
-
base.__origin__ is typing.Generic):
|
790 |
-
raise TypeError(f'Protocols can only inherit from other'
|
791 |
-
f' protocols, got {repr(base)}')
|
792 |
-
|
793 |
-
cls.__init__ = _no_init
|
794 |
-
|
795 |
-
def _proto_hook(other):
|
796 |
-
if not cls.__dict__.get('_is_protocol', None):
|
797 |
-
return NotImplemented
|
798 |
-
if not isinstance(other, type):
|
799 |
-
# Same error as for issubclass(1, int)
|
800 |
-
raise TypeError('issubclass() arg 1 must be a class')
|
801 |
-
for attr in _get_protocol_attrs(cls):
|
802 |
-
for base in other.__mro__:
|
803 |
-
if attr in base.__dict__:
|
804 |
-
if base.__dict__[attr] is None:
|
805 |
-
return NotImplemented
|
806 |
-
break
|
807 |
-
annotations = getattr(base, '__annotations__', {})
|
808 |
-
if (isinstance(annotations, typing.Mapping) and
|
809 |
-
attr in annotations and
|
810 |
-
isinstance(other, _ProtocolMeta) and
|
811 |
-
other._is_protocol):
|
812 |
-
break
|
813 |
-
else:
|
814 |
-
return NotImplemented
|
815 |
-
return True
|
816 |
-
if '__subclasshook__' not in cls.__dict__:
|
817 |
-
cls.__subclasshook__ = _proto_hook
|
818 |
-
|
819 |
-
def __instancecheck__(self, instance):
|
820 |
-
# We need this method for situations where attributes are
|
821 |
-
# assigned in __init__.
|
822 |
-
if ((not getattr(self, '_is_protocol', False) or
|
823 |
-
_is_callable_members_only(self)) and
|
824 |
-
issubclass(instance.__class__, self)):
|
825 |
-
return True
|
826 |
-
if self._is_protocol:
|
827 |
-
if all(hasattr(instance, attr) and
|
828 |
-
(not callable(getattr(self, attr, None)) or
|
829 |
-
getattr(instance, attr) is not None)
|
830 |
-
for attr in _get_protocol_attrs(self)):
|
831 |
-
return True
|
832 |
-
return super(GenericMeta, self).__instancecheck__(instance)
|
833 |
-
|
834 |
-
def __subclasscheck__(self, cls):
|
835 |
-
if self.__origin__ is not None:
|
836 |
-
if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
|
837 |
-
raise TypeError("Parameterized generics cannot be used with class "
|
838 |
-
"or instance checks")
|
839 |
-
return False
|
840 |
-
if (self.__dict__.get('_is_protocol', None) and
|
841 |
-
not self.__dict__.get('_is_runtime_protocol', None)):
|
842 |
-
if sys._getframe(1).f_globals['__name__'] in ['abc',
|
843 |
-
'functools',
|
844 |
-
'typing']:
|
845 |
-
return False
|
846 |
-
raise TypeError("Instance and class checks can only be used with"
|
847 |
-
" @runtime protocols")
|
848 |
-
if (self.__dict__.get('_is_runtime_protocol', None) and
|
849 |
-
not _is_callable_members_only(self)):
|
850 |
-
if sys._getframe(1).f_globals['__name__'] in ['abc',
|
851 |
-
'functools',
|
852 |
-
'typing']:
|
853 |
-
return super(GenericMeta, self).__subclasscheck__(cls)
|
854 |
-
raise TypeError("Protocols with non-method members"
|
855 |
-
" don't support issubclass()")
|
856 |
-
return super(GenericMeta, self).__subclasscheck__(cls)
|
857 |
-
|
858 |
-
@typing._tp_cache
|
859 |
-
def __getitem__(self, params):
|
860 |
-
# We also need to copy this from GenericMeta.__getitem__ to get
|
861 |
-
# special treatment of "Protocol". (Comments removed for brevity.)
|
862 |
-
if not isinstance(params, tuple):
|
863 |
-
params = (params,)
|
864 |
-
if not params and _gorg(self) is not typing.Tuple:
|
865 |
-
raise TypeError(
|
866 |
-
f"Parameter list to {self.__qualname__}[...] cannot be empty")
|
867 |
-
msg = "Parameters to generic types must be types."
|
868 |
-
params = tuple(_type_check(p, msg) for p in params)
|
869 |
-
if self in (typing.Generic, Protocol):
|
870 |
-
if not all(isinstance(p, typing.TypeVar) for p in params):
|
871 |
-
raise TypeError(
|
872 |
-
f"Parameters to {repr(self)}[...] must all be type variables")
|
873 |
-
if len(set(params)) != len(params):
|
874 |
-
raise TypeError(
|
875 |
-
f"Parameters to {repr(self)}[...] must all be unique")
|
876 |
-
tvars = params
|
877 |
-
args = params
|
878 |
-
elif self in (typing.Tuple, typing.Callable):
|
879 |
-
tvars = _type_vars(params)
|
880 |
-
args = params
|
881 |
-
elif self.__origin__ in (typing.Generic, Protocol):
|
882 |
-
raise TypeError(f"Cannot subscript already-subscripted {repr(self)}")
|
883 |
-
else:
|
884 |
-
_check_generic(self, params)
|
885 |
-
tvars = _type_vars(params)
|
886 |
-
args = params
|
887 |
-
|
888 |
-
prepend = (self,) if self.__origin__ is None else ()
|
889 |
-
return self.__class__(self.__name__,
|
890 |
-
prepend + self.__bases__,
|
891 |
-
_no_slots_copy(self.__dict__),
|
892 |
-
tvars=tvars,
|
893 |
-
args=args,
|
894 |
-
origin=self,
|
895 |
-
extra=self.__extra__,
|
896 |
-
orig_bases=self.__orig_bases__)
|
897 |
-
|
898 |
-
class Protocol(metaclass=_ProtocolMeta):
|
899 |
-
"""Base class for protocol classes. Protocol classes are defined as::
|
900 |
-
|
901 |
-
class Proto(Protocol):
|
902 |
-
def meth(self) -> int:
|
903 |
-
...
|
904 |
-
|
905 |
-
Such classes are primarily used with static type checkers that recognize
|
906 |
-
structural subtyping (static duck-typing), for example::
|
907 |
-
|
908 |
-
class C:
|
909 |
-
def meth(self) -> int:
|
910 |
-
return 0
|
911 |
-
|
912 |
-
def func(x: Proto) -> int:
|
913 |
-
return x.meth()
|
914 |
-
|
915 |
-
func(C()) # Passes static type check
|
916 |
-
|
917 |
-
See PEP 544 for details. Protocol classes decorated with
|
918 |
-
@typing_extensions.runtime act as simple-minded runtime protocol that checks
|
919 |
-
only the presence of given attributes, ignoring their type signatures.
|
920 |
-
|
921 |
-
Protocol classes can be generic, they are defined as::
|
922 |
-
|
923 |
-
class GenProto(Protocol[T]):
|
924 |
-
def meth(self) -> T:
|
925 |
-
...
|
926 |
-
"""
|
927 |
-
__slots__ = ()
|
928 |
-
_is_protocol = True
|
929 |
-
|
930 |
-
def __new__(cls, *args, **kwds):
|
931 |
-
if _gorg(cls) is Protocol:
|
932 |
-
raise TypeError("Type Protocol cannot be instantiated; "
|
933 |
-
"it can be used only as a base class")
|
934 |
-
return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds)
|
935 |
-
|
936 |
-
|
937 |
-
# 3.8+
|
938 |
-
if hasattr(typing, 'runtime_checkable'):
|
939 |
-
runtime_checkable = typing.runtime_checkable
|
940 |
-
# 3.6-3.7
|
941 |
-
else:
|
942 |
-
def runtime_checkable(cls):
|
943 |
-
"""Mark a protocol class as a runtime protocol, so that it
|
944 |
-
can be used with isinstance() and issubclass(). Raise TypeError
|
945 |
-
if applied to a non-protocol class.
|
946 |
-
|
947 |
-
This allows a simple-minded structural check very similar to the
|
948 |
-
one-offs in collections.abc such as Hashable.
|
949 |
-
"""
|
950 |
-
if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
|
951 |
-
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
|
952 |
-
f' got {cls!r}')
|
953 |
-
cls._is_runtime_protocol = True
|
954 |
-
return cls
|
955 |
-
|
956 |
-
|
957 |
-
# Exists for backwards compatibility.
|
958 |
-
runtime = runtime_checkable
|
959 |
-
|
960 |
-
|
961 |
-
# 3.8+
|
962 |
-
if hasattr(typing, 'SupportsIndex'):
|
963 |
-
SupportsIndex = typing.SupportsIndex
|
964 |
-
# 3.6-3.7
|
965 |
-
else:
|
966 |
-
@runtime_checkable
|
967 |
-
class SupportsIndex(Protocol):
|
968 |
-
__slots__ = ()
|
969 |
-
|
970 |
-
@abc.abstractmethod
|
971 |
-
def __index__(self) -> int:
|
972 |
-
pass
|
973 |
-
|
974 |
-
|
975 |
-
if sys.version_info >= (3, 9, 2):
|
976 |
-
# The standard library TypedDict in Python 3.8 does not store runtime information
|
977 |
-
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
|
978 |
-
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
|
979 |
-
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
|
980 |
-
TypedDict = typing.TypedDict
|
981 |
-
else:
|
982 |
-
def _check_fails(cls, other):
|
983 |
-
try:
|
984 |
-
if sys._getframe(1).f_globals['__name__'] not in ['abc',
|
985 |
-
'functools',
|
986 |
-
'typing']:
|
987 |
-
# Typed dicts are only for static structural subtyping.
|
988 |
-
raise TypeError('TypedDict does not support instance and class checks')
|
989 |
-
except (AttributeError, ValueError):
|
990 |
-
pass
|
991 |
-
return False
|
992 |
-
|
993 |
-
def _dict_new(*args, **kwargs):
|
994 |
-
if not args:
|
995 |
-
raise TypeError('TypedDict.__new__(): not enough arguments')
|
996 |
-
_, args = args[0], args[1:] # allow the "cls" keyword be passed
|
997 |
-
return dict(*args, **kwargs)
|
998 |
-
|
999 |
-
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
|
1000 |
-
|
1001 |
-
def _typeddict_new(*args, total=True, **kwargs):
|
1002 |
-
if not args:
|
1003 |
-
raise TypeError('TypedDict.__new__(): not enough arguments')
|
1004 |
-
_, args = args[0], args[1:] # allow the "cls" keyword be passed
|
1005 |
-
if args:
|
1006 |
-
typename, args = args[0], args[1:] # allow the "_typename" keyword be passed
|
1007 |
-
elif '_typename' in kwargs:
|
1008 |
-
typename = kwargs.pop('_typename')
|
1009 |
-
import warnings
|
1010 |
-
warnings.warn("Passing '_typename' as keyword argument is deprecated",
|
1011 |
-
DeprecationWarning, stacklevel=2)
|
1012 |
-
else:
|
1013 |
-
raise TypeError("TypedDict.__new__() missing 1 required positional "
|
1014 |
-
"argument: '_typename'")
|
1015 |
-
if args:
|
1016 |
-
try:
|
1017 |
-
fields, = args # allow the "_fields" keyword be passed
|
1018 |
-
except ValueError:
|
1019 |
-
raise TypeError('TypedDict.__new__() takes from 2 to 3 '
|
1020 |
-
f'positional arguments but {len(args) + 2} '
|
1021 |
-
'were given')
|
1022 |
-
elif '_fields' in kwargs and len(kwargs) == 1:
|
1023 |
-
fields = kwargs.pop('_fields')
|
1024 |
-
import warnings
|
1025 |
-
warnings.warn("Passing '_fields' as keyword argument is deprecated",
|
1026 |
-
DeprecationWarning, stacklevel=2)
|
1027 |
-
else:
|
1028 |
-
fields = None
|
1029 |
-
|
1030 |
-
if fields is None:
|
1031 |
-
fields = kwargs
|
1032 |
-
elif kwargs:
|
1033 |
-
raise TypeError("TypedDict takes either a dict or keyword arguments,"
|
1034 |
-
" but not both")
|
1035 |
-
|
1036 |
-
ns = {'__annotations__': dict(fields)}
|
1037 |
-
try:
|
1038 |
-
# Setting correct module is necessary to make typed dict classes pickleable.
|
1039 |
-
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
|
1040 |
-
except (AttributeError, ValueError):
|
1041 |
-
pass
|
1042 |
-
|
1043 |
-
return _TypedDictMeta(typename, (), ns, total=total)
|
1044 |
-
|
1045 |
-
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
|
1046 |
-
' /, *, total=True, **kwargs)')
|
1047 |
-
|
1048 |
-
class _TypedDictMeta(type):
|
1049 |
-
def __init__(cls, name, bases, ns, total=True):
|
1050 |
-
super().__init__(name, bases, ns)
|
1051 |
-
|
1052 |
-
def __new__(cls, name, bases, ns, total=True):
|
1053 |
-
# Create new typed dict class object.
|
1054 |
-
# This method is called directly when TypedDict is subclassed,
|
1055 |
-
# or via _typeddict_new when TypedDict is instantiated. This way
|
1056 |
-
# TypedDict supports all three syntaxes described in its docstring.
|
1057 |
-
# Subclasses and instances of TypedDict return actual dictionaries
|
1058 |
-
# via _dict_new.
|
1059 |
-
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
|
1060 |
-
tp_dict = super().__new__(cls, name, (dict,), ns)
|
1061 |
-
|
1062 |
-
annotations = {}
|
1063 |
-
own_annotations = ns.get('__annotations__', {})
|
1064 |
-
own_annotation_keys = set(own_annotations.keys())
|
1065 |
-
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
|
1066 |
-
own_annotations = {
|
1067 |
-
n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
|
1068 |
-
}
|
1069 |
-
required_keys = set()
|
1070 |
-
optional_keys = set()
|
1071 |
-
|
1072 |
-
for base in bases:
|
1073 |
-
annotations.update(base.__dict__.get('__annotations__', {}))
|
1074 |
-
required_keys.update(base.__dict__.get('__required_keys__', ()))
|
1075 |
-
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
|
1076 |
-
|
1077 |
-
annotations.update(own_annotations)
|
1078 |
-
if total:
|
1079 |
-
required_keys.update(own_annotation_keys)
|
1080 |
-
else:
|
1081 |
-
optional_keys.update(own_annotation_keys)
|
1082 |
-
|
1083 |
-
tp_dict.__annotations__ = annotations
|
1084 |
-
tp_dict.__required_keys__ = frozenset(required_keys)
|
1085 |
-
tp_dict.__optional_keys__ = frozenset(optional_keys)
|
1086 |
-
if not hasattr(tp_dict, '__total__'):
|
1087 |
-
tp_dict.__total__ = total
|
1088 |
-
return tp_dict
|
1089 |
-
|
1090 |
-
__instancecheck__ = __subclasscheck__ = _check_fails
|
1091 |
-
|
1092 |
-
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
|
1093 |
-
TypedDict.__module__ = __name__
|
1094 |
-
TypedDict.__doc__ = \
|
1095 |
-
"""A simple typed name space. At runtime it is equivalent to a plain dict.
|
1096 |
-
|
1097 |
-
TypedDict creates a dictionary type that expects all of its
|
1098 |
-
instances to have a certain set of keys, with each key
|
1099 |
-
associated with a value of a consistent type. This expectation
|
1100 |
-
is not checked at runtime but is only enforced by type checkers.
|
1101 |
-
Usage::
|
1102 |
-
|
1103 |
-
class Point2D(TypedDict):
|
1104 |
-
x: int
|
1105 |
-
y: int
|
1106 |
-
label: str
|
1107 |
-
|
1108 |
-
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
|
1109 |
-
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
|
1110 |
-
|
1111 |
-
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
|
1112 |
-
|
1113 |
-
The type info can be accessed via the Point2D.__annotations__ dict, and
|
1114 |
-
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
|
1115 |
-
TypedDict supports two additional equivalent forms::
|
1116 |
-
|
1117 |
-
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
|
1118 |
-
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
|
1119 |
-
|
1120 |
-
The class syntax is only supported in Python 3.6+, while two other
|
1121 |
-
syntax forms work for Python 2.7 and 3.2+
|
1122 |
-
"""
|
1123 |
-
|
1124 |
-
|
1125 |
-
# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
|
1126 |
-
if hasattr(typing, 'Annotated'):
|
1127 |
-
Annotated = typing.Annotated
|
1128 |
-
get_type_hints = typing.get_type_hints
|
1129 |
-
# Not exported and not a public API, but needed for get_origin() and get_args()
|
1130 |
-
# to work.
|
1131 |
-
_AnnotatedAlias = typing._AnnotatedAlias
|
1132 |
-
# 3.7-3.8
|
1133 |
-
elif PEP_560:
|
1134 |
-
class _AnnotatedAlias(typing._GenericAlias, _root=True):
|
1135 |
-
"""Runtime representation of an annotated type.
|
1136 |
-
|
1137 |
-
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
|
1138 |
-
with extra annotations. The alias behaves like a normal typing alias,
|
1139 |
-
instantiating is the same as instantiating the underlying type, binding
|
1140 |
-
it to types is also the same.
|
1141 |
-
"""
|
1142 |
-
def __init__(self, origin, metadata):
|
1143 |
-
if isinstance(origin, _AnnotatedAlias):
|
1144 |
-
metadata = origin.__metadata__ + metadata
|
1145 |
-
origin = origin.__origin__
|
1146 |
-
super().__init__(origin, origin)
|
1147 |
-
self.__metadata__ = metadata
|
1148 |
-
|
1149 |
-
def copy_with(self, params):
|
1150 |
-
assert len(params) == 1
|
1151 |
-
new_type = params[0]
|
1152 |
-
return _AnnotatedAlias(new_type, self.__metadata__)
|
1153 |
-
|
1154 |
-
def __repr__(self):
|
1155 |
-
return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
|
1156 |
-
f"{', '.join(repr(a) for a in self.__metadata__)}]")
|
1157 |
-
|
1158 |
-
def __reduce__(self):
|
1159 |
-
return operator.getitem, (
|
1160 |
-
Annotated, (self.__origin__,) + self.__metadata__
|
1161 |
-
)
|
1162 |
-
|
1163 |
-
def __eq__(self, other):
|
1164 |
-
if not isinstance(other, _AnnotatedAlias):
|
1165 |
-
return NotImplemented
|
1166 |
-
if self.__origin__ != other.__origin__:
|
1167 |
-
return False
|
1168 |
-
return self.__metadata__ == other.__metadata__
|
1169 |
-
|
1170 |
-
def __hash__(self):
|
1171 |
-
return hash((self.__origin__, self.__metadata__))
|
1172 |
-
|
1173 |
-
class Annotated:
|
1174 |
-
"""Add context specific metadata to a type.
|
1175 |
-
|
1176 |
-
Example: Annotated[int, runtime_check.Unsigned] indicates to the
|
1177 |
-
hypothetical runtime_check module that this type is an unsigned int.
|
1178 |
-
Every other consumer of this type can ignore this metadata and treat
|
1179 |
-
this type as int.
|
1180 |
-
|
1181 |
-
The first argument to Annotated must be a valid type (and will be in
|
1182 |
-
the __origin__ field), the remaining arguments are kept as a tuple in
|
1183 |
-
the __extra__ field.
|
1184 |
-
|
1185 |
-
Details:
|
1186 |
-
|
1187 |
-
- It's an error to call `Annotated` with less than two arguments.
|
1188 |
-
- Nested Annotated are flattened::
|
1189 |
-
|
1190 |
-
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
|
1191 |
-
|
1192 |
-
- Instantiating an annotated type is equivalent to instantiating the
|
1193 |
-
underlying type::
|
1194 |
-
|
1195 |
-
Annotated[C, Ann1](5) == C(5)
|
1196 |
-
|
1197 |
-
- Annotated can be used as a generic type alias::
|
1198 |
-
|
1199 |
-
Optimized = Annotated[T, runtime.Optimize()]
|
1200 |
-
Optimized[int] == Annotated[int, runtime.Optimize()]
|
1201 |
-
|
1202 |
-
OptimizedList = Annotated[List[T], runtime.Optimize()]
|
1203 |
-
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
|
1204 |
-
"""
|
1205 |
-
|
1206 |
-
__slots__ = ()
|
1207 |
-
|
1208 |
-
def __new__(cls, *args, **kwargs):
|
1209 |
-
raise TypeError("Type Annotated cannot be instantiated.")
|
1210 |
-
|
1211 |
-
@typing._tp_cache
|
1212 |
-
def __class_getitem__(cls, params):
|
1213 |
-
if not isinstance(params, tuple) or len(params) < 2:
|
1214 |
-
raise TypeError("Annotated[...] should be used "
|
1215 |
-
"with at least two arguments (a type and an "
|
1216 |
-
"annotation).")
|
1217 |
-
msg = "Annotated[t, ...]: t must be a type."
|
1218 |
-
origin = typing._type_check(params[0], msg)
|
1219 |
-
metadata = tuple(params[1:])
|
1220 |
-
return _AnnotatedAlias(origin, metadata)
|
1221 |
-
|
1222 |
-
def __init_subclass__(cls, *args, **kwargs):
|
1223 |
-
raise TypeError(
|
1224 |
-
f"Cannot subclass {cls.__module__}.Annotated"
|
1225 |
-
)
|
1226 |
-
|
1227 |
-
def _strip_annotations(t):
|
1228 |
-
"""Strips the annotations from a given type.
|
1229 |
-
"""
|
1230 |
-
if isinstance(t, _AnnotatedAlias):
|
1231 |
-
return _strip_annotations(t.__origin__)
|
1232 |
-
if isinstance(t, typing._GenericAlias):
|
1233 |
-
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
|
1234 |
-
if stripped_args == t.__args__:
|
1235 |
-
return t
|
1236 |
-
res = t.copy_with(stripped_args)
|
1237 |
-
res._special = t._special
|
1238 |
-
return res
|
1239 |
-
return t
|
1240 |
-
|
1241 |
-
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
|
1242 |
-
"""Return type hints for an object.
|
1243 |
-
|
1244 |
-
This is often the same as obj.__annotations__, but it handles
|
1245 |
-
forward references encoded as string literals, adds Optional[t] if a
|
1246 |
-
default value equal to None is set and recursively replaces all
|
1247 |
-
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
|
1248 |
-
|
1249 |
-
The argument may be a module, class, method, or function. The annotations
|
1250 |
-
are returned as a dictionary. For classes, annotations include also
|
1251 |
-
inherited members.
|
1252 |
-
|
1253 |
-
TypeError is raised if the argument is not of a type that can contain
|
1254 |
-
annotations, and an empty dictionary is returned if no annotations are
|
1255 |
-
present.
|
1256 |
-
|
1257 |
-
BEWARE -- the behavior of globalns and localns is counterintuitive
|
1258 |
-
(unless you are familiar with how eval() and exec() work). The
|
1259 |
-
search order is locals first, then globals.
|
1260 |
-
|
1261 |
-
- If no dict arguments are passed, an attempt is made to use the
|
1262 |
-
globals from obj (or the respective module's globals for classes),
|
1263 |
-
and these are also used as the locals. If the object does not appear
|
1264 |
-
to have globals, an empty dictionary is used.
|
1265 |
-
|
1266 |
-
- If one dict argument is passed, it is used for both globals and
|
1267 |
-
locals.
|
1268 |
-
|
1269 |
-
- If two dict arguments are passed, they specify globals and
|
1270 |
-
locals, respectively.
|
1271 |
-
"""
|
1272 |
-
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
|
1273 |
-
if include_extras:
|
1274 |
-
return hint
|
1275 |
-
return {k: _strip_annotations(t) for k, t in hint.items()}
|
1276 |
-
# 3.6
|
1277 |
-
else:
|
1278 |
-
|
1279 |
-
def _is_dunder(name):
|
1280 |
-
"""Returns True if name is a __dunder_variable_name__."""
|
1281 |
-
return len(name) > 4 and name.startswith('__') and name.endswith('__')
|
1282 |
-
|
1283 |
-
# Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
|
1284 |
-
# checks, argument expansion etc. are done on the _subs_tre. As a result we
|
1285 |
-
# can't provide a get_type_hints function that strips out annotations.
|
1286 |
-
|
1287 |
-
class AnnotatedMeta(typing.GenericMeta):
|
1288 |
-
"""Metaclass for Annotated"""
|
1289 |
-
|
1290 |
-
def __new__(cls, name, bases, namespace, **kwargs):
|
1291 |
-
if any(b is not object for b in bases):
|
1292 |
-
raise TypeError("Cannot subclass " + str(Annotated))
|
1293 |
-
return super().__new__(cls, name, bases, namespace, **kwargs)
|
1294 |
-
|
1295 |
-
@property
|
1296 |
-
def __metadata__(self):
|
1297 |
-
return self._subs_tree()[2]
|
1298 |
-
|
1299 |
-
def _tree_repr(self, tree):
|
1300 |
-
cls, origin, metadata = tree
|
1301 |
-
if not isinstance(origin, tuple):
|
1302 |
-
tp_repr = typing._type_repr(origin)
|
1303 |
-
else:
|
1304 |
-
tp_repr = origin[0]._tree_repr(origin)
|
1305 |
-
metadata_reprs = ", ".join(repr(arg) for arg in metadata)
|
1306 |
-
return f'{cls}[{tp_repr}, {metadata_reprs}]'
|
1307 |
-
|
1308 |
-
def _subs_tree(self, tvars=None, args=None): # noqa
|
1309 |
-
if self is Annotated:
|
1310 |
-
return Annotated
|
1311 |
-
res = super()._subs_tree(tvars=tvars, args=args)
|
1312 |
-
# Flatten nested Annotated
|
1313 |
-
if isinstance(res[1], tuple) and res[1][0] is Annotated:
|
1314 |
-
sub_tp = res[1][1]
|
1315 |
-
sub_annot = res[1][2]
|
1316 |
-
return (Annotated, sub_tp, sub_annot + res[2])
|
1317 |
-
return res
|
1318 |
-
|
1319 |
-
def _get_cons(self):
|
1320 |
-
"""Return the class used to create instance of this type."""
|
1321 |
-
if self.__origin__ is None:
|
1322 |
-
raise TypeError("Cannot get the underlying type of a "
|
1323 |
-
"non-specialized Annotated type.")
|
1324 |
-
tree = self._subs_tree()
|
1325 |
-
while isinstance(tree, tuple) and tree[0] is Annotated:
|
1326 |
-
tree = tree[1]
|
1327 |
-
if isinstance(tree, tuple):
|
1328 |
-
return tree[0]
|
1329 |
-
else:
|
1330 |
-
return tree
|
1331 |
-
|
1332 |
-
@typing._tp_cache
|
1333 |
-
def __getitem__(self, params):
|
1334 |
-
if not isinstance(params, tuple):
|
1335 |
-
params = (params,)
|
1336 |
-
if self.__origin__ is not None: # specializing an instantiated type
|
1337 |
-
return super().__getitem__(params)
|
1338 |
-
elif not isinstance(params, tuple) or len(params) < 2:
|
1339 |
-
raise TypeError("Annotated[...] should be instantiated "
|
1340 |
-
"with at least two arguments (a type and an "
|
1341 |
-
"annotation).")
|
1342 |
-
else:
|
1343 |
-
msg = "Annotated[t, ...]: t must be a type."
|
1344 |
-
tp = typing._type_check(params[0], msg)
|
1345 |
-
metadata = tuple(params[1:])
|
1346 |
-
return self.__class__(
|
1347 |
-
self.__name__,
|
1348 |
-
self.__bases__,
|
1349 |
-
_no_slots_copy(self.__dict__),
|
1350 |
-
tvars=_type_vars((tp,)),
|
1351 |
-
# Metadata is a tuple so it won't be touched by _replace_args et al.
|
1352 |
-
args=(tp, metadata),
|
1353 |
-
origin=self,
|
1354 |
-
)
|
1355 |
-
|
1356 |
-
def __call__(self, *args, **kwargs):
|
1357 |
-
cons = self._get_cons()
|
1358 |
-
result = cons(*args, **kwargs)
|
1359 |
-
try:
|
1360 |
-
result.__orig_class__ = self
|
1361 |
-
except AttributeError:
|
1362 |
-
pass
|
1363 |
-
return result
|
1364 |
-
|
1365 |
-
def __getattr__(self, attr):
|
1366 |
-
# For simplicity we just don't relay all dunder names
|
1367 |
-
if self.__origin__ is not None and not _is_dunder(attr):
|
1368 |
-
return getattr(self._get_cons(), attr)
|
1369 |
-
raise AttributeError(attr)
|
1370 |
-
|
1371 |
-
def __setattr__(self, attr, value):
|
1372 |
-
if _is_dunder(attr) or attr.startswith('_abc_'):
|
1373 |
-
super().__setattr__(attr, value)
|
1374 |
-
elif self.__origin__ is None:
|
1375 |
-
raise AttributeError(attr)
|
1376 |
-
else:
|
1377 |
-
setattr(self._get_cons(), attr, value)
|
1378 |
-
|
1379 |
-
def __instancecheck__(self, obj):
|
1380 |
-
raise TypeError("Annotated cannot be used with isinstance().")
|
1381 |
-
|
1382 |
-
def __subclasscheck__(self, cls):
|
1383 |
-
raise TypeError("Annotated cannot be used with issubclass().")
|
1384 |
-
|
1385 |
-
class Annotated(metaclass=AnnotatedMeta):
|
1386 |
-
"""Add context specific metadata to a type.
|
1387 |
-
|
1388 |
-
Example: Annotated[int, runtime_check.Unsigned] indicates to the
|
1389 |
-
hypothetical runtime_check module that this type is an unsigned int.
|
1390 |
-
Every other consumer of this type can ignore this metadata and treat
|
1391 |
-
this type as int.
|
1392 |
-
|
1393 |
-
The first argument to Annotated must be a valid type, the remaining
|
1394 |
-
arguments are kept as a tuple in the __metadata__ field.
|
1395 |
-
|
1396 |
-
Details:
|
1397 |
-
|
1398 |
-
- It's an error to call `Annotated` with less than two arguments.
|
1399 |
-
- Nested Annotated are flattened::
|
1400 |
-
|
1401 |
-
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
|
1402 |
-
|
1403 |
-
- Instantiating an annotated type is equivalent to instantiating the
|
1404 |
-
underlying type::
|
1405 |
-
|
1406 |
-
Annotated[C, Ann1](5) == C(5)
|
1407 |
-
|
1408 |
-
- Annotated can be used as a generic type alias::
|
1409 |
-
|
1410 |
-
Optimized = Annotated[T, runtime.Optimize()]
|
1411 |
-
Optimized[int] == Annotated[int, runtime.Optimize()]
|
1412 |
-
|
1413 |
-
OptimizedList = Annotated[List[T], runtime.Optimize()]
|
1414 |
-
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
|
1415 |
-
"""
|
1416 |
-
|
1417 |
-
# Python 3.8 has get_origin() and get_args() but those implementations aren't
|
1418 |
-
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
|
1419 |
-
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
|
1420 |
-
if sys.version_info[:2] >= (3, 10):
|
1421 |
-
get_origin = typing.get_origin
|
1422 |
-
get_args = typing.get_args
|
1423 |
-
# 3.7-3.9
|
1424 |
-
elif PEP_560:
|
1425 |
-
try:
|
1426 |
-
# 3.9+
|
1427 |
-
from typing import _BaseGenericAlias
|
1428 |
-
except ImportError:
|
1429 |
-
_BaseGenericAlias = typing._GenericAlias
|
1430 |
-
try:
|
1431 |
-
# 3.9+
|
1432 |
-
from typing import GenericAlias
|
1433 |
-
except ImportError:
|
1434 |
-
GenericAlias = typing._GenericAlias
|
1435 |
-
|
1436 |
-
def get_origin(tp):
|
1437 |
-
"""Get the unsubscripted version of a type.
|
1438 |
-
|
1439 |
-
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
|
1440 |
-
and Annotated. Return None for unsupported types. Examples::
|
1441 |
-
|
1442 |
-
get_origin(Literal[42]) is Literal
|
1443 |
-
get_origin(int) is None
|
1444 |
-
get_origin(ClassVar[int]) is ClassVar
|
1445 |
-
get_origin(Generic) is Generic
|
1446 |
-
get_origin(Generic[T]) is Generic
|
1447 |
-
get_origin(Union[T, int]) is Union
|
1448 |
-
get_origin(List[Tuple[T, T]][int]) == list
|
1449 |
-
get_origin(P.args) is P
|
1450 |
-
"""
|
1451 |
-
if isinstance(tp, _AnnotatedAlias):
|
1452 |
-
return Annotated
|
1453 |
-
if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias,
|
1454 |
-
ParamSpecArgs, ParamSpecKwargs)):
|
1455 |
-
return tp.__origin__
|
1456 |
-
if tp is typing.Generic:
|
1457 |
-
return typing.Generic
|
1458 |
-
return None
|
1459 |
-
|
1460 |
-
def get_args(tp):
|
1461 |
-
"""Get type arguments with all substitutions performed.
|
1462 |
-
|
1463 |
-
For unions, basic simplifications used by Union constructor are performed.
|
1464 |
-
Examples::
|
1465 |
-
get_args(Dict[str, int]) == (str, int)
|
1466 |
-
get_args(int) == ()
|
1467 |
-
get_args(Union[int, Union[T, int], str][int]) == (int, str)
|
1468 |
-
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
|
1469 |
-
get_args(Callable[[], T][int]) == ([], int)
|
1470 |
-
"""
|
1471 |
-
if isinstance(tp, _AnnotatedAlias):
|
1472 |
-
return (tp.__origin__,) + tp.__metadata__
|
1473 |
-
if isinstance(tp, (typing._GenericAlias, GenericAlias)):
|
1474 |
-
if getattr(tp, "_special", False):
|
1475 |
-
return ()
|
1476 |
-
res = tp.__args__
|
1477 |
-
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
|
1478 |
-
res = (list(res[:-1]), res[-1])
|
1479 |
-
return res
|
1480 |
-
return ()
|
1481 |
-
|
1482 |
-
|
1483 |
-
# 3.10+
|
1484 |
-
if hasattr(typing, 'TypeAlias'):
|
1485 |
-
TypeAlias = typing.TypeAlias
|
1486 |
-
# 3.9
|
1487 |
-
elif sys.version_info[:2] >= (3, 9):
|
1488 |
-
class _TypeAliasForm(typing._SpecialForm, _root=True):
|
1489 |
-
def __repr__(self):
|
1490 |
-
return 'typing_extensions.' + self._name
|
1491 |
-
|
1492 |
-
@_TypeAliasForm
|
1493 |
-
def TypeAlias(self, parameters):
|
1494 |
-
"""Special marker indicating that an assignment should
|
1495 |
-
be recognized as a proper type alias definition by type
|
1496 |
-
checkers.
|
1497 |
-
|
1498 |
-
For example::
|
1499 |
-
|
1500 |
-
Predicate: TypeAlias = Callable[..., bool]
|
1501 |
-
|
1502 |
-
It's invalid when used anywhere except as in the example above.
|
1503 |
-
"""
|
1504 |
-
raise TypeError(f"{self} is not subscriptable")
|
1505 |
-
# 3.7-3.8
|
1506 |
-
elif sys.version_info[:2] >= (3, 7):
|
1507 |
-
class _TypeAliasForm(typing._SpecialForm, _root=True):
|
1508 |
-
def __repr__(self):
|
1509 |
-
return 'typing_extensions.' + self._name
|
1510 |
-
|
1511 |
-
TypeAlias = _TypeAliasForm('TypeAlias',
|
1512 |
-
doc="""Special marker indicating that an assignment should
|
1513 |
-
be recognized as a proper type alias definition by type
|
1514 |
-
checkers.
|
1515 |
-
|
1516 |
-
For example::
|
1517 |
-
|
1518 |
-
Predicate: TypeAlias = Callable[..., bool]
|
1519 |
-
|
1520 |
-
It's invalid when used anywhere except as in the example
|
1521 |
-
above.""")
|
1522 |
-
# 3.6
|
1523 |
-
else:
|
1524 |
-
class _TypeAliasMeta(typing.TypingMeta):
|
1525 |
-
"""Metaclass for TypeAlias"""
|
1526 |
-
|
1527 |
-
def __repr__(self):
|
1528 |
-
return 'typing_extensions.TypeAlias'
|
1529 |
-
|
1530 |
-
class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
|
1531 |
-
"""Special marker indicating that an assignment should
|
1532 |
-
be recognized as a proper type alias definition by type
|
1533 |
-
checkers.
|
1534 |
-
|
1535 |
-
For example::
|
1536 |
-
|
1537 |
-
Predicate: TypeAlias = Callable[..., bool]
|
1538 |
-
|
1539 |
-
It's invalid when used anywhere except as in the example above.
|
1540 |
-
"""
|
1541 |
-
__slots__ = ()
|
1542 |
-
|
1543 |
-
def __instancecheck__(self, obj):
|
1544 |
-
raise TypeError("TypeAlias cannot be used with isinstance().")
|
1545 |
-
|
1546 |
-
def __subclasscheck__(self, cls):
|
1547 |
-
raise TypeError("TypeAlias cannot be used with issubclass().")
|
1548 |
-
|
1549 |
-
def __repr__(self):
|
1550 |
-
return 'typing_extensions.TypeAlias'
|
1551 |
-
|
1552 |
-
TypeAlias = _TypeAliasBase(_root=True)
|
1553 |
-
|
1554 |
-
|
1555 |
-
# Python 3.10+ has PEP 612
|
1556 |
-
if hasattr(typing, 'ParamSpecArgs'):
|
1557 |
-
ParamSpecArgs = typing.ParamSpecArgs
|
1558 |
-
ParamSpecKwargs = typing.ParamSpecKwargs
|
1559 |
-
# 3.6-3.9
|
1560 |
-
else:
|
1561 |
-
class _Immutable:
|
1562 |
-
"""Mixin to indicate that object should not be copied."""
|
1563 |
-
__slots__ = ()
|
1564 |
-
|
1565 |
-
def __copy__(self):
|
1566 |
-
return self
|
1567 |
-
|
1568 |
-
def __deepcopy__(self, memo):
|
1569 |
-
return self
|
1570 |
-
|
1571 |
-
class ParamSpecArgs(_Immutable):
|
1572 |
-
"""The args for a ParamSpec object.
|
1573 |
-
|
1574 |
-
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
|
1575 |
-
|
1576 |
-
ParamSpecArgs objects have a reference back to their ParamSpec:
|
1577 |
-
|
1578 |
-
P.args.__origin__ is P
|
1579 |
-
|
1580 |
-
This type is meant for runtime introspection and has no special meaning to
|
1581 |
-
static type checkers.
|
1582 |
-
"""
|
1583 |
-
def __init__(self, origin):
|
1584 |
-
self.__origin__ = origin
|
1585 |
-
|
1586 |
-
def __repr__(self):
|
1587 |
-
return f"{self.__origin__.__name__}.args"
|
1588 |
-
|
1589 |
-
class ParamSpecKwargs(_Immutable):
|
1590 |
-
"""The kwargs for a ParamSpec object.
|
1591 |
-
|
1592 |
-
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
|
1593 |
-
|
1594 |
-
ParamSpecKwargs objects have a reference back to their ParamSpec:
|
1595 |
-
|
1596 |
-
P.kwargs.__origin__ is P
|
1597 |
-
|
1598 |
-
This type is meant for runtime introspection and has no special meaning to
|
1599 |
-
static type checkers.
|
1600 |
-
"""
|
1601 |
-
def __init__(self, origin):
|
1602 |
-
self.__origin__ = origin
|
1603 |
-
|
1604 |
-
def __repr__(self):
|
1605 |
-
return f"{self.__origin__.__name__}.kwargs"
|
1606 |
-
|
1607 |
-
# 3.10+
|
1608 |
-
if hasattr(typing, 'ParamSpec'):
|
1609 |
-
ParamSpec = typing.ParamSpec
|
1610 |
-
# 3.6-3.9
|
1611 |
-
else:
|
1612 |
-
|
1613 |
-
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
|
1614 |
-
class ParamSpec(list):
|
1615 |
-
"""Parameter specification variable.
|
1616 |
-
|
1617 |
-
Usage::
|
1618 |
-
|
1619 |
-
P = ParamSpec('P')
|
1620 |
-
|
1621 |
-
Parameter specification variables exist primarily for the benefit of static
|
1622 |
-
type checkers. They are used to forward the parameter types of one
|
1623 |
-
callable to another callable, a pattern commonly found in higher order
|
1624 |
-
functions and decorators. They are only valid when used in ``Concatenate``,
|
1625 |
-
or s the first argument to ``Callable``. In Python 3.10 and higher,
|
1626 |
-
they are also supported in user-defined Generics at runtime.
|
1627 |
-
See class Generic for more information on generic types. An
|
1628 |
-
example for annotating a decorator::
|
1629 |
-
|
1630 |
-
T = TypeVar('T')
|
1631 |
-
P = ParamSpec('P')
|
1632 |
-
|
1633 |
-
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
|
1634 |
-
'''A type-safe decorator to add logging to a function.'''
|
1635 |
-
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
|
1636 |
-
logging.info(f'{f.__name__} was called')
|
1637 |
-
return f(*args, **kwargs)
|
1638 |
-
return inner
|
1639 |
-
|
1640 |
-
@add_logging
|
1641 |
-
def add_two(x: float, y: float) -> float:
|
1642 |
-
'''Add two numbers together.'''
|
1643 |
-
return x + y
|
1644 |
-
|
1645 |
-
Parameter specification variables defined with covariant=True or
|
1646 |
-
contravariant=True can be used to declare covariant or contravariant
|
1647 |
-
generic types. These keyword arguments are valid, but their actual semantics
|
1648 |
-
are yet to be decided. See PEP 612 for details.
|
1649 |
-
|
1650 |
-
Parameter specification variables can be introspected. e.g.:
|
1651 |
-
|
1652 |
-
P.__name__ == 'T'
|
1653 |
-
P.__bound__ == None
|
1654 |
-
P.__covariant__ == False
|
1655 |
-
P.__contravariant__ == False
|
1656 |
-
|
1657 |
-
Note that only parameter specification variables defined in global scope can
|
1658 |
-
be pickled.
|
1659 |
-
"""
|
1660 |
-
|
1661 |
-
# Trick Generic __parameters__.
|
1662 |
-
__class__ = typing.TypeVar
|
1663 |
-
|
1664 |
-
@property
|
1665 |
-
def args(self):
|
1666 |
-
return ParamSpecArgs(self)
|
1667 |
-
|
1668 |
-
@property
|
1669 |
-
def kwargs(self):
|
1670 |
-
return ParamSpecKwargs(self)
|
1671 |
-
|
1672 |
-
def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
|
1673 |
-
super().__init__([self])
|
1674 |
-
self.__name__ = name
|
1675 |
-
self.__covariant__ = bool(covariant)
|
1676 |
-
self.__contravariant__ = bool(contravariant)
|
1677 |
-
if bound:
|
1678 |
-
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
|
1679 |
-
else:
|
1680 |
-
self.__bound__ = None
|
1681 |
-
|
1682 |
-
# for pickling:
|
1683 |
-
try:
|
1684 |
-
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
|
1685 |
-
except (AttributeError, ValueError):
|
1686 |
-
def_mod = None
|
1687 |
-
if def_mod != 'typing_extensions':
|
1688 |
-
self.__module__ = def_mod
|
1689 |
-
|
1690 |
-
def __repr__(self):
|
1691 |
-
if self.__covariant__:
|
1692 |
-
prefix = '+'
|
1693 |
-
elif self.__contravariant__:
|
1694 |
-
prefix = '-'
|
1695 |
-
else:
|
1696 |
-
prefix = '~'
|
1697 |
-
return prefix + self.__name__
|
1698 |
-
|
1699 |
-
def __hash__(self):
|
1700 |
-
return object.__hash__(self)
|
1701 |
-
|
1702 |
-
def __eq__(self, other):
|
1703 |
-
return self is other
|
1704 |
-
|
1705 |
-
def __reduce__(self):
|
1706 |
-
return self.__name__
|
1707 |
-
|
1708 |
-
# Hack to get typing._type_check to pass.
|
1709 |
-
def __call__(self, *args, **kwargs):
|
1710 |
-
pass
|
1711 |
-
|
1712 |
-
if not PEP_560:
|
1713 |
-
# Only needed in 3.6.
|
1714 |
-
def _get_type_vars(self, tvars):
|
1715 |
-
if self not in tvars:
|
1716 |
-
tvars.append(self)
|
1717 |
-
|
1718 |
-
|
1719 |
-
# 3.6-3.9
|
1720 |
-
if not hasattr(typing, 'Concatenate'):
|
1721 |
-
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
|
1722 |
-
class _ConcatenateGenericAlias(list):
|
1723 |
-
|
1724 |
-
# Trick Generic into looking into this for __parameters__.
|
1725 |
-
if PEP_560:
|
1726 |
-
__class__ = typing._GenericAlias
|
1727 |
-
else:
|
1728 |
-
__class__ = typing._TypingBase
|
1729 |
-
|
1730 |
-
# Flag in 3.8.
|
1731 |
-
_special = False
|
1732 |
-
# Attribute in 3.6 and earlier.
|
1733 |
-
_gorg = typing.Generic
|
1734 |
-
|
1735 |
-
def __init__(self, origin, args):
|
1736 |
-
super().__init__(args)
|
1737 |
-
self.__origin__ = origin
|
1738 |
-
self.__args__ = args
|
1739 |
-
|
1740 |
-
def __repr__(self):
|
1741 |
-
_type_repr = typing._type_repr
|
1742 |
-
return (f'{_type_repr(self.__origin__)}'
|
1743 |
-
f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
|
1744 |
-
|
1745 |
-
def __hash__(self):
|
1746 |
-
return hash((self.__origin__, self.__args__))
|
1747 |
-
|
1748 |
-
# Hack to get typing._type_check to pass in Generic.
|
1749 |
-
def __call__(self, *args, **kwargs):
|
1750 |
-
pass
|
1751 |
-
|
1752 |
-
@property
|
1753 |
-
def __parameters__(self):
|
1754 |
-
return tuple(
|
1755 |
-
tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
|
1756 |
-
)
|
1757 |
-
|
1758 |
-
if not PEP_560:
|
1759 |
-
# Only required in 3.6.
|
1760 |
-
def _get_type_vars(self, tvars):
|
1761 |
-
if self.__origin__ and self.__parameters__:
|
1762 |
-
typing._get_type_vars(self.__parameters__, tvars)
|
1763 |
-
|
1764 |
-
|
1765 |
-
# 3.6-3.9
|
1766 |
-
@typing._tp_cache
|
1767 |
-
def _concatenate_getitem(self, parameters):
|
1768 |
-
if parameters == ():
|
1769 |
-
raise TypeError("Cannot take a Concatenate of no types.")
|
1770 |
-
if not isinstance(parameters, tuple):
|
1771 |
-
parameters = (parameters,)
|
1772 |
-
if not isinstance(parameters[-1], ParamSpec):
|
1773 |
-
raise TypeError("The last parameter to Concatenate should be a "
|
1774 |
-
"ParamSpec variable.")
|
1775 |
-
msg = "Concatenate[arg, ...]: each arg must be a type."
|
1776 |
-
parameters = tuple(typing._type_check(p, msg) for p in parameters)
|
1777 |
-
return _ConcatenateGenericAlias(self, parameters)
|
1778 |
-
|
1779 |
-
|
1780 |
-
# 3.10+
|
1781 |
-
if hasattr(typing, 'Concatenate'):
|
1782 |
-
Concatenate = typing.Concatenate
|
1783 |
-
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
|
1784 |
-
# 3.9
|
1785 |
-
elif sys.version_info[:2] >= (3, 9):
|
1786 |
-
@_TypeAliasForm
|
1787 |
-
def Concatenate(self, parameters):
|
1788 |
-
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
|
1789 |
-
higher order function which adds, removes or transforms parameters of a
|
1790 |
-
callable.
|
1791 |
-
|
1792 |
-
For example::
|
1793 |
-
|
1794 |
-
Callable[Concatenate[int, P], int]
|
1795 |
-
|
1796 |
-
See PEP 612 for detailed information.
|
1797 |
-
"""
|
1798 |
-
return _concatenate_getitem(self, parameters)
|
1799 |
-
# 3.7-8
|
1800 |
-
elif sys.version_info[:2] >= (3, 7):
|
1801 |
-
class _ConcatenateForm(typing._SpecialForm, _root=True):
|
1802 |
-
def __repr__(self):
|
1803 |
-
return 'typing_extensions.' + self._name
|
1804 |
-
|
1805 |
-
def __getitem__(self, parameters):
|
1806 |
-
return _concatenate_getitem(self, parameters)
|
1807 |
-
|
1808 |
-
Concatenate = _ConcatenateForm(
|
1809 |
-
'Concatenate',
|
1810 |
-
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
|
1811 |
-
higher order function which adds, removes or transforms parameters of a
|
1812 |
-
callable.
|
1813 |
-
|
1814 |
-
For example::
|
1815 |
-
|
1816 |
-
Callable[Concatenate[int, P], int]
|
1817 |
-
|
1818 |
-
See PEP 612 for detailed information.
|
1819 |
-
""")
|
1820 |
-
# 3.6
|
1821 |
-
else:
|
1822 |
-
class _ConcatenateAliasMeta(typing.TypingMeta):
|
1823 |
-
"""Metaclass for Concatenate."""
|
1824 |
-
|
1825 |
-
def __repr__(self):
|
1826 |
-
return 'typing_extensions.Concatenate'
|
1827 |
-
|
1828 |
-
class _ConcatenateAliasBase(typing._FinalTypingBase,
|
1829 |
-
metaclass=_ConcatenateAliasMeta,
|
1830 |
-
_root=True):
|
1831 |
-
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
|
1832 |
-
higher order function which adds, removes or transforms parameters of a
|
1833 |
-
callable.
|
1834 |
-
|
1835 |
-
For example::
|
1836 |
-
|
1837 |
-
Callable[Concatenate[int, P], int]
|
1838 |
-
|
1839 |
-
See PEP 612 for detailed information.
|
1840 |
-
"""
|
1841 |
-
__slots__ = ()
|
1842 |
-
|
1843 |
-
def __instancecheck__(self, obj):
|
1844 |
-
raise TypeError("Concatenate cannot be used with isinstance().")
|
1845 |
-
|
1846 |
-
def __subclasscheck__(self, cls):
|
1847 |
-
raise TypeError("Concatenate cannot be used with issubclass().")
|
1848 |
-
|
1849 |
-
def __repr__(self):
|
1850 |
-
return 'typing_extensions.Concatenate'
|
1851 |
-
|
1852 |
-
def __getitem__(self, parameters):
|
1853 |
-
return _concatenate_getitem(self, parameters)
|
1854 |
-
|
1855 |
-
Concatenate = _ConcatenateAliasBase(_root=True)
|
1856 |
-
|
1857 |
-
# 3.10+
|
1858 |
-
if hasattr(typing, 'TypeGuard'):
|
1859 |
-
TypeGuard = typing.TypeGuard
|
1860 |
-
# 3.9
|
1861 |
-
elif sys.version_info[:2] >= (3, 9):
|
1862 |
-
class _TypeGuardForm(typing._SpecialForm, _root=True):
|
1863 |
-
def __repr__(self):
|
1864 |
-
return 'typing_extensions.' + self._name
|
1865 |
-
|
1866 |
-
@_TypeGuardForm
|
1867 |
-
def TypeGuard(self, parameters):
|
1868 |
-
"""Special typing form used to annotate the return type of a user-defined
|
1869 |
-
type guard function. ``TypeGuard`` only accepts a single type argument.
|
1870 |
-
At runtime, functions marked this way should return a boolean.
|
1871 |
-
|
1872 |
-
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
|
1873 |
-
type checkers to determine a more precise type of an expression within a
|
1874 |
-
program's code flow. Usually type narrowing is done by analyzing
|
1875 |
-
conditional code flow and applying the narrowing to a block of code. The
|
1876 |
-
conditional expression here is sometimes referred to as a "type guard".
|
1877 |
-
|
1878 |
-
Sometimes it would be convenient to use a user-defined boolean function
|
1879 |
-
as a type guard. Such a function should use ``TypeGuard[...]`` as its
|
1880 |
-
return type to alert static type checkers to this intention.
|
1881 |
-
|
1882 |
-
Using ``-> TypeGuard`` tells the static type checker that for a given
|
1883 |
-
function:
|
1884 |
-
|
1885 |
-
1. The return value is a boolean.
|
1886 |
-
2. If the return value is ``True``, the type of its argument
|
1887 |
-
is the type inside ``TypeGuard``.
|
1888 |
-
|
1889 |
-
For example::
|
1890 |
-
|
1891 |
-
def is_str(val: Union[str, float]):
|
1892 |
-
# "isinstance" type guard
|
1893 |
-
if isinstance(val, str):
|
1894 |
-
# Type of ``val`` is narrowed to ``str``
|
1895 |
-
...
|
1896 |
-
else:
|
1897 |
-
# Else, type of ``val`` is narrowed to ``float``.
|
1898 |
-
...
|
1899 |
-
|
1900 |
-
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
|
1901 |
-
form of ``TypeA`` (it can even be a wider form) and this may lead to
|
1902 |
-
type-unsafe results. The main reason is to allow for things like
|
1903 |
-
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
|
1904 |
-
a subtype of the former, since ``List`` is invariant. The responsibility of
|
1905 |
-
writing type-safe type guards is left to the user.
|
1906 |
-
|
1907 |
-
``TypeGuard`` also works with type variables. For more information, see
|
1908 |
-
PEP 647 (User-Defined Type Guards).
|
1909 |
-
"""
|
1910 |
-
item = typing._type_check(parameters, f'{self} accepts only single type.')
|
1911 |
-
return typing._GenericAlias(self, (item,))
|
1912 |
-
# 3.7-3.8
|
1913 |
-
elif sys.version_info[:2] >= (3, 7):
|
1914 |
-
class _TypeGuardForm(typing._SpecialForm, _root=True):
|
1915 |
-
|
1916 |
-
def __repr__(self):
|
1917 |
-
return 'typing_extensions.' + self._name
|
1918 |
-
|
1919 |
-
def __getitem__(self, parameters):
|
1920 |
-
item = typing._type_check(parameters,
|
1921 |
-
f'{self._name} accepts only a single type')
|
1922 |
-
return typing._GenericAlias(self, (item,))
|
1923 |
-
|
1924 |
-
TypeGuard = _TypeGuardForm(
|
1925 |
-
'TypeGuard',
|
1926 |
-
doc="""Special typing form used to annotate the return type of a user-defined
|
1927 |
-
type guard function. ``TypeGuard`` only accepts a single type argument.
|
1928 |
-
At runtime, functions marked this way should return a boolean.
|
1929 |
-
|
1930 |
-
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
|
1931 |
-
type checkers to determine a more precise type of an expression within a
|
1932 |
-
program's code flow. Usually type narrowing is done by analyzing
|
1933 |
-
conditional code flow and applying the narrowing to a block of code. The
|
1934 |
-
conditional expression here is sometimes referred to as a "type guard".
|
1935 |
-
|
1936 |
-
Sometimes it would be convenient to use a user-defined boolean function
|
1937 |
-
as a type guard. Such a function should use ``TypeGuard[...]`` as its
|
1938 |
-
return type to alert static type checkers to this intention.
|
1939 |
-
|
1940 |
-
Using ``-> TypeGuard`` tells the static type checker that for a given
|
1941 |
-
function:
|
1942 |
-
|
1943 |
-
1. The return value is a boolean.
|
1944 |
-
2. If the return value is ``True``, the type of its argument
|
1945 |
-
is the type inside ``TypeGuard``.
|
1946 |
-
|
1947 |
-
For example::
|
1948 |
-
|
1949 |
-
def is_str(val: Union[str, float]):
|
1950 |
-
# "isinstance" type guard
|
1951 |
-
if isinstance(val, str):
|
1952 |
-
# Type of ``val`` is narrowed to ``str``
|
1953 |
-
...
|
1954 |
-
else:
|
1955 |
-
# Else, type of ``val`` is narrowed to ``float``.
|
1956 |
-
...
|
1957 |
-
|
1958 |
-
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
|
1959 |
-
form of ``TypeA`` (it can even be a wider form) and this may lead to
|
1960 |
-
type-unsafe results. The main reason is to allow for things like
|
1961 |
-
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
|
1962 |
-
a subtype of the former, since ``List`` is invariant. The responsibility of
|
1963 |
-
writing type-safe type guards is left to the user.
|
1964 |
-
|
1965 |
-
``TypeGuard`` also works with type variables. For more information, see
|
1966 |
-
PEP 647 (User-Defined Type Guards).
|
1967 |
-
""")
|
1968 |
-
# 3.6
|
1969 |
-
else:
|
1970 |
-
class _TypeGuard(typing._FinalTypingBase, _root=True):
|
1971 |
-
"""Special typing form used to annotate the return type of a user-defined
|
1972 |
-
type guard function. ``TypeGuard`` only accepts a single type argument.
|
1973 |
-
At runtime, functions marked this way should return a boolean.
|
1974 |
-
|
1975 |
-
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
|
1976 |
-
type checkers to determine a more precise type of an expression within a
|
1977 |
-
program's code flow. Usually type narrowing is done by analyzing
|
1978 |
-
conditional code flow and applying the narrowing to a block of code. The
|
1979 |
-
conditional expression here is sometimes referred to as a "type guard".
|
1980 |
-
|
1981 |
-
Sometimes it would be convenient to use a user-defined boolean function
|
1982 |
-
as a type guard. Such a function should use ``TypeGuard[...]`` as its
|
1983 |
-
return type to alert static type checkers to this intention.
|
1984 |
-
|
1985 |
-
Using ``-> TypeGuard`` tells the static type checker that for a given
|
1986 |
-
function:
|
1987 |
-
|
1988 |
-
1. The return value is a boolean.
|
1989 |
-
2. If the return value is ``True``, the type of its argument
|
1990 |
-
is the type inside ``TypeGuard``.
|
1991 |
-
|
1992 |
-
For example::
|
1993 |
-
|
1994 |
-
def is_str(val: Union[str, float]):
|
1995 |
-
# "isinstance" type guard
|
1996 |
-
if isinstance(val, str):
|
1997 |
-
# Type of ``val`` is narrowed to ``str``
|
1998 |
-
...
|
1999 |
-
else:
|
2000 |
-
# Else, type of ``val`` is narrowed to ``float``.
|
2001 |
-
...
|
2002 |
-
|
2003 |
-
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
|
2004 |
-
form of ``TypeA`` (it can even be a wider form) and this may lead to
|
2005 |
-
type-unsafe results. The main reason is to allow for things like
|
2006 |
-
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
|
2007 |
-
a subtype of the former, since ``List`` is invariant. The responsibility of
|
2008 |
-
writing type-safe type guards is left to the user.
|
2009 |
-
|
2010 |
-
``TypeGuard`` also works with type variables. For more information, see
|
2011 |
-
PEP 647 (User-Defined Type Guards).
|
2012 |
-
"""
|
2013 |
-
|
2014 |
-
__slots__ = ('__type__',)
|
2015 |
-
|
2016 |
-
def __init__(self, tp=None, **kwds):
|
2017 |
-
self.__type__ = tp
|
2018 |
-
|
2019 |
-
def __getitem__(self, item):
|
2020 |
-
cls = type(self)
|
2021 |
-
if self.__type__ is None:
|
2022 |
-
return cls(typing._type_check(item,
|
2023 |
-
f'{cls.__name__[1:]} accepts only a single type.'),
|
2024 |
-
_root=True)
|
2025 |
-
raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
|
2026 |
-
|
2027 |
-
def _eval_type(self, globalns, localns):
|
2028 |
-
new_tp = typing._eval_type(self.__type__, globalns, localns)
|
2029 |
-
if new_tp == self.__type__:
|
2030 |
-
return self
|
2031 |
-
return type(self)(new_tp, _root=True)
|
2032 |
-
|
2033 |
-
def __repr__(self):
|
2034 |
-
r = super().__repr__()
|
2035 |
-
if self.__type__ is not None:
|
2036 |
-
r += f'[{typing._type_repr(self.__type__)}]'
|
2037 |
-
return r
|
2038 |
-
|
2039 |
-
def __hash__(self):
|
2040 |
-
return hash((type(self).__name__, self.__type__))
|
2041 |
-
|
2042 |
-
def __eq__(self, other):
|
2043 |
-
if not isinstance(other, _TypeGuard):
|
2044 |
-
return NotImplemented
|
2045 |
-
if self.__type__ is not None:
|
2046 |
-
return self.__type__ == other.__type__
|
2047 |
-
return self is other
|
2048 |
-
|
2049 |
-
TypeGuard = _TypeGuard(_root=True)
|
2050 |
-
|
2051 |
-
if hasattr(typing, "Self"):
|
2052 |
-
Self = typing.Self
|
2053 |
-
elif sys.version_info[:2] >= (3, 7):
|
2054 |
-
# Vendored from cpython typing._SpecialFrom
|
2055 |
-
class _SpecialForm(typing._Final, _root=True):
|
2056 |
-
__slots__ = ('_name', '__doc__', '_getitem')
|
2057 |
-
|
2058 |
-
def __init__(self, getitem):
|
2059 |
-
self._getitem = getitem
|
2060 |
-
self._name = getitem.__name__
|
2061 |
-
self.__doc__ = getitem.__doc__
|
2062 |
-
|
2063 |
-
def __getattr__(self, item):
|
2064 |
-
if item in {'__name__', '__qualname__'}:
|
2065 |
-
return self._name
|
2066 |
-
|
2067 |
-
raise AttributeError(item)
|
2068 |
-
|
2069 |
-
def __mro_entries__(self, bases):
|
2070 |
-
raise TypeError(f"Cannot subclass {self!r}")
|
2071 |
-
|
2072 |
-
def __repr__(self):
|
2073 |
-
return f'typing_extensions.{self._name}'
|
2074 |
-
|
2075 |
-
def __reduce__(self):
|
2076 |
-
return self._name
|
2077 |
-
|
2078 |
-
def __call__(self, *args, **kwds):
|
2079 |
-
raise TypeError(f"Cannot instantiate {self!r}")
|
2080 |
-
|
2081 |
-
def __or__(self, other):
|
2082 |
-
return typing.Union[self, other]
|
2083 |
-
|
2084 |
-
def __ror__(self, other):
|
2085 |
-
return typing.Union[other, self]
|
2086 |
-
|
2087 |
-
def __instancecheck__(self, obj):
|
2088 |
-
raise TypeError(f"{self} cannot be used with isinstance()")
|
2089 |
-
|
2090 |
-
def __subclasscheck__(self, cls):
|
2091 |
-
raise TypeError(f"{self} cannot be used with issubclass()")
|
2092 |
-
|
2093 |
-
@typing._tp_cache
|
2094 |
-
def __getitem__(self, parameters):
|
2095 |
-
return self._getitem(self, parameters)
|
2096 |
-
|
2097 |
-
@_SpecialForm
|
2098 |
-
def Self(self, params):
|
2099 |
-
"""Used to spell the type of "self" in classes.
|
2100 |
-
|
2101 |
-
Example::
|
2102 |
-
|
2103 |
-
from typing import Self
|
2104 |
-
|
2105 |
-
class ReturnsSelf:
|
2106 |
-
def parse(self, data: bytes) -> Self:
|
2107 |
-
...
|
2108 |
-
return self
|
2109 |
-
|
2110 |
-
"""
|
2111 |
-
|
2112 |
-
raise TypeError(f"{self} is not subscriptable")
|
2113 |
-
else:
|
2114 |
-
class _Self(typing._FinalTypingBase, _root=True):
|
2115 |
-
"""Used to spell the type of "self" in classes.
|
2116 |
-
|
2117 |
-
Example::
|
2118 |
-
|
2119 |
-
from typing import Self
|
2120 |
-
|
2121 |
-
class ReturnsSelf:
|
2122 |
-
def parse(self, data: bytes) -> Self:
|
2123 |
-
...
|
2124 |
-
return self
|
2125 |
-
|
2126 |
-
"""
|
2127 |
-
|
2128 |
-
__slots__ = ()
|
2129 |
-
|
2130 |
-
def __instancecheck__(self, obj):
|
2131 |
-
raise TypeError(f"{self} cannot be used with isinstance().")
|
2132 |
-
|
2133 |
-
def __subclasscheck__(self, cls):
|
2134 |
-
raise TypeError(f"{self} cannot be used with issubclass().")
|
2135 |
-
|
2136 |
-
Self = _Self(_root=True)
|
2137 |
-
|
2138 |
-
|
2139 |
-
if hasattr(typing, 'Required'):
|
2140 |
-
Required = typing.Required
|
2141 |
-
NotRequired = typing.NotRequired
|
2142 |
-
elif sys.version_info[:2] >= (3, 9):
|
2143 |
-
class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
|
2144 |
-
def __repr__(self):
|
2145 |
-
return 'typing_extensions.' + self._name
|
2146 |
-
|
2147 |
-
@_ExtensionsSpecialForm
|
2148 |
-
def Required(self, parameters):
|
2149 |
-
"""A special typing construct to mark a key of a total=False TypedDict
|
2150 |
-
as required. For example:
|
2151 |
-
|
2152 |
-
class Movie(TypedDict, total=False):
|
2153 |
-
title: Required[str]
|
2154 |
-
year: int
|
2155 |
-
|
2156 |
-
m = Movie(
|
2157 |
-
title='The Matrix', # typechecker error if key is omitted
|
2158 |
-
year=1999,
|
2159 |
-
)
|
2160 |
-
|
2161 |
-
There is no runtime checking that a required key is actually provided
|
2162 |
-
when instantiating a related TypedDict.
|
2163 |
-
"""
|
2164 |
-
item = typing._type_check(parameters, f'{self._name} accepts only single type')
|
2165 |
-
return typing._GenericAlias(self, (item,))
|
2166 |
-
|
2167 |
-
@_ExtensionsSpecialForm
|
2168 |
-
def NotRequired(self, parameters):
|
2169 |
-
"""A special typing construct to mark a key of a TypedDict as
|
2170 |
-
potentially missing. For example:
|
2171 |
-
|
2172 |
-
class Movie(TypedDict):
|
2173 |
-
title: str
|
2174 |
-
year: NotRequired[int]
|
2175 |
-
|
2176 |
-
m = Movie(
|
2177 |
-
title='The Matrix', # typechecker error if key is omitted
|
2178 |
-
year=1999,
|
2179 |
-
)
|
2180 |
-
"""
|
2181 |
-
item = typing._type_check(parameters, f'{self._name} accepts only single type')
|
2182 |
-
return typing._GenericAlias(self, (item,))
|
2183 |
-
|
2184 |
-
elif sys.version_info[:2] >= (3, 7):
|
2185 |
-
class _RequiredForm(typing._SpecialForm, _root=True):
|
2186 |
-
def __repr__(self):
|
2187 |
-
return 'typing_extensions.' + self._name
|
2188 |
-
|
2189 |
-
def __getitem__(self, parameters):
|
2190 |
-
item = typing._type_check(parameters,
|
2191 |
-
'{} accepts only single type'.format(self._name))
|
2192 |
-
return typing._GenericAlias(self, (item,))
|
2193 |
-
|
2194 |
-
Required = _RequiredForm(
|
2195 |
-
'Required',
|
2196 |
-
doc="""A special typing construct to mark a key of a total=False TypedDict
|
2197 |
-
as required. For example:
|
2198 |
-
|
2199 |
-
class Movie(TypedDict, total=False):
|
2200 |
-
title: Required[str]
|
2201 |
-
year: int
|
2202 |
-
|
2203 |
-
m = Movie(
|
2204 |
-
title='The Matrix', # typechecker error if key is omitted
|
2205 |
-
year=1999,
|
2206 |
-
)
|
2207 |
-
|
2208 |
-
There is no runtime checking that a required key is actually provided
|
2209 |
-
when instantiating a related TypedDict.
|
2210 |
-
""")
|
2211 |
-
NotRequired = _RequiredForm(
|
2212 |
-
'NotRequired',
|
2213 |
-
doc="""A special typing construct to mark a key of a TypedDict as
|
2214 |
-
potentially missing. For example:
|
2215 |
-
|
2216 |
-
class Movie(TypedDict):
|
2217 |
-
title: str
|
2218 |
-
year: NotRequired[int]
|
2219 |
-
|
2220 |
-
m = Movie(
|
2221 |
-
title='The Matrix', # typechecker error if key is omitted
|
2222 |
-
year=1999,
|
2223 |
-
)
|
2224 |
-
""")
|
2225 |
-
else:
|
2226 |
-
# NOTE: Modeled after _Final's implementation when _FinalTypingBase available
|
2227 |
-
class _MaybeRequired(typing._FinalTypingBase, _root=True):
|
2228 |
-
__slots__ = ('__type__',)
|
2229 |
-
|
2230 |
-
def __init__(self, tp=None, **kwds):
|
2231 |
-
self.__type__ = tp
|
2232 |
-
|
2233 |
-
def __getitem__(self, item):
|
2234 |
-
cls = type(self)
|
2235 |
-
if self.__type__ is None:
|
2236 |
-
return cls(typing._type_check(item,
|
2237 |
-
'{} accepts only single type.'.format(cls.__name__[1:])),
|
2238 |
-
_root=True)
|
2239 |
-
raise TypeError('{} cannot be further subscripted'
|
2240 |
-
.format(cls.__name__[1:]))
|
2241 |
-
|
2242 |
-
def _eval_type(self, globalns, localns):
|
2243 |
-
new_tp = typing._eval_type(self.__type__, globalns, localns)
|
2244 |
-
if new_tp == self.__type__:
|
2245 |
-
return self
|
2246 |
-
return type(self)(new_tp, _root=True)
|
2247 |
-
|
2248 |
-
def __repr__(self):
|
2249 |
-
r = super().__repr__()
|
2250 |
-
if self.__type__ is not None:
|
2251 |
-
r += '[{}]'.format(typing._type_repr(self.__type__))
|
2252 |
-
return r
|
2253 |
-
|
2254 |
-
def __hash__(self):
|
2255 |
-
return hash((type(self).__name__, self.__type__))
|
2256 |
-
|
2257 |
-
def __eq__(self, other):
|
2258 |
-
if not isinstance(other, type(self)):
|
2259 |
-
return NotImplemented
|
2260 |
-
if self.__type__ is not None:
|
2261 |
-
return self.__type__ == other.__type__
|
2262 |
-
return self is other
|
2263 |
-
|
2264 |
-
class _Required(_MaybeRequired, _root=True):
|
2265 |
-
"""A special typing construct to mark a key of a total=False TypedDict
|
2266 |
-
as required. For example:
|
2267 |
-
|
2268 |
-
class Movie(TypedDict, total=False):
|
2269 |
-
title: Required[str]
|
2270 |
-
year: int
|
2271 |
-
|
2272 |
-
m = Movie(
|
2273 |
-
title='The Matrix', # typechecker error if key is omitted
|
2274 |
-
year=1999,
|
2275 |
-
)
|
2276 |
-
|
2277 |
-
There is no runtime checking that a required key is actually provided
|
2278 |
-
when instantiating a related TypedDict.
|
2279 |
-
"""
|
2280 |
-
|
2281 |
-
class _NotRequired(_MaybeRequired, _root=True):
|
2282 |
-
"""A special typing construct to mark a key of a TypedDict as
|
2283 |
-
potentially missing. For example:
|
2284 |
-
|
2285 |
-
class Movie(TypedDict):
|
2286 |
-
title: str
|
2287 |
-
year: NotRequired[int]
|
2288 |
-
|
2289 |
-
m = Movie(
|
2290 |
-
title='The Matrix', # typechecker error if key is omitted
|
2291 |
-
year=1999,
|
2292 |
-
)
|
2293 |
-
"""
|
2294 |
-
|
2295 |
-
Required = _Required(_root=True)
|
2296 |
-
NotRequired = _NotRequired(_root=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BreadBytes1/SB-Dashboard/old_app.py
DELETED
@@ -1,327 +0,0 @@
|
|
1 |
-
# ---
|
2 |
-
# jupyter:
|
3 |
-
# jupytext:
|
4 |
-
# text_representation:
|
5 |
-
# extension: .py
|
6 |
-
# format_name: light
|
7 |
-
# format_version: '1.5'
|
8 |
-
# jupytext_version: 1.14.2
|
9 |
-
# kernelspec:
|
10 |
-
# display_name: Python [conda env:bbytes] *
|
11 |
-
# language: python
|
12 |
-
# name: conda-env-bbytes-py
|
13 |
-
# ---
|
14 |
-
|
15 |
-
# +
|
16 |
-
import csv
|
17 |
-
import pandas as pd
|
18 |
-
from datetime import datetime, timedelta
|
19 |
-
import numpy as np
|
20 |
-
import datetime as dt
|
21 |
-
import matplotlib.pyplot as plt
|
22 |
-
from pathlib import Path
|
23 |
-
|
24 |
-
import streamlit as st
|
25 |
-
import plotly.express as px
|
26 |
-
import altair as alt
|
27 |
-
import dateutil.parser
|
28 |
-
import copy
|
29 |
-
|
30 |
-
|
31 |
-
# +
|
32 |
-
@st.experimental_memo
|
33 |
-
def get_hist_info(df_coin, principal_balance,plheader):
|
34 |
-
numtrades = int(len(df_coin))
|
35 |
-
numwin = int(sum(df_coin[plheader] > 0))
|
36 |
-
numloss = int(sum(df_coin[plheader] < 0))
|
37 |
-
winrate = int(np.round(100*numwin/numtrades,2))
|
38 |
-
|
39 |
-
grosswin = sum(df_coin[df_coin[plheader] > 0][plheader])
|
40 |
-
grossloss = sum(df_coin[df_coin[plheader] < 0][plheader])
|
41 |
-
if grossloss !=0:
|
42 |
-
pfactor = -1*np.round(grosswin/grossloss,2)
|
43 |
-
else:
|
44 |
-
pfactor = np.nan
|
45 |
-
return numtrades, numwin, numloss, winrate, pfactor
|
46 |
-
@st.experimental_memo
|
47 |
-
def get_rolling_stats(df, lev, otimeheader, days):
|
48 |
-
max_roll = (df[otimeheader].max() - df[otimeheader].min()).days
|
49 |
-
|
50 |
-
if max_roll >= days:
|
51 |
-
rollend = df[otimeheader].max()-timedelta(days=days)
|
52 |
-
rolling_df = df[df[otimeheader] >= rollend]
|
53 |
-
|
54 |
-
if len(rolling_df) > 0:
|
55 |
-
rolling_perc = rolling_df['Return Per Trade'].dropna().cumprod().values[-1]-1
|
56 |
-
else:
|
57 |
-
rolling_perc = np.nan
|
58 |
-
else:
|
59 |
-
rolling_perc = np.nan
|
60 |
-
return 100*rolling_perc
|
61 |
-
|
62 |
-
@st.experimental_memo
|
63 |
-
def filt_df(df, cheader, symbol_selections):
|
64 |
-
"""
|
65 |
-
Inputs: df (pd.DataFrame), cheader (str) and symbol_selections (list[str]).
|
66 |
-
|
67 |
-
Returns a filtered pd.DataFrame containing only data that matches symbol_selections (list[str])
|
68 |
-
from df[cheader].
|
69 |
-
"""
|
70 |
-
|
71 |
-
df = df.copy()
|
72 |
-
df = df[df[cheader].isin(symbol_selections)]
|
73 |
-
|
74 |
-
return df
|
75 |
-
|
76 |
-
@st.experimental_memo
|
77 |
-
def my_style(v, props=''):
|
78 |
-
props = 'color:red' if v < 0 else 'color:green'
|
79 |
-
return props
|
80 |
-
|
81 |
-
@st.cache(ttl=24*3600, allow_output_mutation=True)
|
82 |
-
def load_data(filename, otimeheader,fmat):
|
83 |
-
df = pd.read_csv(open(filename,'r'), sep='\t') # so as not to mutate cached value
|
84 |
-
df.columns = ['Trade','Signal','Entry Date','Buy Price', 'Sell Price','Exit Date', 'P/L per token', 'P/L %']
|
85 |
-
|
86 |
-
df['Buy Price'] = df['Buy Price'].str.replace('$', '', regex=True)
|
87 |
-
df['Sell Price'] = df['Sell Price'].str.replace('$', '', regex=True)
|
88 |
-
df['Buy Price'] = df['Buy Price'].str.replace(',', '', regex=True)
|
89 |
-
df['Sell Price'] = df['Sell Price'].str.replace(',', '', regex=True)
|
90 |
-
df['P/L per token'] = df['P/L per token'].str.replace('$', '', regex=True)
|
91 |
-
df['P/L per token'] = df['P/L per token'].str.replace(',', '', regex=True)
|
92 |
-
df['P/L %'] = df['P/L %'].str.replace('%', '', regex=True)
|
93 |
-
|
94 |
-
df['Buy Price'] = pd.to_numeric(df['Buy Price'])
|
95 |
-
df['Sell Price'] = pd.to_numeric(df['Sell Price'])
|
96 |
-
df['P/L per token'] = pd.to_numeric(df['P/L per token'])
|
97 |
-
df['P/L %'] = pd.to_numeric(df['P/L %'])
|
98 |
-
|
99 |
-
dateheader = 'Date'
|
100 |
-
theader = 'Time'
|
101 |
-
|
102 |
-
df[dateheader] = [tradetimes.split(" ")[0] for tradetimes in df[otimeheader].values]
|
103 |
-
df[theader] = [tradetimes.split(" ")[1] for tradetimes in df[otimeheader].values]
|
104 |
-
|
105 |
-
df[otimeheader]= [dateutil.parser.parse(date+' '+time)
|
106 |
-
for date,time in zip(df[dateheader],df[theader])]
|
107 |
-
|
108 |
-
df[otimeheader] = pd.to_datetime(df[otimeheader])
|
109 |
-
df['Exit Date'] = pd.to_datetime(df['Exit Date'])
|
110 |
-
df.sort_values(by=otimeheader, inplace=True)
|
111 |
-
|
112 |
-
df[dateheader] = [dateutil.parser.parse(date).date() for date in df[dateheader]]
|
113 |
-
df[theader] = [dateutil.parser.parse(time).time() for time in df[theader]]
|
114 |
-
df['Trade'] = [i+1 for i in range(len(df))] #reindex
|
115 |
-
|
116 |
-
return df
|
117 |
-
|
118 |
-
def runapp():
|
119 |
-
bot_selections = "Short Bread"
|
120 |
-
otimeheader = 'Entry Date'
|
121 |
-
plheader = 'Calculated Return %'
|
122 |
-
fmat = '%Y-%m-%d %H:%M:%S'
|
123 |
-
dollar_cap = 100000.00
|
124 |
-
fees = .075/100
|
125 |
-
st.header(f"{bot_selections} Performance Dashboard :bread: :moneybag:")
|
126 |
-
st.write("Welcome to the Trading Bot Dashboard by BreadBytes! You can use this dashboard to track " +
|
127 |
-
"the performance of our trading bots.")
|
128 |
-
# st.sidebar.header("FAQ")
|
129 |
-
|
130 |
-
# with st.sidebar.subheader("FAQ"):
|
131 |
-
# st.write(Path("FAQ_README.md").read_text())
|
132 |
-
st.subheader("Choose your settings:")
|
133 |
-
no_errors = True
|
134 |
-
|
135 |
-
data = load_data("SB-Trade-Log.csv",otimeheader,fmat)
|
136 |
-
df = data.copy(deep=True)
|
137 |
-
|
138 |
-
grouped_df = df.groupby('Exit Date').agg({'Signal':'min','Entry Date': 'min','Exit Date': 'max','Buy Price': 'mean',
|
139 |
-
'Sell Price' : 'max',
|
140 |
-
'P/L per token': 'mean',
|
141 |
-
'P/L %':lambda x: np.round(x.sum()/4,2)})
|
142 |
-
grouped_df.index = range(1, len(grouped_df)+1)
|
143 |
-
grouped_df.rename(columns={'Buy Price':'Avg. Buy Price',
|
144 |
-
'P/L per token':'Avg. P/L per token'}, inplace=True)
|
145 |
-
|
146 |
-
dateheader = 'Date'
|
147 |
-
theader = 'Time'
|
148 |
-
|
149 |
-
with st.form("user input"):
|
150 |
-
if no_errors:
|
151 |
-
with st.container():
|
152 |
-
col1, col2 = st.columns(2)
|
153 |
-
with col1:
|
154 |
-
try:
|
155 |
-
startdate = st.date_input("Start Date", value=pd.to_datetime(df[otimeheader]).min())
|
156 |
-
except:
|
157 |
-
st.error("Please select your exchange or upload a supported trade log file.")
|
158 |
-
no_errors = False
|
159 |
-
with col2:
|
160 |
-
try:
|
161 |
-
enddate = st.date_input("End Date", value=datetime.today())
|
162 |
-
except:
|
163 |
-
st.error("Please select your exchange or upload a supported trade log file.")
|
164 |
-
no_errors = False
|
165 |
-
#st.sidebar.subheader("Customize your Dashboard")
|
166 |
-
|
167 |
-
if no_errors and (enddate < startdate):
|
168 |
-
st.error("End Date must be later than Start date. Please try again.")
|
169 |
-
no_errors = False
|
170 |
-
with st.container():
|
171 |
-
col1,col2 = st.columns(2)
|
172 |
-
with col2:
|
173 |
-
lev = st.number_input('Leverage', min_value=1, value=1, max_value= 5, step=1)
|
174 |
-
with col1:
|
175 |
-
principal_balance = st.number_input('Starting Balance', min_value=0.00, value=1000.00, max_value= dollar_cap, step=.01)
|
176 |
-
|
177 |
-
#hack way to get button centered
|
178 |
-
c = st.columns(9)
|
179 |
-
with c[4]:
|
180 |
-
submitted = st.form_submit_button("Get Cookin'!")
|
181 |
-
|
182 |
-
signal_map = {'Long': 1, 'Short':-1} # 1 for long #-1 for short
|
183 |
-
|
184 |
-
df['Calculated Return %'] = (1-fees)*(df['Signal'].map(signal_map)*(df['Sell Price']-df['Buy Price'])/df['Buy Price'] - fees) #accounts for fees on open and close of trade
|
185 |
-
|
186 |
-
|
187 |
-
if submitted and principal_balance * lev > dollar_cap:
|
188 |
-
lev = np.floor(dollar_cap/principal_balance)
|
189 |
-
st.error(f"WARNING: (Starting Balance)*(Leverage) exceeds the ${dollar_cap} limit. Using maximum available leverage of {lev}")
|
190 |
-
|
191 |
-
if submitted and no_errors:
|
192 |
-
df = df[(df[dateheader] >= startdate) & (df[dateheader] <= enddate)]
|
193 |
-
|
194 |
-
if len(df) == 0:
|
195 |
-
st.error("There are no available trades matching your selections. Please try again!")
|
196 |
-
no_errors = False
|
197 |
-
if no_errors:
|
198 |
-
df['Return Per Trade'] = 1+lev*df['Calculated Return %'].values
|
199 |
-
|
200 |
-
df['Compounded Return'] = df['Return Per Trade'].cumprod()
|
201 |
-
df['New Balance'] = [min(dollar_cap/lev, bal*principal_balance) for bal in df['Compounded Return']]
|
202 |
-
df['Balance used in Trade'] = np.concatenate([[principal_balance], df['New Balance'].values[:-1]])
|
203 |
-
df['Net P/L Per Trade'] = (df['Return Per Trade']-1)*df['Balance used in Trade']
|
204 |
-
df['Cumulative P/L'] = df['Net P/L Per Trade'].cumsum()
|
205 |
-
|
206 |
-
cum_pl = df.loc[df.dropna().index[-1],'Cumulative P/L'] + principal_balance
|
207 |
-
|
208 |
-
effective_return = 100*((cum_pl - principal_balance)/principal_balance)
|
209 |
-
|
210 |
-
st.header(f"{bot_selections} Results")
|
211 |
-
if len(bot_selections) > 1:
|
212 |
-
st.metric(
|
213 |
-
"Total Account Balance",
|
214 |
-
f"${cum_pl:.2f}",
|
215 |
-
f"{100*(cum_pl-principal_balance)/(principal_balance):.2f} %",
|
216 |
-
)
|
217 |
-
|
218 |
-
st.line_chart(data=df.dropna(), x='Exit Date', y='Cumulative P/L', use_container_width=True)
|
219 |
-
|
220 |
-
df['Per Trade Return Rate'] = df['Return Per Trade']-1
|
221 |
-
|
222 |
-
totals = pd.DataFrame([], columns = ['# of Trades', 'Wins', 'Losses', 'Win Rate', 'Profit Factor'])
|
223 |
-
data = get_hist_info(df.dropna(), principal_balance,'Calculated Return %')
|
224 |
-
totals.loc[len(totals)] = list(i for i in data)
|
225 |
-
|
226 |
-
totals['Cum. P/L'] = cum_pl-principal_balance
|
227 |
-
totals['Cum. P/L (%)'] = 100*(cum_pl-principal_balance)/principal_balance
|
228 |
-
#results_df['Avg. P/L'] = (cum_pl-principal_balance)/results_df['# of Trades'].values[0]
|
229 |
-
#results_df['Avg. P/L (%)'] = 100*results_df['Avg. P/L'].values[0]/principal_balance
|
230 |
-
|
231 |
-
if df.empty:
|
232 |
-
st.error("Oops! None of the data provided matches your selection(s). Please try again.")
|
233 |
-
else:
|
234 |
-
#st.dataframe(totals.style.format({'# of Trades': '{:.0f}','Wins': '{:.0f}','Losses': '{:.0f}','Win Rate': '{:.2f}%','Profit Factor' : '{:.2f}', 'Avg. P/L (%)': '{:.2f}%', 'Cum. P/L (%)': '{:.2f}%', 'Cum. P/L': '{:.2f}', 'Avg. P/L': '{:.2f}'})
|
235 |
-
#.text_gradient(subset=['Win Rate'],cmap="RdYlGn", vmin = 0, vmax = 100)\
|
236 |
-
#.text_gradient(subset=['Profit Factor'],cmap="RdYlGn", vmin = 0, vmax = 2), use_container_width=True)
|
237 |
-
for row in totals.itertuples():
|
238 |
-
col1, col2, col3, col4 = st.columns(4)
|
239 |
-
c1, c2, c3, c4 = st.columns(4)
|
240 |
-
with col1:
|
241 |
-
st.metric(
|
242 |
-
"Total Trades",
|
243 |
-
f"{row._1:.0f}",
|
244 |
-
)
|
245 |
-
with c1:
|
246 |
-
st.metric(
|
247 |
-
"Profit Factor",
|
248 |
-
f"{row._5:.2f}",
|
249 |
-
)
|
250 |
-
with col2:
|
251 |
-
st.metric(
|
252 |
-
"Wins",
|
253 |
-
f"{row.Wins:.0f}",
|
254 |
-
)
|
255 |
-
with c2:
|
256 |
-
st.metric(
|
257 |
-
"Cumulative P/L",
|
258 |
-
f"${row._6:.2f}",
|
259 |
-
f"{row._7:.2f} %",
|
260 |
-
)
|
261 |
-
with col3:
|
262 |
-
st.metric(
|
263 |
-
"Losses",
|
264 |
-
f"{row.Losses:.0f}",
|
265 |
-
)
|
266 |
-
with c3:
|
267 |
-
st.metric(
|
268 |
-
"Rolling 7 Days",
|
269 |
-
"",#f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
|
270 |
-
f"{get_rolling_stats(df,lev, otimeheader, 7):.2f}%",
|
271 |
-
)
|
272 |
-
st.metric(
|
273 |
-
"Rolling 30 Days",
|
274 |
-
"",#f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
|
275 |
-
f"{get_rolling_stats(df,lev, otimeheader, 30):.2f}%",
|
276 |
-
)
|
277 |
-
|
278 |
-
with col4:
|
279 |
-
st.metric(
|
280 |
-
"Win Rate",
|
281 |
-
f"{row._4:.1f}%",
|
282 |
-
)
|
283 |
-
with c4:
|
284 |
-
st.metric(
|
285 |
-
"Rolling 90 Days",
|
286 |
-
"",#f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
|
287 |
-
f"{get_rolling_stats(df,lev, otimeheader, 90):.2f}%",
|
288 |
-
)
|
289 |
-
st.metric(
|
290 |
-
"Rolling 180 Days",
|
291 |
-
"",#f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
|
292 |
-
f"{get_rolling_stats(df,lev, otimeheader, 180):.2f}%",
|
293 |
-
)
|
294 |
-
if submitted:
|
295 |
-
grouped_df = df.groupby('Exit Date').agg({'Signal':'min','Entry Date': 'min','Exit Date': 'max','Buy Price': 'mean',
|
296 |
-
'Sell Price' : 'max',
|
297 |
-
'Net P/L Per Trade': 'mean',
|
298 |
-
'Calculated Return %' : lambda x: np.round(100*lev*x.sum(),3)})
|
299 |
-
grouped_df.index = range(1, len(grouped_df)+1)
|
300 |
-
grouped_df.rename(columns={'Buy Price':'Avg. Buy Price',
|
301 |
-
'Net P/L Per Trade':'Net P/L',
|
302 |
-
'Calculated Return %':'P/L %'}, inplace=True)
|
303 |
-
else:
|
304 |
-
grouped_df = df.groupby('Exit Date').agg({'Signal':'min','Entry Date': 'min','Exit Date': 'max','Buy Price': 'mean',
|
305 |
-
'Sell Price' : 'max',
|
306 |
-
'P/L per token': 'mean',
|
307 |
-
'Calculated Return %' : lambda x: np.round(100*x.sum(),3)})
|
308 |
-
grouped_df.index = range(1, len(grouped_df)+1)
|
309 |
-
grouped_df.rename(columns={'Buy Price':'Avg. Buy Price',
|
310 |
-
'P/L per token':'Net P/L',
|
311 |
-
'Calculated Return %':'P/L %'}, inplace=True)
|
312 |
-
st.subheader("Trade Logs")
|
313 |
-
grouped_df['Entry Date'] = pd.to_datetime(grouped_df['Entry Date'])
|
314 |
-
grouped_df['Exit Date'] = pd.to_datetime(grouped_df['Exit Date'])
|
315 |
-
st.dataframe(grouped_df.style.format({'Entry Date':'{:%m-%d-%Y %H:%M:%S}','Exit Date':'{:%m-%d-%Y %H:%M:%S}','Avg. Buy Price': '${:.2f}', 'Sell Price': '${:.2f}', 'Net P/L':'${:.3f}', 'P/L %':'{:.2f}%'})\
|
316 |
-
.applymap(my_style,subset=['Net P/L'])\
|
317 |
-
.applymap(my_style,subset=['P/L %']), use_container_width=True)
|
318 |
-
|
319 |
-
if __name__ == "__main__":
|
320 |
-
st.set_page_config(
|
321 |
-
"Trading Bot Dashboard",
|
322 |
-
layout="wide",
|
323 |
-
)
|
324 |
-
runapp()
|
325 |
-
# -
|
326 |
-
|
327 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|